/*
- * Copyright (c) 2010, 2011, 2012 Nicira Networks.
+ * Copyright (c) 2010, 2011, 2012, 2013 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <stdlib.h>

#include "classifier.h"
#include "dynamic-string.h"
#include "meta-flow.h"
#include "ofp-actions.h"
#include "ofp-errors.h"
#include "ofp-util.h"
#include "ofpbuf.h"
if (match_len < 4) {
if (match_len) {
- VLOG_DBG_RL(&rl, "nx_match ends with partial nxm_header");
+ VLOG_DBG_RL(&rl, "nx_match ends with partial (%u-byte) nxm_header",
+ match_len);
}
return 0;
}
return header;
}
+/* Given NXM/OXM value 'value' and mask 'mask', each 'width' bytes long,
+ * checks for any 1-bit in the value where there is a 0-bit in the mask. If it
+ * finds one, logs a warning. */
+static void
+check_mask_consistency(const uint8_t *p, const struct mf_field *mf)
+{
+ unsigned int width = mf->n_bytes;
+ const uint8_t *value = p + 4;
+ const uint8_t *mask = p + 4 + width;
+ unsigned int i;
+
+ for (i = 0; i < width; i++) {
+ if (value[i] & ~mask[i]) {
+ if (!VLOG_DROP_WARN(&rl)) {
+ char *s = nx_match_to_string(p, width * 2 + 4);
+ VLOG_WARN_RL(&rl, "NXM/OXM entry %s has 1-bits in value for "
+ "bits wildcarded by the mask. (Future versions "
+ "of OVS may report this as an OpenFlow error.)",
+ s);
+ break;
+ }
+ }
+ }
+}
+
static enum ofperr
-nx_pull_match__(struct ofpbuf *b, unsigned int match_len, bool strict,
- uint16_t priority, struct cls_rule *rule,
- ovs_be64 *cookie, ovs_be64 *cookie_mask)
+nx_pull_raw(const uint8_t *p, unsigned int match_len, bool strict,
+ struct match *match, ovs_be64 *cookie, ovs_be64 *cookie_mask)
{
uint32_t header;
- uint8_t *p;
- assert((cookie != NULL) == (cookie_mask != NULL));
+ ovs_assert((cookie != NULL) == (cookie_mask != NULL));
- p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
- if (!p) {
- VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a "
- "multiple of 8, is longer than space in message (max "
- "length %zu)", match_len, b->size);
- return OFPERR_OFPBMC_BAD_LEN;
- }
-
- cls_rule_init_catchall(rule, priority);
+ match_init_catchall(match);
if (cookie) {
*cookie = *cookie_mask = htonll(0);
}
+ if (!match_len) {
+ return 0;
+ }
+
for (;
(header = nx_entry_ok(p, match_len)) != 0;
p += 4 + NXM_LENGTH(header), match_len -= 4 + NXM_LENGTH(header)) {
} else {
continue;
}
- } else if (!mf_are_prereqs_ok(mf, &rule->flow)) {
+ } else if (!mf_are_prereqs_ok(mf, &match->flow)) {
error = OFPERR_OFPBMC_BAD_PREREQ;
- } else if (!mf_is_all_wild(mf, &rule->wc)) {
+ } else if (!mf_is_all_wild(mf, &match->wc)) {
error = OFPERR_OFPBMC_DUP_FIELD;
} else {
unsigned int width = mf->n_bytes;
error = OFPERR_OFPBMC_BAD_VALUE;
} else if (!NXM_HASMASK(header)) {
error = 0;
- mf_set_value(mf, &value, rule);
+ mf_set_value(mf, &value, match);
} else {
union mf_value mask;
error = OFPERR_OFPBMC_BAD_MASK;
} else {
error = 0;
- mf_set(mf, &value, &mask, rule);
+ check_mask_consistency(p, mf);
+ mf_set(mf, &value, &mask, match);
}
}
}
if (NXM_HASMASK(header)) {
memcpy(cookie_mask, p + 4 + width, width);
} else {
- *cookie_mask = htonll(UINT64_MAX);
+ *cookie_mask = OVS_BE64_MAX;
}
error = 0;
}
return match_len ? OFPERR_OFPBMC_BAD_LEN : 0;
}
+static enum ofperr
+nx_pull_match__(struct ofpbuf *b, unsigned int match_len, bool strict,
+ struct match *match,
+ ovs_be64 *cookie, ovs_be64 *cookie_mask)
+{
+ uint8_t *p = NULL;
+
+ if (match_len) {
+ p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
+ if (!p) {
+ VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a "
+ "multiple of 8, is longer than space in message (max "
+ "length %"PRIuSIZE")", match_len, b->size);
+ return OFPERR_OFPBMC_BAD_LEN;
+ }
+ }
+
+ return nx_pull_raw(p, match_len, strict, match, cookie, cookie_mask);
+}
+
/* Parses the nx_match formatted match description in 'b' with length
- * 'match_len'. The results are stored in 'rule', which is initialized with
- * 'priority'. If 'cookie' and 'cookie_mask' contain valid pointers, then the
- * cookie and mask will be stored in them if a "NXM_NX_COOKIE*" match is
- * defined. Otherwise, 0 is stored in both.
+ * 'match_len'. Stores the results in 'match'. If 'cookie' and 'cookie_mask'
+ * are valid pointers, then stores the cookie and mask in them if 'b' contains
+ * a "NXM_NX_COOKIE*" match. Otherwise, stores 0 in both.
*
- * Fails with an error when encountering unknown NXM headers.
+ * Fails with an error upon encountering an unknown NXM header.
*
* Returns 0 if successful, otherwise an OpenFlow error code. */
enum ofperr
-nx_pull_match(struct ofpbuf *b, unsigned int match_len,
- uint16_t priority, struct cls_rule *rule,
+nx_pull_match(struct ofpbuf *b, unsigned int match_len, struct match *match,
ovs_be64 *cookie, ovs_be64 *cookie_mask)
{
- return nx_pull_match__(b, match_len, true, priority, rule, cookie,
- cookie_mask);
+ return nx_pull_match__(b, match_len, true, match, cookie, cookie_mask);
}
-/* Behaves the same as nx_pull_match() with one exception. Skips over unknown
- * NXM headers instead of failing with an error when they are encountered. */
+/* Behaves the same as nx_pull_match(), but skips over unknown NXM headers,
+ * instead of failing with an error. */
enum ofperr
nx_pull_match_loose(struct ofpbuf *b, unsigned int match_len,
- uint16_t priority, struct cls_rule *rule,
+ struct match *match,
ovs_be64 *cookie, ovs_be64 *cookie_mask)
{
- return nx_pull_match__(b, match_len, false, priority, rule, cookie,
- cookie_mask);
+ return nx_pull_match__(b, match_len, false, match, cookie, cookie_mask);
+}
+
+static enum ofperr
+oxm_pull_match__(struct ofpbuf *b, bool strict, struct match *match)
+{
+ struct ofp11_match_header *omh = b->data;
+ uint8_t *p;
+ uint16_t match_len;
+
+ if (b->size < sizeof *omh) {
+ return OFPERR_OFPBMC_BAD_LEN;
+ }
+
+ match_len = ntohs(omh->length);
+ if (match_len < sizeof *omh) {
+ return OFPERR_OFPBMC_BAD_LEN;
+ }
+
+ if (omh->type != htons(OFPMT_OXM)) {
+ return OFPERR_OFPBMC_BAD_TYPE;
+ }
+
+ p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
+ if (!p) {
+ VLOG_DBG_RL(&rl, "oxm length %u, rounded up to a "
+ "multiple of 8, is longer than space in message (max "
+ "length %"PRIuSIZE")", match_len, b->size);
+ return OFPERR_OFPBMC_BAD_LEN;
+ }
+
+ return nx_pull_raw(p + sizeof *omh, match_len - sizeof *omh,
+ strict, match, NULL, NULL);
+}
+
+/* Parses the oxm formatted match description preceded by a struct
+ * ofp11_match_header in 'b'. Stores the result in 'match'.
+ *
+ * Fails with an error when encountering unknown OXM headers.
+ *
+ * Returns 0 if successful, otherwise an OpenFlow error code. */
+enum ofperr
+oxm_pull_match(struct ofpbuf *b, struct match *match)
+{
+ return oxm_pull_match__(b, true, match);
+}
+
+/* Behaves the same as oxm_pull_match() with one exception. Skips over unknown
+ * OXM headers instead of failing with an error when they are encountered. */
+enum ofperr
+oxm_pull_match_loose(struct ofpbuf *b, struct match *match)
+{
+ return oxm_pull_match__(b, false, match);
}
\f
/* nx_put_match() and helpers.
case 0:
break;
- case CONSTANT_HTONS(UINT16_MAX):
+ case OVS_BE16_MAX:
nxm_put_16(b, header, value);
break;
case 0:
break;
- case CONSTANT_HTONL(UINT32_MAX):
+ case OVS_BE32_MAX:
nxm_put_32(b, header, value);
break;
case 0:
break;
- case CONSTANT_HTONLL(UINT64_MAX):
+ case OVS_BE64_MAX:
nxm_put_64(b, header, value);
break;
}
static void
-nxm_put_eth_dst(struct ofpbuf *b,
- flow_wildcards_t wc, const uint8_t value[ETH_ADDR_LEN])
+nxm_put_eth_masked(struct ofpbuf *b, uint32_t header,
+ const uint8_t value[ETH_ADDR_LEN],
+ const uint8_t mask[ETH_ADDR_LEN])
{
- switch (wc & (FWW_DL_DST | FWW_ETH_MCAST)) {
- case FWW_DL_DST | FWW_ETH_MCAST:
- break;
- default:
- nxm_put_header(b, NXM_OF_ETH_DST_W);
- ofpbuf_put(b, value, ETH_ADDR_LEN);
- ofpbuf_put(b, flow_wildcards_to_dl_dst_mask(wc), ETH_ADDR_LEN);
- break;
- case 0:
- nxm_put_eth(b, NXM_OF_ETH_DST, value);
- break;
+ if (!eth_addr_is_zero(mask)) {
+ if (eth_mask_is_exact(mask)) {
+ nxm_put_eth(b, header, value);
+ } else {
+ nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
+ ofpbuf_put(b, value, ETH_ADDR_LEN);
+ ofpbuf_put(b, mask, ETH_ADDR_LEN);
+ }
}
}
}
static void
-nxm_put_frag(struct ofpbuf *b, const struct cls_rule *cr)
+nxm_put_frag(struct ofpbuf *b, const struct match *match)
{
- uint8_t nw_frag = cr->flow.nw_frag;
- uint8_t nw_frag_mask = cr->wc.nw_frag_mask;
+ uint8_t nw_frag = match->flow.nw_frag;
+ uint8_t nw_frag_mask = match->wc.masks.nw_frag;
switch (nw_frag_mask) {
case 0:
}
static void
-nxm_put_ip(struct ofpbuf *b, const struct cls_rule *cr,
- uint8_t icmp_proto, uint32_t icmp_type, uint32_t icmp_code)
+nxm_put_ip(struct ofpbuf *b, const struct match *match,
+ uint8_t icmp_proto, uint32_t icmp_type, uint32_t icmp_code,
+ bool oxm)
{
- const flow_wildcards_t wc = cr->wc.wildcards;
- const struct flow *flow = &cr->flow;
+ const struct flow *flow = &match->flow;
- nxm_put_frag(b, cr);
+ nxm_put_frag(b, match);
- if (!(wc & FWW_NW_DSCP)) {
- nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & IP_DSCP_MASK);
+ if (match->wc.masks.nw_tos & IP_DSCP_MASK) {
+ if (oxm) {
+ nxm_put_8(b, OXM_OF_IP_DSCP, flow->nw_tos >> 2);
+ } else {
+ nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & IP_DSCP_MASK);
+ }
}
- if (!(wc & FWW_NW_ECN)) {
- nxm_put_8(b, NXM_NX_IP_ECN, flow->nw_tos & IP_ECN_MASK);
+ if (match->wc.masks.nw_tos & IP_ECN_MASK) {
+ nxm_put_8(b, oxm ? OXM_OF_IP_ECN : NXM_NX_IP_ECN,
+ flow->nw_tos & IP_ECN_MASK);
}
- if (!(wc & FWW_NW_TTL)) {
+ if (!oxm && match->wc.masks.nw_ttl) {
nxm_put_8(b, NXM_NX_IP_TTL, flow->nw_ttl);
}
- if (!(wc & FWW_NW_PROTO)) {
- nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto);
+ if (match->wc.masks.nw_proto) {
+ nxm_put_8(b, oxm ? OXM_OF_IP_PROTO : NXM_OF_IP_PROTO, flow->nw_proto);
if (flow->nw_proto == IPPROTO_TCP) {
- nxm_put_16m(b, NXM_OF_TCP_SRC, flow->tp_src, cr->wc.tp_src_mask);
- nxm_put_16m(b, NXM_OF_TCP_DST, flow->tp_dst, cr->wc.tp_dst_mask);
+ nxm_put_16m(b, oxm ? OXM_OF_TCP_SRC : NXM_OF_TCP_SRC,
+ flow->tp_src, match->wc.masks.tp_src);
+ nxm_put_16m(b, oxm ? OXM_OF_TCP_DST : NXM_OF_TCP_DST,
+ flow->tp_dst, match->wc.masks.tp_dst);
+ nxm_put_16m(b, NXM_NX_TCP_FLAGS,
+ flow->tcp_flags, match->wc.masks.tcp_flags);
} else if (flow->nw_proto == IPPROTO_UDP) {
- nxm_put_16m(b, NXM_OF_UDP_SRC, flow->tp_src, cr->wc.tp_src_mask);
- nxm_put_16m(b, NXM_OF_UDP_DST, flow->tp_dst, cr->wc.tp_dst_mask);
+ nxm_put_16m(b, oxm ? OXM_OF_UDP_SRC : NXM_OF_UDP_SRC,
+ flow->tp_src, match->wc.masks.tp_src);
+ nxm_put_16m(b, oxm ? OXM_OF_UDP_DST : NXM_OF_UDP_DST,
+ flow->tp_dst, match->wc.masks.tp_dst);
+ } else if (flow->nw_proto == IPPROTO_SCTP) {
+ nxm_put_16m(b, OXM_OF_SCTP_SRC, flow->tp_src,
+ match->wc.masks.tp_src);
+ nxm_put_16m(b, OXM_OF_SCTP_DST, flow->tp_dst,
+ match->wc.masks.tp_dst);
} else if (flow->nw_proto == icmp_proto) {
- if (cr->wc.tp_src_mask) {
+ if (match->wc.masks.tp_src) {
nxm_put_8(b, icmp_type, ntohs(flow->tp_src));
}
- if (cr->wc.tp_dst_mask) {
+ if (match->wc.masks.tp_dst) {
nxm_put_8(b, icmp_code, ntohs(flow->tp_dst));
}
}
}
}
-/* Appends to 'b' the nx_match format that expresses 'cr' (except for
- * 'cr->priority', because priority is not part of nx_match), plus enough
- * zero bytes to pad the nx_match out to a multiple of 8. For Flow Mod
- * and Flow Stats Requests messages, a 'cookie' and 'cookie_mask' may be
- * supplied. Otherwise, 'cookie_mask' should be zero.
+/* Appends to 'b' the nx_match format that expresses 'match'. For Flow Mod and
+ * Flow Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied.
+ * Otherwise, 'cookie_mask' should be zero.
*
* This function can cause 'b''s data to be reallocated.
*
* Returns the number of bytes appended to 'b', excluding padding.
*
- * If 'cr' is a catch-all rule that matches every packet, then this function
+ * If 'match' is a catch-all rule that matches every packet, then this function
* appends nothing to 'b' and returns 0. */
-int
-nx_put_match(struct ofpbuf *b, const struct cls_rule *cr,
- ovs_be64 cookie, ovs_be64 cookie_mask)
+static int
+nx_put_raw(struct ofpbuf *b, bool oxm, const struct match *match,
+ ovs_be64 cookie, ovs_be64 cookie_mask)
{
- const flow_wildcards_t wc = cr->wc.wildcards;
- const struct flow *flow = &cr->flow;
+ const struct flow *flow = &match->flow;
const size_t start_len = b->size;
int match_len;
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 9);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 23);
/* Metadata. */
- if (!(wc & FWW_IN_PORT)) {
- uint16_t in_port = flow->in_port;
- nxm_put_16(b, NXM_OF_IN_PORT, htons(in_port));
+ if (match->wc.masks.in_port.ofp_port) {
+ ofp_port_t in_port = flow->in_port.ofp_port;
+ if (oxm) {
+ nxm_put_32(b, OXM_OF_IN_PORT, ofputil_port_to_ofp11(in_port));
+ } else {
+ nxm_put_16(b, NXM_OF_IN_PORT, htons(ofp_to_u16(in_port)));
+ }
}
/* Ethernet. */
- nxm_put_eth_dst(b, wc, flow->dl_dst);
- if (!(wc & FWW_DL_SRC)) {
- nxm_put_eth(b, NXM_OF_ETH_SRC, flow->dl_src);
- }
- if (!(wc & FWW_DL_TYPE)) {
- nxm_put_16(b, NXM_OF_ETH_TYPE,
- ofputil_dl_type_to_openflow(flow->dl_type));
- }
+ nxm_put_eth_masked(b, oxm ? OXM_OF_ETH_SRC : NXM_OF_ETH_SRC,
+ flow->dl_src, match->wc.masks.dl_src);
+ nxm_put_eth_masked(b, oxm ? OXM_OF_ETH_DST : NXM_OF_ETH_DST,
+ flow->dl_dst, match->wc.masks.dl_dst);
+ nxm_put_16m(b, oxm ? OXM_OF_ETH_TYPE : NXM_OF_ETH_TYPE,
+ ofputil_dl_type_to_openflow(flow->dl_type),
+ match->wc.masks.dl_type);
/* 802.1Q. */
- nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci, cr->wc.vlan_tci_mask);
+ if (oxm) {
+ ovs_be16 VID_CFI_MASK = htons(VLAN_VID_MASK | VLAN_CFI);
+ ovs_be16 vid = flow->vlan_tci & VID_CFI_MASK;
+ ovs_be16 mask = match->wc.masks.vlan_tci & VID_CFI_MASK;
+
+ if (mask == htons(VLAN_VID_MASK | VLAN_CFI)) {
+ nxm_put_16(b, OXM_OF_VLAN_VID, vid);
+ } else if (mask) {
+ nxm_put_16m(b, OXM_OF_VLAN_VID, vid, mask);
+ }
+
+ if (vid && vlan_tci_to_pcp(match->wc.masks.vlan_tci)) {
+ nxm_put_8(b, OXM_OF_VLAN_PCP, vlan_tci_to_pcp(flow->vlan_tci));
+ }
+
+ } else {
+ nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci,
+ match->wc.masks.vlan_tci);
+ }
+
+ /* MPLS. */
+ if (eth_type_mpls(flow->dl_type)) {
+ if (match->wc.masks.mpls_lse & htonl(MPLS_TC_MASK)) {
+ nxm_put_8(b, OXM_OF_MPLS_TC, mpls_lse_to_tc(flow->mpls_lse));
+ }
+
+ if (match->wc.masks.mpls_lse & htonl(MPLS_BOS_MASK)) {
+ nxm_put_8(b, OXM_OF_MPLS_BOS, mpls_lse_to_bos(flow->mpls_lse));
+ }
+
+ if (match->wc.masks.mpls_lse & htonl(MPLS_LABEL_MASK)) {
+ nxm_put_32(b, OXM_OF_MPLS_LABEL,
+ htonl(mpls_lse_to_label(flow->mpls_lse)));
+ }
+ }
/* L3. */
- if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IP)) {
+ if (flow->dl_type == htons(ETH_TYPE_IP)) {
/* IP. */
- nxm_put_32m(b, NXM_OF_IP_SRC, flow->nw_src, cr->wc.nw_src_mask);
- nxm_put_32m(b, NXM_OF_IP_DST, flow->nw_dst, cr->wc.nw_dst_mask);
- nxm_put_ip(b, cr, IPPROTO_ICMP, NXM_OF_ICMP_TYPE, NXM_OF_ICMP_CODE);
- } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IPV6)) {
+ nxm_put_32m(b, oxm ? OXM_OF_IPV4_SRC : NXM_OF_IP_SRC,
+ flow->nw_src, match->wc.masks.nw_src);
+ nxm_put_32m(b, oxm ? OXM_OF_IPV4_DST : NXM_OF_IP_DST,
+ flow->nw_dst, match->wc.masks.nw_dst);
+ nxm_put_ip(b, match, IPPROTO_ICMP,
+ oxm ? OXM_OF_ICMPV4_TYPE : NXM_OF_ICMP_TYPE,
+ oxm ? OXM_OF_ICMPV4_CODE : NXM_OF_ICMP_CODE, oxm);
+ } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
/* IPv6. */
- nxm_put_ipv6(b, NXM_NX_IPV6_SRC, &flow->ipv6_src,
- &cr->wc.ipv6_src_mask);
- nxm_put_ipv6(b, NXM_NX_IPV6_DST, &flow->ipv6_dst,
- &cr->wc.ipv6_dst_mask);
- nxm_put_ip(b, cr,
- IPPROTO_ICMPV6, NXM_NX_ICMPV6_TYPE, NXM_NX_ICMPV6_CODE);
+ nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_SRC : NXM_NX_IPV6_SRC,
+ &flow->ipv6_src, &match->wc.masks.ipv6_src);
+ nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_DST : NXM_NX_IPV6_DST,
+ &flow->ipv6_dst, &match->wc.masks.ipv6_dst);
+ nxm_put_ip(b, match, IPPROTO_ICMPV6,
+ oxm ? OXM_OF_ICMPV6_TYPE : NXM_NX_ICMPV6_TYPE,
+ oxm ? OXM_OF_ICMPV6_CODE : NXM_NX_ICMPV6_CODE, oxm);
- if (!(wc & FWW_IPV6_LABEL)) {
- nxm_put_32(b, NXM_NX_IPV6_LABEL, flow->ipv6_label);
- }
+ nxm_put_32m(b, oxm ? OXM_OF_IPV6_FLABEL : NXM_NX_IPV6_LABEL,
+ flow->ipv6_label, match->wc.masks.ipv6_label);
if (flow->nw_proto == IPPROTO_ICMPV6
&& (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) ||
flow->tp_src == htons(ND_NEIGHBOR_ADVERT))) {
- if (!(wc & FWW_ND_TARGET)) {
- nxm_put_ipv6(b, NXM_NX_ND_TARGET, &flow->nd_target,
- &in6addr_exact);
+ nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_ND_TARGET : NXM_NX_ND_TARGET,
+ &flow->nd_target, &match->wc.masks.nd_target);
+ if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
+ nxm_put_eth_masked(b, oxm ? OXM_OF_IPV6_ND_SLL : NXM_NX_ND_SLL,
+ flow->arp_sha, match->wc.masks.arp_sha);
}
- if (!(wc & FWW_ARP_SHA)
- && flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
- nxm_put_eth(b, NXM_NX_ND_SLL, flow->arp_sha);
- }
- if (!(wc & FWW_ARP_THA)
- && flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
- nxm_put_eth(b, NXM_NX_ND_TLL, flow->arp_tha);
+ if (flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
+ nxm_put_eth_masked(b, oxm ? OXM_OF_IPV6_ND_TLL : NXM_NX_ND_TLL,
+ flow->arp_tha, match->wc.masks.arp_tha);
}
}
- } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_ARP)) {
+ } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
+ flow->dl_type == htons(ETH_TYPE_RARP)) {
/* ARP. */
- if (!(wc & FWW_NW_PROTO)) {
- nxm_put_16(b, NXM_OF_ARP_OP, htons(flow->nw_proto));
- }
- nxm_put_32m(b, NXM_OF_ARP_SPA, flow->nw_src, cr->wc.nw_src_mask);
- nxm_put_32m(b, NXM_OF_ARP_TPA, flow->nw_dst, cr->wc.nw_dst_mask);
- if (!(wc & FWW_ARP_SHA)) {
- nxm_put_eth(b, NXM_NX_ARP_SHA, flow->arp_sha);
- }
- if (!(wc & FWW_ARP_THA)) {
- nxm_put_eth(b, NXM_NX_ARP_THA, flow->arp_tha);
+ if (match->wc.masks.nw_proto) {
+ nxm_put_16(b, oxm ? OXM_OF_ARP_OP : NXM_OF_ARP_OP,
+ htons(flow->nw_proto));
}
+ nxm_put_32m(b, oxm ? OXM_OF_ARP_SPA : NXM_OF_ARP_SPA,
+ flow->nw_src, match->wc.masks.nw_src);
+ nxm_put_32m(b, oxm ? OXM_OF_ARP_TPA : NXM_OF_ARP_TPA,
+ flow->nw_dst, match->wc.masks.nw_dst);
+ nxm_put_eth_masked(b, oxm ? OXM_OF_ARP_SHA : NXM_NX_ARP_SHA,
+ flow->arp_sha, match->wc.masks.arp_sha);
+ nxm_put_eth_masked(b, oxm ? OXM_OF_ARP_THA : NXM_NX_ARP_THA,
+ flow->arp_tha, match->wc.masks.arp_tha);
}
/* Tunnel ID. */
- nxm_put_64m(b, NXM_NX_TUN_ID, flow->tun_id, cr->wc.tun_id_mask);
+ nxm_put_64m(b, oxm ? OXM_OF_TUNNEL_ID : NXM_NX_TUN_ID,
+ flow->tunnel.tun_id, match->wc.masks.tunnel.tun_id);
+
+ /* Other tunnel metadata. */
+ nxm_put_32m(b, NXM_NX_TUN_IPV4_SRC,
+ flow->tunnel.ip_src, match->wc.masks.tunnel.ip_src);
+ nxm_put_32m(b, NXM_NX_TUN_IPV4_DST,
+ flow->tunnel.ip_dst, match->wc.masks.tunnel.ip_dst);
/* Registers. */
for (i = 0; i < FLOW_N_REGS; i++) {
nxm_put_32m(b, NXM_NX_REG(i),
- htonl(flow->regs[i]), htonl(cr->wc.reg_masks[i]));
+ htonl(flow->regs[i]), htonl(match->wc.masks.regs[i]));
}
+ /* Mark. */
+ nxm_put_32m(b, NXM_NX_PKT_MARK, htonl(flow->pkt_mark),
+ htonl(match->wc.masks.pkt_mark));
+
+ /* OpenFlow 1.1+ Metadata. */
+ nxm_put_64m(b, OXM_OF_METADATA, flow->metadata, match->wc.masks.metadata);
+
/* Cookie. */
nxm_put_64m(b, NXM_NX_COOKIE, cookie, cookie_mask);
match_len = b->size - start_len;
+ return match_len;
+}
+
+/* Appends to 'b' the nx_match format that expresses 'match', plus enough zero
+ * bytes to pad the nx_match out to a multiple of 8. For Flow Mod and Flow
+ * Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied.
+ * Otherwise, 'cookie_mask' should be zero.
+ *
+ * This function can cause 'b''s data to be reallocated.
+ *
+ * Returns the number of bytes appended to 'b', excluding padding. The return
+ * value can be zero if it appended nothing at all to 'b' (which happens if
+ * 'cr' is a catch-all rule that matches every packet). */
+int
+nx_put_match(struct ofpbuf *b, const struct match *match,
+ ovs_be64 cookie, ovs_be64 cookie_mask)
+{
+ int match_len = nx_put_raw(b, false, match, cookie, cookie_mask);
+
ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
return match_len;
}
+
+
+/* Appends to 'b' an struct ofp11_match_header followed by the oxm format that
+ * expresses 'cr', plus enough zero bytes to pad the data appended out to a
+ * multiple of 8.
+ *
+ * This function can cause 'b''s data to be reallocated.
+ *
+ * Returns the number of bytes appended to 'b', excluding the padding. Never
+ * returns zero. */
+int
+oxm_put_match(struct ofpbuf *b, const struct match *match)
+{
+ int match_len;
+ struct ofp11_match_header *omh;
+ size_t start_len = b->size;
+ ovs_be64 cookie = htonll(0), cookie_mask = htonll(0);
+
+ ofpbuf_put_uninit(b, sizeof *omh);
+ match_len = nx_put_raw(b, true, match, cookie, cookie_mask) + sizeof *omh;
+ ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
+
+ omh = ofpbuf_at(b, start_len, sizeof *omh);
+ omh->type = htons(OFPMT_OXM);
+ omh->length = htons(match_len);
+
+ return match_len;
+}
\f
/* nx_match_to_string() and helpers. */
return ds_steal_cstr(&s);
}
+char *
+oxm_match_to_string(const struct ofpbuf *p, unsigned int match_len)
+{
+ const struct ofp11_match_header *omh = p->data;
+ uint16_t match_len_;
+ struct ds s;
+
+ ds_init(&s);
+
+ if (match_len < sizeof *omh) {
+ ds_put_format(&s, "<match too short: %u>", match_len);
+ goto err;
+ }
+
+ if (omh->type != htons(OFPMT_OXM)) {
+ ds_put_format(&s, "<bad match type field: %u>", ntohs(omh->type));
+ goto err;
+ }
+
+ match_len_ = ntohs(omh->length);
+ if (match_len_ < sizeof *omh) {
+ ds_put_format(&s, "<match length field too short: %u>", match_len_);
+ goto err;
+ }
+
+ if (match_len_ != match_len) {
+ ds_put_format(&s, "<match length field incorrect: %u != %u>",
+ match_len_, match_len);
+ goto err;
+ }
+
+ return nx_match_to_string(ofpbuf_at(p, sizeof *omh, 0),
+ match_len - sizeof *omh);
+
+err:
+ return ds_steal_cstr(&s);
+}
+
static void
format_nxm_field_name(struct ds *s, uint32_t header)
{
const struct mf_field *mf = mf_from_nxm_header(header);
if (mf) {
- ds_put_cstr(s, mf->nxm_name);
+ ds_put_cstr(s, IS_OXM_HEADER(header) ? mf->oxm_name : mf->nxm_name);
if (NXM_HASMASK(header)) {
ds_put_cstr(s, "_W");
}
for (i = 0; i < MFF_N_IDS; i++) {
const struct mf_field *mf = mf_from_id(i);
+ uint32_t header;
- if (mf->nxm_name
- && !strncmp(mf->nxm_name, name, name_len)
- && mf->nxm_name[name_len] == '\0') {
- if (!wild) {
- return mf->nxm_header;
- } else if (mf->maskable != MFM_NONE) {
- return NXM_MAKE_WILD_HEADER(mf->nxm_header);
- }
+ if (mf->nxm_name &&
+ !strncmp(mf->nxm_name, name, name_len) &&
+ mf->nxm_name[name_len] == '\0') {
+ header = mf->nxm_header;
+ } else if (mf->oxm_name &&
+ !strncmp(mf->oxm_name, name, name_len) &&
+ mf->oxm_name[name_len] == '\0') {
+ header = mf->oxm_header;
+ } else {
+ continue;
+ }
+
+ if (!wild) {
+ return header;
+ } else if (mf->maskable != MFM_NONE) {
+ return NXM_MAKE_WILD_HEADER(header);
}
}
- if (!strncmp("NXM_NX_COOKIE", name, name_len)
- && (name_len == strlen("NXM_NX_COOKIE"))) {
+ if (!strncmp("NXM_NX_COOKIE", name, name_len) &&
+ (name_len == strlen("NXM_NX_COOKIE"))) {
if (!wild) {
return NXM_NX_COOKIE;
} else {
\f
/* nx_match_from_string(). */
-int
-nx_match_from_string(const char *s, struct ofpbuf *b)
+static int
+nx_match_from_string_raw(const char *s, struct ofpbuf *b)
{
const char *full_s = s;
const size_t start_len = b->size;
- int match_len;
if (!strcmp(s, "<any>")) {
/* Ensure that 'b->data' isn't actually null. */
s++;
}
- match_len = b->size - start_len;
+ return b->size - start_len;
+}
+
/* Parses the string form of an nx_match from 's' into 'b', pads it out to a
 * multiple of 8 bytes, and returns the unpadded match length. */
int
nx_match_from_string(const char *s, struct ofpbuf *b)
{
    int match_len = nx_match_from_string_raw(s, b);

    ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
    return match_len;
}
+
+int
+oxm_match_from_string(const char *s, struct ofpbuf *b)
+{
+ int match_len;
+ struct ofp11_match_header *omh;
+ size_t start_len = b->size;
+
+ ofpbuf_put_uninit(b, sizeof *omh);
+ match_len = nx_match_from_string_raw(s, b) + sizeof *omh;
+ ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
+
+ omh = ofpbuf_at(b, start_len, sizeof *omh);
+ omh->type = htons(OFPMT_OXM);
+ omh->length = htons(match_len);
+
+ return match_len;
+}
\f
-void
-nxm_parse_reg_move(struct nx_action_reg_move *move, const char *s)
+/* Parses 's' as a "move" action, in the form described in ovs-ofctl(8), into
+ * '*move'.
+ *
+ * Returns NULL if successful, otherwise a malloc()'d string describing the
+ * error. The caller is responsible for freeing the returned string. */
+char * WARN_UNUSED_RESULT
+nxm_parse_reg_move(struct ofpact_reg_move *move, const char *s)
{
const char *full_s = s;
- struct mf_subfield src, dst;
+ char *error;
- s = mf_parse_subfield(&src, s);
+ error = mf_parse_subfield__(&move->src, &s);
+ if (error) {
+ return error;
+ }
if (strncmp(s, "->", 2)) {
- ovs_fatal(0, "%s: missing `->' following source", full_s);
+ return xasprintf("%s: missing `->' following source", full_s);
}
s += 2;
- s = mf_parse_subfield(&dst, s);
- if (*s != '\0') {
- ovs_fatal(0, "%s: trailing garbage following destination", full_s);
+ error = mf_parse_subfield(&move->dst, s);
+ if (error) {
+ return error;
}
- if (src.n_bits != dst.n_bits) {
- ovs_fatal(0, "%s: source field is %d bits wide but destination is "
- "%d bits wide", full_s, src.n_bits, dst.n_bits);
+ if (move->src.n_bits != move->dst.n_bits) {
+ return xasprintf("%s: source field is %d bits wide but destination is "
+ "%d bits wide", full_s,
+ move->src.n_bits, move->dst.n_bits);
}
-
- ofputil_init_NXAST_REG_MOVE(move);
- move->n_bits = htons(src.n_bits);
- move->src_ofs = htons(src.ofs);
- move->dst_ofs = htons(dst.ofs);
- move->src = htonl(src.field->nxm_header);
- move->dst = htonl(dst.field->nxm_header);
+ return NULL;
}
-void
-nxm_parse_reg_load(struct nx_action_reg_load *load, const char *s)
+/* Parses 's' as a "load" action, in the form described in ovs-ofctl(8), into
+ * '*load'.
+ *
+ * Returns NULL if successful, otherwise a malloc()'d string describing the
+ * error. The caller is responsible for freeing the returned string. */
+char * WARN_UNUSED_RESULT
+nxm_parse_reg_load(struct ofpact_reg_load *load, const char *s)
{
const char *full_s = s;
- struct mf_subfield dst;
- uint64_t value;
+ uint64_t value = strtoull(s, (char **) &s, 0);
+ char *error;
- value = strtoull(s, (char **) &s, 0);
if (strncmp(s, "->", 2)) {
- ovs_fatal(0, "%s: missing `->' following value", full_s);
+ return xasprintf("%s: missing `->' following value", full_s);
}
s += 2;
- s = mf_parse_subfield(&dst, s);
- if (*s != '\0') {
- ovs_fatal(0, "%s: trailing garbage following destination", full_s);
+ error = mf_parse_subfield(&load->dst, s);
+ if (error) {
+ return error;
}
- if (dst.n_bits < 64 && (value >> dst.n_bits) != 0) {
- ovs_fatal(0, "%s: value %"PRIu64" does not fit into %u bits",
- full_s, value, dst.n_bits);
+ if (load->dst.n_bits < 64 && (value >> load->dst.n_bits) != 0) {
+ return xasprintf("%s: value %"PRIu64" does not fit into %d bits",
+ full_s, value, load->dst.n_bits);
}
- ofputil_init_NXAST_REG_LOAD(load);
- load->ofs_nbits = nxm_encode_ofs_nbits(dst.ofs, dst.n_bits);
- load->dst = htonl(dst.field->nxm_header);
- load->value = htonll(value);
+ load->subvalue.be64[0] = htonll(0);
+ load->subvalue.be64[1] = htonll(value);
+ return NULL;
}
\f
/* nxm_format_reg_move(), nxm_format_reg_load(). */
void
-nxm_format_reg_move(const struct nx_action_reg_move *move, struct ds *s)
+nxm_format_reg_move(const struct ofpact_reg_move *move, struct ds *s)
{
- struct mf_subfield src, dst;
-
- nxm_decode_discrete(&src, move->src, move->src_ofs, move->n_bits);
- nxm_decode_discrete(&dst, move->dst, move->dst_ofs, move->n_bits);
-
ds_put_format(s, "move:");
- mf_format_subfield(&src, s);
+ mf_format_subfield(&move->src, s);
ds_put_cstr(s, "->");
- mf_format_subfield(&dst, s);
+ mf_format_subfield(&move->dst, s);
}
void
-nxm_format_reg_load(const struct nx_action_reg_load *load, struct ds *s)
+nxm_format_reg_load(const struct ofpact_reg_load *load, struct ds *s)
+{
+ ds_put_cstr(s, "load:");
+ mf_format_subvalue(&load->subvalue, s);
+ ds_put_cstr(s, "->");
+ mf_format_subfield(&load->dst, s);
+}
+\f
+enum ofperr
+nxm_reg_move_from_openflow(const struct nx_action_reg_move *narm,
+ struct ofpbuf *ofpacts)
{
- struct mf_subfield dst;
+ struct ofpact_reg_move *move;
- ds_put_format(s, "load:%#"PRIx64"->", ntohll(load->value));
+ move = ofpact_put_REG_MOVE(ofpacts);
+ move->src.field = mf_from_nxm_header(ntohl(narm->src));
+ move->src.ofs = ntohs(narm->src_ofs);
+ move->src.n_bits = ntohs(narm->n_bits);
+ move->dst.field = mf_from_nxm_header(ntohl(narm->dst));
+ move->dst.ofs = ntohs(narm->dst_ofs);
+ move->dst.n_bits = ntohs(narm->n_bits);
- nxm_decode(&dst, load->dst, load->ofs_nbits);
- mf_format_subfield(&dst, s);
+ return nxm_reg_move_check(move, NULL);
}
-\f
-/* nxm_check_reg_move(), nxm_check_reg_load(). */
enum ofperr
-nxm_check_reg_move(const struct nx_action_reg_move *action,
- const struct flow *flow)
+nxm_reg_load_from_openflow(const struct nx_action_reg_load *narl,
+ struct ofpbuf *ofpacts)
{
- struct mf_subfield src;
- struct mf_subfield dst;
- int error;
-
- nxm_decode_discrete(&src, action->src, action->src_ofs, action->n_bits);
- error = mf_check_src(&src, flow);
- if (error) {
- return error;
+ struct ofpact_reg_load *load;
+
+ load = ofpact_put_REG_LOAD(ofpacts);
+ load->dst.field = mf_from_nxm_header(ntohl(narl->dst));
+ load->dst.ofs = nxm_decode_ofs(narl->ofs_nbits);
+ load->dst.n_bits = nxm_decode_n_bits(narl->ofs_nbits);
+ load->subvalue.be64[1] = narl->value;
+
+ /* Reject 'narl' if a bit numbered 'n_bits' or higher is set to 1 in
+ * narl->value. */
+ if (load->dst.n_bits < 64 &&
+ ntohll(narl->value) >> load->dst.n_bits) {
+ return OFPERR_OFPBAC_BAD_ARGUMENT;
}
- nxm_decode_discrete(&dst, action->dst, action->dst_ofs, action->n_bits);
- return mf_check_dst(&dst, flow);
+ return nxm_reg_load_check(load, NULL);
}
-
+\f
enum ofperr
-nxm_check_reg_load(const struct nx_action_reg_load *action,
- const struct flow *flow)
+nxm_reg_move_check(const struct ofpact_reg_move *move, const struct flow *flow)
{
- struct mf_subfield dst;
enum ofperr error;
- nxm_decode(&dst, action->dst, action->ofs_nbits);
- error = mf_check_dst(&dst, flow);
+ error = mf_check_src(&move->src, flow);
if (error) {
return error;
}
- /* Reject 'action' if a bit numbered 'n_bits' or higher is set to 1 in
- * action->value. */
- if (dst.n_bits < 64 && ntohll(action->value) >> dst.n_bits) {
- return OFPERR_OFPBAC_BAD_ARGUMENT;
- }
+ return mf_check_dst(&move->dst, NULL);
+}
- return 0;
+enum ofperr
+nxm_reg_load_check(const struct ofpact_reg_load *load, const struct flow *flow)
+{
+ return mf_check_dst(&load->dst, flow);
+}
+\f
+void
+nxm_reg_move_to_nxast(const struct ofpact_reg_move *move,
+ struct ofpbuf *openflow)
+{
+ struct nx_action_reg_move *narm;
+
+ narm = ofputil_put_NXAST_REG_MOVE(openflow);
+ narm->n_bits = htons(move->dst.n_bits);
+ narm->src_ofs = htons(move->src.ofs);
+ narm->dst_ofs = htons(move->dst.ofs);
+ narm->src = htonl(move->src.field->nxm_header);
+ narm->dst = htonl(move->dst.field->nxm_header);
+}
+
+void
+nxm_reg_load_to_nxast(const struct ofpact_reg_load *load,
+ struct ofpbuf *openflow)
+{
+ struct nx_action_reg_load *narl;
+
+ narl = ofputil_put_NXAST_REG_LOAD(openflow);
+ narl->ofs_nbits = nxm_encode_ofs_nbits(load->dst.ofs, load->dst.n_bits);
+ narl->dst = htonl(load->dst.field->nxm_header);
+ narl->value = load->subvalue.be64[1];
}
\f
/* nxm_execute_reg_move(), nxm_execute_reg_load(). */
void
-nxm_execute_reg_move(const struct nx_action_reg_move *action,
- struct flow *flow)
+nxm_execute_reg_move(const struct ofpact_reg_move *move,
+ struct flow *flow, struct flow_wildcards *wc)
{
- struct mf_subfield src, dst;
union mf_value src_value;
union mf_value dst_value;
- nxm_decode_discrete(&src, action->src, action->src_ofs, action->n_bits);
- nxm_decode_discrete(&dst, action->dst, action->dst_ofs, action->n_bits);
+ mf_mask_field_and_prereqs(move->dst.field, &wc->masks);
+ mf_mask_field_and_prereqs(move->src.field, &wc->masks);
- mf_get_value(dst.field, flow, &dst_value);
- mf_get_value(src.field, flow, &src_value);
- bitwise_copy(&src_value, src.field->n_bytes, src.ofs,
- &dst_value, dst.field->n_bytes, dst.ofs,
- src.n_bits);
- mf_set_flow_value(dst.field, &dst_value, flow);
+ mf_get_value(move->dst.field, flow, &dst_value);
+ mf_get_value(move->src.field, flow, &src_value);
+ bitwise_copy(&src_value, move->src.field->n_bytes, move->src.ofs,
+ &dst_value, move->dst.field->n_bytes, move->dst.ofs,
+ move->src.n_bits);
+ mf_set_flow_value(move->dst.field, &dst_value, flow);
+}
+
+void
+nxm_execute_reg_load(const struct ofpact_reg_load *load, struct flow *flow,
+ struct flow_wildcards *wc)
+{
+ /* Since at the datapath interface we do not have set actions for
+ * individual fields, but larger sets of fields for a given protocol
+ * layer, the set action will in practice only ever apply to exactly
+ * matched flows for the given protocol layer. For example, if the
+ * reg_load changes the IP TTL, the corresponding datapath action will
+ * rewrite also the IP addresses and TOS byte. Since these other field
+ * values may not be explicitly set, they depend on the incoming flow field
+ * values, and hence all of them are set in the wildcard masks, when
+ * the action is committed to the datapath. For the rare case, where the
+ * reg_load action does not actually change the value, and no other flow
+ * field values are set (or loaded), the datapath action is skipped, and
+ * no mask bits are set. Such a datapath flow should, however, be
+ * dependent on the specific field value, so the corresponding wildcard
+ * mask bits must be set, lest the datapath flow be applied to packets
+ * containing some other value in the field and the field value remain
+ * unchanged regardless of the incoming value.
+ *
+ * We set the masks here for the whole fields, and their prerequisites.
+ * Even if only the lower byte of a TCP destination port is set,
+ * we set the mask for the whole field, and also the ip_proto in the IP
+ * header, so that the kernel flow would not be applied on, e.g., a UDP
+ * packet, or any other IP protocol in addition to TCP packets.
+ */
+ mf_mask_field_and_prereqs(load->dst.field, &wc->masks);
+ mf_write_subfield_flow(&load->dst, &load->subvalue, flow);
}
void
-nxm_execute_reg_load(const struct nx_action_reg_load *action,
- struct flow *flow)
+nxm_reg_load(const struct mf_subfield *dst, uint64_t src_data,
+ struct flow *flow, struct flow_wildcards *wc)
{
- struct mf_subfield dst;
+ union mf_subvalue src_subvalue;
+ union mf_subvalue mask_value;
+ ovs_be64 src_data_be = htonll(src_data);
- nxm_decode(&dst, action->dst, action->ofs_nbits);
- mf_set_subfield_value(&dst, ntohll(action->value), flow);
+ memset(&mask_value, 0xff, sizeof mask_value);
+ mf_write_subfield_flow(dst, &mask_value, &wc->masks);
+
+ bitwise_copy(&src_data_be, sizeof src_data_be, 0,
+ &src_subvalue, sizeof src_subvalue, 0,
+ sizeof src_data_be * 8);
+ mf_write_subfield_flow(dst, &src_subvalue, flow);
}
+\f
+/* nxm_parse_stack_action(), which works for both push() and pop(). */
-/* Initializes 'sf->field' with the field corresponding to the given NXM
- * 'header' and 'sf->ofs' and 'sf->n_bits' decoded from 'ofs_nbits' with
- * nxm_decode_ofs() and nxm_decode_n_bits(), respectively.
- *
- * Afterward, 'sf' might be invalid in a few different ways:
- *
- * - 'sf->field' will be NULL if 'header' is unknown.
- *
- * - 'sf->ofs' and 'sf->n_bits' might exceed the width of sf->field.
+/* Parses 's' as a "push" or "pop" action, in the form described in
+ * ovs-ofctl(8), into '*stack_action'.
*
- * The caller should call mf_check_src() or mf_check_dst() to check for these
- * problems. */
+ * Returns NULL if successful, otherwise a malloc()'d string describing the
+ * error. The caller is responsible for freeing the returned string. */
+char * WARN_UNUSED_RESULT
+nxm_parse_stack_action(struct ofpact_stack *stack_action, const char *s)
+{
+ char *error;
+
+ error = mf_parse_subfield__(&stack_action->subfield, &s);
+ if (error) {
+ return error;
+ }
+
+ if (*s != '\0') {
+ return xasprintf("%s: trailing garbage following push or pop", s);
+ }
+
+ return NULL;
+}
+
void
-nxm_decode(struct mf_subfield *sf, ovs_be32 header, ovs_be16 ofs_nbits)
+nxm_format_stack_push(const struct ofpact_stack *push, struct ds *s)
{
- sf->field = mf_from_nxm_header(ntohl(header));
- sf->ofs = nxm_decode_ofs(ofs_nbits);
- sf->n_bits = nxm_decode_n_bits(ofs_nbits);
+ ds_put_cstr(s, "push:");
+ mf_format_subfield(&push->subfield, s);
}
-/* Initializes 'sf->field' with the field corresponding to the given NXM
- * 'header' and 'sf->ofs' and 'sf->n_bits' from 'ofs' and 'n_bits',
- * respectively.
- *
- * Afterward, 'sf' might be invalid in a few different ways:
- *
- * - 'sf->field' will be NULL if 'header' is unknown.
- *
- * - 'sf->ofs' and 'sf->n_bits' might exceed the width of sf->field.
- *
- * The caller should call mf_check_src() or mf_check_dst() to check for these
- * problems. */
void
-nxm_decode_discrete(struct mf_subfield *sf, ovs_be32 header,
- ovs_be16 ofs, ovs_be16 n_bits)
+nxm_format_stack_pop(const struct ofpact_stack *pop, struct ds *s)
+{
+ ds_put_cstr(s, "pop:");
+ mf_format_subfield(&pop->subfield, s);
+}
+
+/* Common set for both push and pop actions. */
+static void
+stack_action_from_openflow__(const struct nx_action_stack *nasp,
+ struct ofpact_stack *stack_action)
+{
+ stack_action->subfield.field = mf_from_nxm_header(ntohl(nasp->field));
+ stack_action->subfield.ofs = ntohs(nasp->offset);
+ stack_action->subfield.n_bits = ntohs(nasp->n_bits);
+}
+
+static void
+nxm_stack_to_nxast__(const struct ofpact_stack *stack_action,
+ struct nx_action_stack *nasp)
{
- sf->field = mf_from_nxm_header(ntohl(header));
- sf->ofs = ntohs(ofs);
- sf->n_bits = ntohs(n_bits);
+ nasp->offset = htons(stack_action->subfield.ofs);
+ nasp->n_bits = htons(stack_action->subfield.n_bits);
+ nasp->field = htonl(stack_action->subfield.field->nxm_header);
+}
+
+enum ofperr
+nxm_stack_push_from_openflow(const struct nx_action_stack *nasp,
+ struct ofpbuf *ofpacts)
+{
+ struct ofpact_stack *push;
+
+ push = ofpact_put_STACK_PUSH(ofpacts);
+ stack_action_from_openflow__(nasp, push);
+
+ return nxm_stack_push_check(push, NULL);
+}
+
+enum ofperr
+nxm_stack_pop_from_openflow(const struct nx_action_stack *nasp,
+ struct ofpbuf *ofpacts)
+{
+ struct ofpact_stack *pop;
+
+ pop = ofpact_put_STACK_POP(ofpacts);
+ stack_action_from_openflow__(nasp, pop);
+
+ return nxm_stack_pop_check(pop, NULL);
+}
+
+enum ofperr
+nxm_stack_push_check(const struct ofpact_stack *push,
+ const struct flow *flow)
+{
+ return mf_check_src(&push->subfield, flow);
+}
+
+enum ofperr
+nxm_stack_pop_check(const struct ofpact_stack *pop,
+ const struct flow *flow)
+{
+ return mf_check_dst(&pop->subfield, flow);
+}
+
+void
+nxm_stack_push_to_nxast(const struct ofpact_stack *stack,
+ struct ofpbuf *openflow)
+{
+ nxm_stack_to_nxast__(stack, ofputil_put_NXAST_STACK_PUSH(openflow));
+}
+
+void
+nxm_stack_pop_to_nxast(const struct ofpact_stack *stack,
+ struct ofpbuf *openflow)
+{
+ nxm_stack_to_nxast__(stack, ofputil_put_NXAST_STACK_POP(openflow));
+}
+
+/* nxm_execute_stack_push(), nxm_execute_stack_pop(). */
+static void
+nx_stack_push(struct ofpbuf *stack, union mf_subvalue *v)
+{
+ ofpbuf_put(stack, v, sizeof *v);
+}
+
+static union mf_subvalue *
+nx_stack_pop(struct ofpbuf *stack)
+{
+ union mf_subvalue *v = NULL;
+
+ if (stack->size) {
+ stack->size -= sizeof *v;
+ v = (union mf_subvalue *) ofpbuf_tail(stack);
+ }
+
+ return v;
+}
+
+void
+nxm_execute_stack_push(const struct ofpact_stack *push,
+ const struct flow *flow, struct flow_wildcards *wc,
+ struct ofpbuf *stack)
+{
+ union mf_subvalue mask_value;
+ union mf_subvalue dst_value;
+
+ memset(&mask_value, 0xff, sizeof mask_value);
+ mf_write_subfield_flow(&push->subfield, &mask_value, &wc->masks);
+
+ mf_read_subfield(&push->subfield, flow, &dst_value);
+ nx_stack_push(stack, &dst_value);
+}
+
+void
+nxm_execute_stack_pop(const struct ofpact_stack *pop,
+ struct flow *flow, struct flow_wildcards *wc,
+ struct ofpbuf *stack)
+{
+ union mf_subvalue *src_value;
+
+ src_value = nx_stack_pop(stack);
+
+ /* Only pop if stack is not empty. Otherwise, give warning. */
+ if (src_value) {
+ union mf_subvalue mask_value;
+
+ memset(&mask_value, 0xff, sizeof mask_value);
+ mf_write_subfield_flow(&pop->subfield, &mask_value, &wc->masks);
+ mf_write_subfield_flow(&pop->subfield, src_value, flow);
+ } else {
+ if (!VLOG_DROP_WARN(&rl)) {
+ char *flow_str = flow_to_string(flow);
+ VLOG_WARN_RL(&rl, "Failed to pop from an empty stack. On flow \n"
+ " %s", flow_str);
+ free(flow_str);
+ }
+ }
}