/*
- * Copyright (c) 2010 Nicira Networks.
+ * Copyright (c) 2010, 2011, 2012 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "nx-match.h"
+#include <netinet/icmp6.h>
+
#include "classifier.h"
#include "dynamic-string.h"
+#include "meta-flow.h"
+#include "ofp-actions.h"
+#include "ofp-errors.h"
#include "ofp-util.h"
#include "ofpbuf.h"
#include "openflow/nicira-ext.h"
#include "packets.h"
#include "unaligned.h"
+#include "util.h"
#include "vlog.h"
VLOG_DEFINE_THIS_MODULE(nx_match);
* peer and so there's not much point in showing a lot of them. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
-enum {
- NXM_INVALID = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_INVALID),
- NXM_BAD_TYPE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_TYPE),
- NXM_BAD_VALUE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_VALUE),
- NXM_BAD_MASK = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_MASK),
- NXM_BAD_PREREQ = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_PREREQ),
- NXM_DUP_TYPE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_DUP_TYPE),
- BAD_ARGUMENT = OFP_MKERR(OFPET_BAD_ACTION, OFPBAC_BAD_ARGUMENT)
-};
-
-/* For each NXM_* field, define NFI_NXM_* as consecutive integers starting from
- * zero. */
-enum nxm_field_index {
-#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO) NFI_NXM_##HEADER,
-#include "nx-match.def"
- N_NXM_FIELDS
-};
-
-struct nxm_field {
- struct hmap_node hmap_node;
- enum nxm_field_index index; /* NFI_* value. */
- uint32_t header; /* NXM_* value. */
- flow_wildcards_t wildcard; /* FWW_* bit, if exactly one. */
- ovs_be16 dl_type; /* dl_type prerequisite, if nonzero. */
- uint8_t nw_proto; /* nw_proto prerequisite, if nonzero. */
- const char *name; /* "NXM_*" string. */
-};
-
-/* All the known fields. */
-static struct nxm_field nxm_fields[N_NXM_FIELDS] = {
-#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO) \
- { HMAP_NODE_NULL_INITIALIZER, NFI_NXM_##HEADER, NXM_##HEADER, WILDCARD, \
- CONSTANT_HTONS(DL_TYPE), NW_PROTO, "NXM_" #HEADER },
-#include "nx-match.def"
-};
-
-/* Hash table of 'nxm_fields'. */
-static struct hmap all_nxm_fields = HMAP_INITIALIZER(&all_nxm_fields);
-
-/* Possible masks for NXM_OF_ETH_DST_W. */
-static const uint8_t eth_all_0s[ETH_ADDR_LEN]
- = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-static const uint8_t eth_all_1s[ETH_ADDR_LEN]
- = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-static const uint8_t eth_mcast_1[ETH_ADDR_LEN]
- = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
-static const uint8_t eth_mcast_0[ETH_ADDR_LEN]
- = {0xfe, 0xff, 0xff, 0xff, 0xff, 0xff};
-
-static void
-nxm_init(void)
-{
- if (hmap_is_empty(&all_nxm_fields)) {
- int i;
-
- for (i = 0; i < N_NXM_FIELDS; i++) {
- struct nxm_field *f = &nxm_fields[i];
- hmap_insert(&all_nxm_fields, &f->hmap_node,
- hash_int(f->header, 0));
- }
-
- /* Verify that the header values are unique (duplicate "case" values
- * cause a compile error). */
- switch (0) {
-#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO) \
- case NXM_##HEADER: break;
-#include "nx-match.def"
- }
- }
-}
-
-static const struct nxm_field *
-nxm_field_lookup(uint32_t header)
-{
- struct nxm_field *f;
-
- nxm_init();
-
- HMAP_FOR_EACH_WITH_HASH (f, hmap_node, hash_int(header, 0),
- &all_nxm_fields) {
- if (f->header == header) {
- return f;
- }
- }
-
- return NULL;
-}
-
/* Returns the width of the data for a field with the given 'header', in
* bytes. */
-static int
+int
nxm_field_bytes(uint32_t header)
{
unsigned int length = NXM_LENGTH(header);
/* Returns the width of the data for a field with the given 'header', in
* bits. */
-static int
+int
nxm_field_bits(uint32_t header)
{
return nxm_field_bytes(header) * 8;
\f
/* nx_pull_match() and helpers. */
-static int
-parse_nx_reg(const struct nxm_field *f,
- struct flow *flow, struct flow_wildcards *wc,
- const void *value, const void *maskp)
-{
- int idx = NXM_NX_REG_IDX(f->header);
- if (wc->reg_masks[idx]) {
- return NXM_DUP_TYPE;
- } else {
- flow_wildcards_set_reg_mask(wc, idx,
- (NXM_HASMASK(f->header)
- ? ntohl(get_unaligned_be32(maskp))
- : UINT32_MAX));
- flow->regs[idx] = ntohl(get_unaligned_be32(value));
- flow->regs[idx] &= wc->reg_masks[idx];
- return 0;
- }
-}
-
-static int
-parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f,
- const void *value, const void *mask)
-{
- struct flow_wildcards *wc = &rule->wc;
- struct flow *flow = &rule->flow;
-
- switch (f->index) {
- /* Metadata. */
- case NFI_NXM_OF_IN_PORT:
- flow->in_port = ntohs(get_unaligned_be16(value));
- if (flow->in_port == OFPP_LOCAL) {
- flow->in_port = ODPP_LOCAL;
- }
- return 0;
-
- /* Ethernet header. */
- case NFI_NXM_OF_ETH_DST:
- if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST))
- != (FWW_DL_DST | FWW_ETH_MCAST)) {
- return NXM_DUP_TYPE;
- } else {
- wc->wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST);
- memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
- return 0;
- }
- case NFI_NXM_OF_ETH_DST_W:
- if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST))
- != (FWW_DL_DST | FWW_ETH_MCAST)) {
- return NXM_DUP_TYPE;
- } else if (eth_addr_equals(mask, eth_mcast_1)) {
- wc->wildcards &= ~FWW_ETH_MCAST;
- flow->dl_dst[0] = *(uint8_t *) value & 0x01;
- } else if (eth_addr_equals(mask, eth_mcast_0)) {
- wc->wildcards &= ~FWW_DL_DST;
- memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
- flow->dl_dst[0] &= 0xfe;
- } else if (eth_addr_equals(mask, eth_all_0s)) {
- return 0;
- } else if (eth_addr_equals(mask, eth_all_1s)) {
- wc->wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST);
- memcpy(flow->dl_dst, value, ETH_ADDR_LEN);
- return 0;
- } else {
- return NXM_BAD_MASK;
- }
- case NFI_NXM_OF_ETH_SRC:
- memcpy(flow->dl_src, value, ETH_ADDR_LEN);
- return 0;
- case NFI_NXM_OF_ETH_TYPE:
- flow->dl_type = get_unaligned_be16(value);
- return 0;
-
- /* 802.1Q header. */
- case NFI_NXM_OF_VLAN_TCI:
- if (wc->vlan_tci_mask) {
- return NXM_DUP_TYPE;
- } else {
- cls_rule_set_dl_tci(rule, get_unaligned_be16(value));
- return 0;
- }
- case NFI_NXM_OF_VLAN_TCI_W:
- if (wc->vlan_tci_mask) {
- return NXM_DUP_TYPE;
- } else {
- cls_rule_set_dl_tci_masked(rule, get_unaligned_be16(value),
- get_unaligned_be16(mask));
- return 0;
- }
-
- /* IP header. */
- case NFI_NXM_OF_IP_TOS:
- if (*(uint8_t *) value & 0x03) {
- return NXM_BAD_VALUE;
- } else {
- flow->nw_tos = *(uint8_t *) value;
- return 0;
- }
- case NFI_NXM_OF_IP_PROTO:
- flow->nw_proto = *(uint8_t *) value;
- return 0;
-
- /* IP addresses in IP and ARP headers. */
- case NFI_NXM_OF_IP_SRC:
- case NFI_NXM_OF_ARP_SPA:
- if (wc->nw_src_mask) {
- return NXM_DUP_TYPE;
- } else {
- cls_rule_set_nw_src(rule, get_unaligned_be32(value));
- return 0;
- }
- case NFI_NXM_OF_IP_SRC_W:
- case NFI_NXM_OF_ARP_SPA_W:
- if (wc->nw_src_mask) {
- return NXM_DUP_TYPE;
- } else {
- ovs_be32 ip = get_unaligned_be32(value);
- ovs_be32 netmask = get_unaligned_be32(mask);
- if (!cls_rule_set_nw_src_masked(rule, ip, netmask)) {
- return NXM_BAD_MASK;
- }
- return 0;
- }
- case NFI_NXM_OF_IP_DST:
- case NFI_NXM_OF_ARP_TPA:
- if (wc->nw_dst_mask) {
- return NXM_DUP_TYPE;
- } else {
- cls_rule_set_nw_dst(rule, get_unaligned_be32(value));
- return 0;
- }
- case NFI_NXM_OF_IP_DST_W:
- case NFI_NXM_OF_ARP_TPA_W:
- if (wc->nw_dst_mask) {
- return NXM_DUP_TYPE;
- } else {
- ovs_be32 ip = get_unaligned_be32(value);
- ovs_be32 netmask = get_unaligned_be32(mask);
- if (!cls_rule_set_nw_dst_masked(rule, ip, netmask)) {
- return NXM_BAD_MASK;
- }
- return 0;
- }
-
- /* TCP header. */
- case NFI_NXM_OF_TCP_SRC:
- flow->tp_src = get_unaligned_be16(value);
- return 0;
- case NFI_NXM_OF_TCP_DST:
- flow->tp_dst = get_unaligned_be16(value);
- return 0;
-
- /* UDP header. */
- case NFI_NXM_OF_UDP_SRC:
- flow->tp_src = get_unaligned_be16(value);
- return 0;
- case NFI_NXM_OF_UDP_DST:
- flow->tp_dst = get_unaligned_be16(value);
- return 0;
-
- /* ICMP header. */
- case NFI_NXM_OF_ICMP_TYPE:
- flow->tp_src = htons(*(uint8_t *) value);
- return 0;
- case NFI_NXM_OF_ICMP_CODE:
- flow->tp_dst = htons(*(uint8_t *) value);
- return 0;
-
- /* ARP header. */
- case NFI_NXM_OF_ARP_OP:
- if (ntohs(get_unaligned_be16(value)) > 255) {
- return NXM_BAD_VALUE;
- } else {
- flow->nw_proto = ntohs(get_unaligned_be16(value));
- return 0;
- }
-
- /* Tunnel ID. */
- case NFI_NXM_NX_TUN_ID:
- flow->tun_id = htonl(ntohll(get_unaligned_be64(value)));
- return 0;
-
- /* Registers. */
- case NFI_NXM_NX_REG0:
- case NFI_NXM_NX_REG0_W:
-#if FLOW_N_REGS >= 2
- case NFI_NXM_NX_REG1:
- case NFI_NXM_NX_REG1_W:
-#endif
-#if FLOW_N_REGS >= 3
- case NFI_NXM_NX_REG2:
- case NFI_NXM_NX_REG2_W:
-#endif
-#if FLOW_N_REGS >= 4
- case NFI_NXM_NX_REG3:
- case NFI_NXM_NX_REG3_W:
-#endif
-#if FLOW_N_REGS > 4
-#error
-#endif
- return parse_nx_reg(f, flow, wc, value, mask);
-
- case N_NXM_FIELDS:
- NOT_REACHED();
- }
- NOT_REACHED();
-}
-
-static bool
-nxm_prereqs_ok(const struct nxm_field *field, const struct flow *flow)
-{
- return (!field->dl_type
- || (field->dl_type == flow->dl_type
- && (!field->nw_proto || field->nw_proto == flow->nw_proto)));
-}
-
static uint32_t
nx_entry_ok(const void *p, unsigned int match_len)
{
if (match_len < 4) {
if (match_len) {
- VLOG_DBG_RL(&rl, "nx_match ends with partial nxm_header");
+ VLOG_DBG_RL(&rl, "nx_match ends with partial (%u-byte) nxm_header",
+ match_len);
}
return 0;
}
return header;
}
-int
-nx_pull_match(struct ofpbuf *b, unsigned int match_len, uint16_t priority,
- struct cls_rule *rule)
+/* Checks the NXM/OXM entry at 'p', whose field is described by 'mf', for any
+ * 1-bit in the value where there is a 0-bit in the mask.  If it finds one,
+ * logs a warning. */
+static void
+check_mask_consistency(const uint8_t *p, const struct mf_field *mf)
+{
+ unsigned int width = mf->n_bytes;
+ const uint8_t *value = p + 4;
+ const uint8_t *mask = p + 4 + width;
+ unsigned int i;
+
+ for (i = 0; i < width; i++) {
+ if (value[i] & ~mask[i]) {
+ if (!VLOG_DROP_WARN(&rl)) {
+ char *s = nx_match_to_string(p, width * 2 + 4);
+ VLOG_WARN_RL(&rl, "NXM/OXM entry %s has 1-bits in value for "
+ "bits wildcarded by the mask. (Future versions "
+ "of OVS may report this as an OpenFlow error.)",
+ s);
+ break;
+ }
+ }
+ }
+}
+
+static enum ofperr
+nx_pull_raw(const uint8_t *p, unsigned int match_len, bool strict,
+ struct match *match, ovs_be64 *cookie, ovs_be64 *cookie_mask)
{
uint32_t header;
- uint8_t *p;
- p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
- if (!p) {
- VLOG_DBG_RL(&rl, "nx_match length %zu, rounded up to a "
- "multiple of 8, is longer than space in message (max "
- "length %zu)", match_len, b->size);
- return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
+ ovs_assert((cookie != NULL) == (cookie_mask != NULL));
+
+ match_init_catchall(match);
+ if (cookie) {
+ *cookie = *cookie_mask = htonll(0);
+ }
+ if (!match_len) {
+ return 0;
}
- cls_rule_init_catchall(rule, priority);
- while ((header = nx_entry_ok(p, match_len)) != 0) {
- unsigned length = NXM_LENGTH(header);
- const struct nxm_field *f;
- int error;
-
- f = nxm_field_lookup(header);
- if (!f) {
- error = NXM_BAD_TYPE;
- } else if (!nxm_prereqs_ok(f, &rule->flow)) {
- error = NXM_BAD_PREREQ;
- } else if (f->wildcard && !(rule->wc.wildcards & f->wildcard)) {
- error = NXM_DUP_TYPE;
+ for (;
+ (header = nx_entry_ok(p, match_len)) != 0;
+ p += 4 + NXM_LENGTH(header), match_len -= 4 + NXM_LENGTH(header)) {
+ const struct mf_field *mf;
+ enum ofperr error;
+
+ mf = mf_from_nxm_header(header);
+ if (!mf) {
+ if (strict) {
+ error = OFPERR_OFPBMC_BAD_FIELD;
+ } else {
+ continue;
+ }
+ } else if (!mf_are_prereqs_ok(mf, &match->flow)) {
+ error = OFPERR_OFPBMC_BAD_PREREQ;
+ } else if (!mf_is_all_wild(mf, &match->wc)) {
+ error = OFPERR_OFPBMC_DUP_FIELD;
+ } else if (header != OXM_OF_IN_PORT) {
+ unsigned int width = mf->n_bytes;
+ union mf_value value;
+
+ memcpy(&value, p + 4, width);
+ if (!mf_is_value_valid(mf, &value)) {
+ error = OFPERR_OFPBMC_BAD_VALUE;
+ } else if (!NXM_HASMASK(header)) {
+ error = 0;
+ mf_set_value(mf, &value, match);
+ } else {
+ union mf_value mask;
+
+ memcpy(&mask, p + 4 + width, width);
+ if (!mf_is_mask_valid(mf, &mask)) {
+ error = OFPERR_OFPBMC_BAD_MASK;
+ } else {
+ error = 0;
+ check_mask_consistency(p, mf);
+ mf_set(mf, &value, &mask, match);
+ }
+ }
} else {
- /* 'hasmask' and 'length' are known to be correct at this point
- * because they are included in 'header' and nxm_field_lookup()
- * checked them already. */
- rule->wc.wildcards &= ~f->wildcard;
- error = parse_nxm_entry(rule, f, p + 4, p + 4 + length / 2);
+ /* Special case for 32bit ports when using OXM,
+ * ports are 16 bits wide otherwise. */
+ ovs_be32 port_of11;
+ uint16_t port;
+
+ memcpy(&port_of11, p + 4, sizeof port_of11);
+ error = ofputil_port_from_ofp11(port_of11, &port);
+ if (!error) {
+ match_set_in_port(match, port);
+ }
}
+
+ /* Check if the match is for a cookie rather than a classifier rule. */
+ if ((header == NXM_NX_COOKIE || header == NXM_NX_COOKIE_W) && cookie) {
+ if (*cookie_mask) {
+ error = OFPERR_OFPBMC_DUP_FIELD;
+ } else {
+ unsigned int width = sizeof *cookie;
+
+ memcpy(cookie, p + 4, width);
+ if (NXM_HASMASK(header)) {
+ memcpy(cookie_mask, p + 4 + width, width);
+ } else {
+ *cookie_mask = htonll(UINT64_MAX);
+ }
+ error = 0;
+ }
+ }
+
if (error) {
- VLOG_DBG_RL(&rl, "bad nxm_entry with vendor=%"PRIu32", "
- "field=%"PRIu32", hasmask=%"PRIu32", type=%"PRIu32" "
- "(error %x)",
+ VLOG_DBG_RL(&rl, "bad nxm_entry %#08"PRIx32" (vendor=%"PRIu32", "
+ "field=%"PRIu32", hasmask=%"PRIu32", len=%"PRIu32"), "
+ "(%s)", header,
NXM_VENDOR(header), NXM_FIELD(header),
- NXM_HASMASK(header), NXM_TYPE(header),
- error);
+ NXM_HASMASK(header), NXM_LENGTH(header),
+ ofperr_to_string(error));
return error;
}
+ }
+ return match_len ? OFPERR_OFPBMC_BAD_LEN : 0;
+}
- p += 4 + length;
- match_len -= 4 + length;
+static enum ofperr
+nx_pull_match__(struct ofpbuf *b, unsigned int match_len, bool strict,
+ struct match *match,
+ ovs_be64 *cookie, ovs_be64 *cookie_mask)
+{
+ uint8_t *p = NULL;
+
+ if (match_len) {
+ p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
+ if (!p) {
+ VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a "
+ "multiple of 8, is longer than space in message (max "
+ "length %zu)", match_len, b->size);
+ return OFPERR_OFPBMC_BAD_LEN;
+ }
}
- return match_len ? NXM_INVALID : 0;
+ return nx_pull_raw(p, match_len, strict, match, cookie, cookie_mask);
+}
+
+/* Parses the nx_match formatted match description in 'b' with length
+ * 'match_len'. Stores the results in 'match'. If 'cookie' and 'cookie_mask'
+ * are valid pointers, then stores the cookie and mask in them if 'b' contains
+ * a "NXM_NX_COOKIE*" match. Otherwise, stores 0 in both.
+ *
+ * Fails with an error upon encountering an unknown NXM header.
+ *
+ * Returns 0 if successful, otherwise an OpenFlow error code. */
+enum ofperr
+nx_pull_match(struct ofpbuf *b, unsigned int match_len, struct match *match,
+ ovs_be64 *cookie, ovs_be64 *cookie_mask)
+{
+ return nx_pull_match__(b, match_len, true, match, cookie, cookie_mask);
+}
+
+/* Behaves the same as nx_pull_match(), but skips over unknown NXM headers,
+ * instead of failing with an error. */
+enum ofperr
+nx_pull_match_loose(struct ofpbuf *b, unsigned int match_len,
+ struct match *match,
+ ovs_be64 *cookie, ovs_be64 *cookie_mask)
+{
+ return nx_pull_match__(b, match_len, false, match, cookie, cookie_mask);
+}
+
+static enum ofperr
+oxm_pull_match__(struct ofpbuf *b, bool strict, struct match *match)
+{
+ struct ofp11_match_header *omh = b->data;
+ uint8_t *p;
+ uint16_t match_len;
+
+ if (b->size < sizeof *omh) {
+ return OFPERR_OFPBMC_BAD_LEN;
+ }
+
+ match_len = ntohs(omh->length);
+ if (match_len < sizeof *omh) {
+ return OFPERR_OFPBMC_BAD_LEN;
+ }
+
+ if (omh->type != htons(OFPMT_OXM)) {
+ return OFPERR_OFPBMC_BAD_TYPE;
+ }
+
+ p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
+ if (!p) {
+ VLOG_DBG_RL(&rl, "oxm length %u, rounded up to a "
+ "multiple of 8, is longer than space in message (max "
+ "length %zu)", match_len, b->size);
+ return OFPERR_OFPBMC_BAD_LEN;
+ }
+
+ return nx_pull_raw(p + sizeof *omh, match_len - sizeof *omh,
+ strict, match, NULL, NULL);
+}
+
+/* Parses the oxm formatted match description, preceded by a struct
+ * ofp11_match_header, in 'b' with length 'match_len'.  Stores the result
+ * in 'match'.
+ *
+ * Fails with an error when encountering unknown OXM headers.
+ *
+ * Returns 0 if successful, otherwise an OpenFlow error code. */
+enum ofperr
+oxm_pull_match(struct ofpbuf *b, struct match *match)
+{
+ return oxm_pull_match__(b, true, match);
+}
+
+/* Behaves the same as oxm_pull_match() with one exception.  Skips over
+ * unknown OXM headers instead of failing with an error when they are
+ * encountered. */
+enum ofperr
+oxm_pull_match_loose(struct ofpbuf *b, struct match *match)
+{
+ return oxm_pull_match__(b, false, match);
}
\f
/* nx_put_match() and helpers.
ofpbuf_put(b, &value, sizeof value);
}
+static void
+nxm_put_8m(struct ofpbuf *b, uint32_t header, uint8_t value, uint8_t mask)
+{
+ switch (mask) {
+ case 0:
+ break;
+
+ case UINT8_MAX:
+ nxm_put_8(b, header, value);
+ break;
+
+ default:
+ nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
+ ofpbuf_put(b, &value, sizeof value);
+ ofpbuf_put(b, &mask, sizeof mask);
+ }
+}
+
static void
nxm_put_16(struct ofpbuf *b, uint32_t header, ovs_be16 value)
{
ofpbuf_put(b, &value, sizeof value);
}
+static void
+nxm_put_64w(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
+{
+ nxm_put_header(b, header);
+ ofpbuf_put(b, &value, sizeof value);
+ ofpbuf_put(b, &mask, sizeof mask);
+}
+
+static void
+nxm_put_64m(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
+{
+ switch (mask) {
+ case 0:
+ break;
+
+ case CONSTANT_HTONLL(UINT64_MAX):
+ nxm_put_64(b, header, value);
+ break;
+
+ default:
+ nxm_put_64w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
+ break;
+ }
+}
+
static void
nxm_put_eth(struct ofpbuf *b, uint32_t header,
const uint8_t value[ETH_ADDR_LEN])
}
static void
-nxm_put_eth_dst(struct ofpbuf *b,
- uint32_t wc, const uint8_t value[ETH_ADDR_LEN])
+nxm_put_eth_masked(struct ofpbuf *b, uint32_t header,
+ const uint8_t value[ETH_ADDR_LEN],
+ const uint8_t mask[ETH_ADDR_LEN])
{
- switch (wc & (FWW_DL_DST | FWW_ETH_MCAST)) {
- case FWW_DL_DST | FWW_ETH_MCAST:
- break;
- case FWW_DL_DST:
- nxm_put_header(b, NXM_OF_ETH_DST_W);
- ofpbuf_put(b, value, ETH_ADDR_LEN);
- ofpbuf_put(b, eth_mcast_1, ETH_ADDR_LEN);
+ if (!eth_addr_is_zero(mask)) {
+ if (eth_mask_is_exact(mask)) {
+ nxm_put_eth(b, header, value);
+ } else {
+ nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
+ ofpbuf_put(b, value, ETH_ADDR_LEN);
+ ofpbuf_put(b, mask, ETH_ADDR_LEN);
+ }
+ }
+}
+
+static void
+nxm_put_ipv6(struct ofpbuf *b, uint32_t header,
+ const struct in6_addr *value, const struct in6_addr *mask)
+{
+ if (ipv6_mask_is_any(mask)) {
+ return;
+ } else if (ipv6_mask_is_exact(mask)) {
+ nxm_put_header(b, header);
+ ofpbuf_put(b, value, sizeof *value);
+ } else {
+ nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
+ ofpbuf_put(b, value, sizeof *value);
+ ofpbuf_put(b, mask, sizeof *mask);
+ }
+}
+
+static void
+nxm_put_frag(struct ofpbuf *b, const struct match *match)
+{
+ uint8_t nw_frag = match->flow.nw_frag;
+ uint8_t nw_frag_mask = match->wc.masks.nw_frag;
+
+ switch (nw_frag_mask) {
+ case 0:
break;
- case FWW_ETH_MCAST:
- nxm_put_header(b, NXM_OF_ETH_DST_W);
- ofpbuf_put(b, value, ETH_ADDR_LEN);
- ofpbuf_put(b, eth_mcast_0, ETH_ADDR_LEN);
+
+ case FLOW_NW_FRAG_MASK:
+ nxm_put_8(b, NXM_NX_IP_FRAG, nw_frag);
break;
- case 0:
- nxm_put_eth(b, NXM_OF_ETH_DST, value);
+
+ default:
+ nxm_put_8m(b, NXM_NX_IP_FRAG, nw_frag,
+ nw_frag_mask & FLOW_NW_FRAG_MASK);
break;
}
}
-int
-nx_put_match(struct ofpbuf *b, const struct cls_rule *cr)
+static void
+nxm_put_ip(struct ofpbuf *b, const struct match *match,
+ uint8_t icmp_proto, uint32_t icmp_type, uint32_t icmp_code,
+ bool oxm)
{
- const flow_wildcards_t wc = cr->wc.wildcards;
- const struct flow *flow = &cr->flow;
+ const struct flow *flow = &match->flow;
+
+ nxm_put_frag(b, match);
+
+ if (match->wc.masks.nw_tos & IP_DSCP_MASK) {
+ nxm_put_8(b, oxm ? OXM_OF_IP_DSCP : NXM_OF_IP_TOS,
+ flow->nw_tos & IP_DSCP_MASK);
+ }
+
+ if (match->wc.masks.nw_tos & IP_ECN_MASK) {
+ nxm_put_8(b, oxm ? OXM_OF_IP_ECN : NXM_NX_IP_ECN,
+ flow->nw_tos & IP_ECN_MASK);
+ }
+
+ if (!oxm && match->wc.masks.nw_ttl) {
+ nxm_put_8(b, NXM_NX_IP_TTL, flow->nw_ttl);
+ }
+
+ if (match->wc.masks.nw_proto) {
+ nxm_put_8(b, oxm ? OXM_OF_IP_PROTO : NXM_OF_IP_PROTO, flow->nw_proto);
+
+ if (flow->nw_proto == IPPROTO_TCP) {
+ nxm_put_16m(b, oxm ? OXM_OF_TCP_SRC : NXM_OF_TCP_SRC,
+ flow->tp_src, match->wc.masks.tp_src);
+ nxm_put_16m(b, oxm ? OXM_OF_TCP_DST : NXM_OF_TCP_DST,
+ flow->tp_dst, match->wc.masks.tp_dst);
+ } else if (flow->nw_proto == IPPROTO_UDP) {
+ nxm_put_16m(b, oxm ? OXM_OF_UDP_SRC : NXM_OF_UDP_SRC,
+ flow->tp_src, match->wc.masks.tp_src);
+ nxm_put_16m(b, oxm ? OXM_OF_UDP_DST : NXM_OF_UDP_DST,
+ flow->tp_dst, match->wc.masks.tp_dst);
+ } else if (flow->nw_proto == icmp_proto) {
+ if (match->wc.masks.tp_src) {
+ nxm_put_8(b, icmp_type, ntohs(flow->tp_src));
+ }
+ if (match->wc.masks.tp_dst) {
+ nxm_put_8(b, icmp_code, ntohs(flow->tp_dst));
+ }
+ }
+ }
+}
+
+/* Appends to 'b' the nx_match format that expresses 'match'. For Flow Mod and
+ * Flow Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied.
+ * Otherwise, 'cookie_mask' should be zero.
+ *
+ * This function can cause 'b''s data to be reallocated.
+ *
+ * Returns the number of bytes appended to 'b', excluding padding.
+ *
+ * If 'match' is a catch-all rule that matches every packet, then this function
+ * appends nothing to 'b' and returns 0. */
+static int
+nx_put_raw(struct ofpbuf *b, bool oxm, const struct match *match,
+ ovs_be64 cookie, ovs_be64 cookie_mask)
+{
+ const struct flow *flow = &match->flow;
const size_t start_len = b->size;
int match_len;
int i;
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 20);
+
/* Metadata. */
- if (!(wc & FWW_IN_PORT)) {
+ if (match->wc.masks.in_port) {
uint16_t in_port = flow->in_port;
- if (in_port == ODPP_LOCAL) {
- in_port = OFPP_LOCAL;
+ if (oxm) {
+ nxm_put_32(b, OXM_OF_IN_PORT, ofputil_port_to_ofp11(in_port));
+ } else {
+ nxm_put_16(b, NXM_OF_IN_PORT, htons(in_port));
}
- nxm_put_16(b, NXM_OF_IN_PORT, htons(in_port));
}
/* Ethernet. */
- nxm_put_eth_dst(b, wc, flow->dl_dst);
- if (!(wc & FWW_DL_SRC)) {
- nxm_put_eth(b, NXM_OF_ETH_SRC, flow->dl_src);
- }
- if (!(wc & FWW_DL_TYPE)) {
- nxm_put_16(b, NXM_OF_ETH_TYPE, flow->dl_type);
- }
+ nxm_put_eth_masked(b, oxm ? OXM_OF_ETH_SRC : NXM_OF_ETH_SRC,
+ flow->dl_src, match->wc.masks.dl_src);
+ nxm_put_eth_masked(b, oxm ? OXM_OF_ETH_DST : NXM_OF_ETH_DST,
+ flow->dl_dst, match->wc.masks.dl_dst);
+ nxm_put_16m(b, oxm ? OXM_OF_ETH_TYPE : NXM_OF_ETH_TYPE,
+ ofputil_dl_type_to_openflow(flow->dl_type),
+ match->wc.masks.dl_type);
/* 802.1Q. */
- nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci, cr->wc.vlan_tci_mask);
+ if (oxm) {
+ ovs_be16 VID_CFI_MASK = htons(VLAN_VID_MASK | VLAN_CFI);
+ ovs_be16 vid = flow->vlan_tci & VID_CFI_MASK;
+ ovs_be16 mask = match->wc.masks.vlan_tci & VID_CFI_MASK;
+
+ if (mask == htons(VLAN_VID_MASK | VLAN_CFI)) {
+ nxm_put_16(b, OXM_OF_VLAN_VID, vid);
+ } else if (mask) {
+ nxm_put_16m(b, OXM_OF_VLAN_VID, vid, mask);
+ }
- /* L3. */
- if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IP)) {
- /* IP. */
- if (!(wc & FWW_NW_TOS)) {
- nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & 0xfc);
+ if (vid && vlan_tci_to_pcp(match->wc.masks.vlan_tci)) {
+ nxm_put_8(b, OXM_OF_VLAN_PCP, vlan_tci_to_pcp(flow->vlan_tci));
}
- nxm_put_32m(b, NXM_OF_IP_SRC, flow->nw_src, cr->wc.nw_src_mask);
- nxm_put_32m(b, NXM_OF_IP_DST, flow->nw_dst, cr->wc.nw_dst_mask);
-
- if (!(wc & FWW_NW_PROTO)) {
- nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto);
- switch (flow->nw_proto) {
- /* TCP. */
- case IP_TYPE_TCP:
- if (!(wc & FWW_TP_SRC)) {
- nxm_put_16(b, NXM_OF_TCP_SRC, flow->tp_src);
- }
- if (!(wc & FWW_TP_DST)) {
- nxm_put_16(b, NXM_OF_TCP_DST, flow->tp_dst);
- }
- break;
- /* UDP. */
- case IP_TYPE_UDP:
- if (!(wc & FWW_TP_SRC)) {
- nxm_put_16(b, NXM_OF_UDP_SRC, flow->tp_src);
- }
- if (!(wc & FWW_TP_DST)) {
- nxm_put_16(b, NXM_OF_UDP_DST, flow->tp_dst);
- }
- break;
+ } else {
+ nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci,
+ match->wc.masks.vlan_tci);
+ }
- /* ICMP. */
- case IP_TYPE_ICMP:
- if (!(wc & FWW_TP_SRC)) {
- nxm_put_8(b, NXM_OF_ICMP_TYPE, ntohs(flow->tp_src));
- }
- if (!(wc & FWW_TP_DST)) {
- nxm_put_8(b, NXM_OF_ICMP_CODE, ntohs(flow->tp_dst));
- }
- break;
+ /* MPLS. */
+ if (eth_type_mpls(flow->dl_type)) {
+ if (match->wc.masks.mpls_lse & htonl(MPLS_TC_MASK)) {
+ nxm_put_8(b, OXM_OF_MPLS_TC, mpls_lse_to_tc(flow->mpls_lse));
+ }
+
+ if (match->wc.masks.mpls_lse & htonl(MPLS_BOS_MASK)) {
+ nxm_put_8(b, OXM_OF_MPLS_BOS, mpls_lse_to_bos(flow->mpls_lse));
+ }
+
+ if (match->wc.masks.mpls_lse & htonl(MPLS_LABEL_MASK)) {
+ nxm_put_32(b, OXM_OF_MPLS_LABEL,
+ htonl(mpls_lse_to_label(flow->mpls_lse)));
+ }
+ }
+
+ /* L3. */
+ if (flow->dl_type == htons(ETH_TYPE_IP)) {
+ /* IP. */
+ nxm_put_32m(b, oxm ? OXM_OF_IPV4_SRC : NXM_OF_IP_SRC,
+ flow->nw_src, match->wc.masks.nw_src);
+ nxm_put_32m(b, oxm ? OXM_OF_IPV4_DST : NXM_OF_IP_DST,
+ flow->nw_dst, match->wc.masks.nw_dst);
+ nxm_put_ip(b, match, IPPROTO_ICMP,
+ oxm ? OXM_OF_ICMPV4_TYPE : NXM_OF_ICMP_TYPE,
+ oxm ? OXM_OF_ICMPV4_CODE : NXM_OF_ICMP_CODE, oxm);
+ } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
+ /* IPv6. */
+ nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_SRC : NXM_NX_IPV6_SRC,
+ &flow->ipv6_src, &match->wc.masks.ipv6_src);
+ nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_DST : NXM_NX_IPV6_DST,
+ &flow->ipv6_dst, &match->wc.masks.ipv6_dst);
+ nxm_put_ip(b, match, IPPROTO_ICMPV6,
+ oxm ? OXM_OF_ICMPV6_TYPE : NXM_NX_ICMPV6_TYPE,
+ oxm ? OXM_OF_ICMPV6_CODE : NXM_NX_ICMPV6_CODE, oxm);
+
+ nxm_put_32m(b, oxm ? OXM_OF_IPV6_FLABEL : NXM_NX_IPV6_LABEL,
+ flow->ipv6_label, match->wc.masks.ipv6_label);
+
+ if (flow->nw_proto == IPPROTO_ICMPV6
+ && (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) ||
+ flow->tp_src == htons(ND_NEIGHBOR_ADVERT))) {
+ nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_ND_TARGET : NXM_NX_ND_TARGET,
+ &flow->nd_target, &match->wc.masks.nd_target);
+ if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
+ nxm_put_eth_masked(b, oxm ? OXM_OF_IPV6_ND_SLL : NXM_NX_ND_SLL,
+ flow->arp_sha, match->wc.masks.arp_sha);
+ }
+ if (flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
+ nxm_put_eth_masked(b, oxm ? OXM_OF_IPV6_ND_TLL : NXM_NX_ND_TLL,
+ flow->arp_tha, match->wc.masks.arp_tha);
}
}
- } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_ARP)) {
+ } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
+ flow->dl_type == htons(ETH_TYPE_RARP)) {
/* ARP. */
- if (!(wc & FWW_NW_PROTO)) {
- nxm_put_16(b, NXM_OF_ARP_OP, htons(flow->nw_proto));
+ if (match->wc.masks.nw_proto) {
+ nxm_put_16(b, oxm ? OXM_OF_ARP_OP : NXM_OF_ARP_OP,
+ htons(flow->nw_proto));
}
- nxm_put_32m(b, NXM_OF_ARP_SPA, flow->nw_src, cr->wc.nw_src_mask);
- nxm_put_32m(b, NXM_OF_ARP_TPA, flow->nw_dst, cr->wc.nw_dst_mask);
+ nxm_put_32m(b, oxm ? OXM_OF_ARP_SPA : NXM_OF_ARP_SPA,
+ flow->nw_src, match->wc.masks.nw_src);
+ nxm_put_32m(b, oxm ? OXM_OF_ARP_TPA : NXM_OF_ARP_TPA,
+ flow->nw_dst, match->wc.masks.nw_dst);
+ nxm_put_eth_masked(b, oxm ? OXM_OF_ARP_SHA : NXM_NX_ARP_SHA,
+ flow->arp_sha, match->wc.masks.arp_sha);
+ nxm_put_eth_masked(b, oxm ? OXM_OF_ARP_THA : NXM_NX_ARP_THA,
+ flow->arp_tha, match->wc.masks.arp_tha);
}
/* Tunnel ID. */
- if (!(wc & FWW_TUN_ID)) {
- nxm_put_64(b, NXM_NX_TUN_ID, htonll(ntohl(flow->tun_id)));
- }
+ nxm_put_64m(b, oxm ? OXM_OF_TUNNEL_ID : NXM_NX_TUN_ID,
+ flow->tunnel.tun_id, match->wc.masks.tunnel.tun_id);
/* Registers. */
for (i = 0; i < FLOW_N_REGS; i++) {
nxm_put_32m(b, NXM_NX_REG(i),
- htonl(flow->regs[i]), htonl(cr->wc.reg_masks[i]));
+ htonl(flow->regs[i]), htonl(match->wc.masks.regs[i]));
}
+ /* OpenFlow 1.1+ Metadata. */
+ nxm_put_64m(b, OXM_OF_METADATA, flow->metadata, match->wc.masks.metadata);
+
+ /* Cookie. */
+ nxm_put_64m(b, NXM_NX_COOKIE, cookie, cookie_mask);
+
match_len = b->size - start_len;
+ return match_len;
+}
+
+/* Appends to 'b' the nx_match format that expresses 'match', plus enough zero
+ * bytes to pad the nx_match out to a multiple of 8. For Flow Mod and Flow
+ * Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied.
+ * Otherwise, 'cookie_mask' should be zero.
+ *
+ * This function can cause 'b''s data to be reallocated.
+ *
+ * Returns the number of bytes appended to 'b', excluding padding. The return
+ * value can be zero if it appended nothing at all to 'b' (which happens if
+ * 'match' is a catch-all rule that matches every packet). */
+int
+nx_put_match(struct ofpbuf *b, const struct match *match,
+ ovs_be64 cookie, ovs_be64 cookie_mask)
+{
+ int match_len = nx_put_raw(b, false, match, cookie, cookie_mask);
+
ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
return match_len;
}
+
+
+/* Appends to 'b' a struct ofp11_match_header followed by the oxm format that
+ * expresses 'match', plus enough zero bytes to pad the data appended out to a
+ * multiple of 8.
+ *
+ * This function can cause 'b''s data to be reallocated.
+ *
+ * Returns the number of bytes appended to 'b', excluding the padding. Never
+ * returns zero. */
+int
+oxm_put_match(struct ofpbuf *b, const struct match *match)
+{
+ int match_len;
+ struct ofp11_match_header *omh;
+ size_t start_len = b->size;
+ ovs_be64 cookie = htonll(0), cookie_mask = htonll(0);
+
+ ofpbuf_put_uninit(b, sizeof *omh);
+ match_len = nx_put_raw(b, true, match, cookie, cookie_mask) + sizeof *omh;
+ ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
+
+ omh = (struct ofp11_match_header *)((char *)b->data + start_len);
+ omh->type = htons(OFPMT_OXM);
+ omh->length = htons(match_len);
+
+ return match_len;
+}
\f
/* nx_match_to_string() and helpers. */
return ds_steal_cstr(&s);
}
+char *
+oxm_match_to_string(const uint8_t *p, unsigned int match_len)
+{
+ const struct ofp11_match_header *omh = (struct ofp11_match_header *)p;
+ uint16_t match_len_;
+ struct ds s;
+
+ ds_init(&s);
+
+ if (match_len < sizeof *omh) {
+ ds_put_format(&s, "<match too short: %u>", match_len);
+ goto err;
+ }
+
+ if (omh->type != htons(OFPMT_OXM)) {
+ ds_put_format(&s, "<bad match type field: %u>", ntohs(omh->type));
+ goto err;
+ }
+
+ match_len_ = ntohs(omh->length);
+ if (match_len_ < sizeof *omh) {
+ ds_put_format(&s, "<match length field too short: %u>", match_len_);
+ goto err;
+ }
+
+ if (match_len_ != match_len) {
+ ds_put_format(&s, "<match length field incorrect: %u != %u>",
+ match_len_, match_len);
+ goto err;
+ }
+
+ return nx_match_to_string(p + sizeof *omh, match_len - sizeof *omh);
+
+err:
+ return ds_steal_cstr(&s);
+}
+
static void
format_nxm_field_name(struct ds *s, uint32_t header)
{
- const struct nxm_field *f = nxm_field_lookup(header);
- if (f) {
- ds_put_cstr(s, f->name);
+ const struct mf_field *mf = mf_from_nxm_header(header);
+ if (mf) {
+ ds_put_cstr(s, IS_OXM_HEADER(header) ? mf->oxm_name : mf->nxm_name);
+ if (NXM_HASMASK(header)) {
+ ds_put_cstr(s, "_W");
+ }
+ } else if (header == NXM_NX_COOKIE) {
+ ds_put_cstr(s, "NXM_NX_COOKIE");
+ } else if (header == NXM_NX_COOKIE_W) {
+ ds_put_cstr(s, "NXM_NX_COOKIE_W");
} else {
ds_put_format(s, "%d:%d", NXM_VENDOR(header), NXM_FIELD(header));
}
static uint32_t
parse_nxm_field_name(const char *name, int name_len)
{
- const struct nxm_field *f;
+ bool wild;
+ int i;
/* Check whether it's a field name. */
- for (f = nxm_fields; f < &nxm_fields[ARRAY_SIZE(nxm_fields)]; f++) {
- if (!strncmp(f->name, name, name_len) && f->name[name_len] == '\0') {
- return f->header;
+ wild = name_len > 2 && !memcmp(&name[name_len - 2], "_W", 2);
+ if (wild) {
+ name_len -= 2;
+ }
+
+ for (i = 0; i < MFF_N_IDS; i++) {
+ const struct mf_field *mf = mf_from_id(i);
+ uint32_t header;
+
+ if (mf->nxm_name &&
+ !strncmp(mf->nxm_name, name, name_len) &&
+ mf->nxm_name[name_len] == '\0') {
+ header = mf->nxm_header;
+ } else if (mf->oxm_name &&
+ !strncmp(mf->oxm_name, name, name_len) &&
+ mf->oxm_name[name_len] == '\0') {
+ header = mf->oxm_header;
+ } else {
+ continue;
+ }
+
+ if (!wild) {
+ return header;
+ } else if (mf->maskable != MFM_NONE) {
+ return NXM_MAKE_WILD_HEADER(header);
+ }
+ }
+
+ if (!strncmp("NXM_NX_COOKIE", name, name_len) &&
+ (name_len == strlen("NXM_NX_COOKIE"))) {
+ if (!wild) {
+ return NXM_NX_COOKIE;
+ } else {
+ return NXM_NX_COOKIE_W;
}
}
return 0;
}
-
-static const char *
-parse_hex_bytes(struct ofpbuf *b, const char *s, unsigned int n)
-{
- while (n--) {
- uint8_t byte;
- bool ok;
-
- s += strspn(s, " ");
- byte = hexits_value(s, 2, &ok);
- if (!ok) {
- ovs_fatal(0, "%.2s: hex digits expected", s);
- }
-
- ofpbuf_put(b, &byte, 1);
- s += 2;
- }
- return s;
-}
\f
/* nx_match_from_string(). */
-int
-nx_match_from_string(const char *s, struct ofpbuf *b)
+static int
+nx_match_from_string_raw(const char *s, struct ofpbuf *b)
{
const char *full_s = s;
const size_t start_len = b->size;
- int match_len;
if (!strcmp(s, "<any>")) {
/* Ensure that 'b->data' isn't actually null. */
const char *name;
uint32_t header;
int name_len;
+ size_t n;
name = s;
name_len = strcspn(s, "(");
s += name_len + 1;
nxm_put_header(b, header);
- s = parse_hex_bytes(b, s, nxm_field_bytes(header));
+ s = ofpbuf_put_hex(b, s, &n);
+ if (n != nxm_field_bytes(header)) {
+ ovs_fatal(0, "%.2s: hex digits expected", s);
+ }
if (NXM_HASMASK(header)) {
s += strspn(s, " ");
if (*s != '/') {
ovs_fatal(0, "%s: missing / in masked field %.*s",
full_s, name_len, name);
}
- s = parse_hex_bytes(b, s + 1, nxm_field_bytes(header));
+ s = ofpbuf_put_hex(b, s + 1, &n);
+ if (n != nxm_field_bytes(header)) {
+ ovs_fatal(0, "%.2s: hex digits expected", s);
+ }
}
s += strspn(s, " ");
s++;
}
- match_len = b->size - start_len;
+ return b->size - start_len;
+}
+
/* Parses 's' as an nx_match and appends it to 'b', padded with zeros to a
 * multiple of 8 bytes.  Returns the unpadded match length. */
int
nx_match_from_string(const char *s, struct ofpbuf *b)
{
    int len;

    len = nx_match_from_string_raw(s, b);
    ofpbuf_put_zeros(b, ROUND_UP(len, 8) - len);
    return len;
}
-\f
-static const char *
-parse_nxm_field_bits(const char *s, uint32_t *headerp, int *ofsp, int *n_bitsp)
+
+int
+oxm_match_from_string(const char *s, struct ofpbuf *b)
{
- const char *full_s = s;
- const char *name;
- uint32_t header;
- int start, end;
- int name_len;
- int width;
-
- name = s;
- name_len = strcspn(s, "[");
- if (s[name_len] != '[') {
- ovs_fatal(0, "%s: missing [ looking for field name", full_s);
- }
+ int match_len;
+ struct ofp11_match_header *omh;
+ size_t start_len = b->size;
- header = parse_nxm_field_name(name, name_len);
- if (!header) {
- ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
- }
- width = nxm_field_bits(header);
-
- s += name_len;
- if (sscanf(s, "[%d..%d]", &start, &end) == 2) {
- /* Nothing to do. */
- } else if (sscanf(s, "[%d]", &start) == 1) {
- end = start;
- } else if (!strncmp(s, "[]", 2)) {
- start = 0;
- end = width - 1;
- } else {
- ovs_fatal(0, "%s: syntax error expecting [] or [<bit>] or "
- "[<start>..<end>]", full_s);
- }
- s = strchr(s, ']') + 1;
-
- if (start > end) {
- ovs_fatal(0, "%s: starting bit %d is after ending bit %d",
- full_s, start, end);
- } else if (start >= width) {
- ovs_fatal(0, "%s: starting bit %d is not valid because field is only "
- "%d bits wide", full_s, start, width);
- } else if (end >= width){
- ovs_fatal(0, "%s: ending bit %d is not valid because field is only "
- "%d bits wide", full_s, end, width);
- }
+ ofpbuf_put_uninit(b, sizeof *omh);
+ match_len = nx_match_from_string_raw(s, b) + sizeof *omh;
+ ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
- *headerp = header;
- *ofsp = start;
- *n_bitsp = end - start + 1;
+ omh = (struct ofp11_match_header *)((char *)b->data + start_len);
+ omh->type = htons(OFPMT_OXM);
+ omh->length = htons(match_len);
- return s;
+ return match_len;
}
-
+\f
void
-nxm_parse_reg_move(struct nx_action_reg_move *move, const char *s)
+nxm_parse_reg_move(struct ofpact_reg_move *move, const char *s)
{
const char *full_s = s;
- uint32_t src, dst;
- int src_ofs, dst_ofs;
- int src_n_bits, dst_n_bits;
- s = parse_nxm_field_bits(s, &src, &src_ofs, &src_n_bits);
+ s = mf_parse_subfield(&move->src, s);
if (strncmp(s, "->", 2)) {
ovs_fatal(0, "%s: missing `->' following source", full_s);
}
s += 2;
- s = parse_nxm_field_bits(s, &dst, &dst_ofs, &dst_n_bits);
+ s = mf_parse_subfield(&move->dst, s);
if (*s != '\0') {
ovs_fatal(0, "%s: trailing garbage following destination", full_s);
}
- if (src_n_bits != dst_n_bits) {
+ if (move->src.n_bits != move->dst.n_bits) {
ovs_fatal(0, "%s: source field is %d bits wide but destination is "
- "%d bits wide", full_s, src_n_bits, dst_n_bits);
+ "%d bits wide", full_s,
+ move->src.n_bits, move->dst.n_bits);
}
-
- move->type = htons(OFPAT_VENDOR);
- move->len = htons(sizeof *move);
- move->vendor = htonl(NX_VENDOR_ID);
- move->subtype = htons(NXAST_REG_MOVE);
- move->n_bits = htons(src_n_bits);
- move->src_ofs = htons(src_ofs);
- move->dst_ofs = htons(dst_ofs);
- move->src = htonl(src);
- move->dst = htonl(dst);
}
void
-nxm_parse_reg_load(struct nx_action_reg_load *load, const char *s)
+nxm_parse_reg_load(struct ofpact_reg_load *load, const char *s)
{
const char *full_s = s;
- uint32_t dst;
- int ofs, n_bits;
- uint64_t value;
+ uint64_t value = strtoull(s, (char **) &s, 0);
- value = strtoull(s, (char **) &s, 0);
if (strncmp(s, "->", 2)) {
ovs_fatal(0, "%s: missing `->' following value", full_s);
}
s += 2;
- s = parse_nxm_field_bits(s, &dst, &ofs, &n_bits);
+ s = mf_parse_subfield(&load->dst, s);
if (*s != '\0') {
ovs_fatal(0, "%s: trailing garbage following destination", full_s);
}
- if (n_bits < 64 && (value >> n_bits) != 0) {
- ovs_fatal(0, "%s: value %llu does not fit into %d bits",
- full_s, value, n_bits);
+ if (load->dst.n_bits < 64 && (value >> load->dst.n_bits) != 0) {
+ ovs_fatal(0, "%s: value %"PRIu64" does not fit into %d bits",
+ full_s, value, load->dst.n_bits);
}
- load->type = htons(OFPAT_VENDOR);
- load->len = htons(sizeof *load);
- load->vendor = htonl(NX_VENDOR_ID);
- load->subtype = htons(NXAST_REG_LOAD);
- load->ofs_nbits = htons((ofs << 6) | (n_bits - 1));
- load->dst = htonl(dst);
- load->value = htonll(value);
+ load->subvalue.be64[0] = htonll(0);
+ load->subvalue.be64[1] = htonll(value);
}
\f
/* nxm_format_reg_move(), nxm_format_reg_load(). */
-static void
-format_nxm_field_bits(struct ds *s, uint32_t header, int ofs, int n_bits)
+void
+nxm_format_reg_move(const struct ofpact_reg_move *move, struct ds *s)
{
- format_nxm_field_name(s, header);
- if (n_bits != 1) {
- ds_put_format(s, "[%d..%d]", ofs, ofs + n_bits - 1);
- } else {
- ds_put_format(s, "[%d]", ofs);
- }
+ ds_put_format(s, "move:");
+ mf_format_subfield(&move->src, s);
+ ds_put_cstr(s, "->");
+ mf_format_subfield(&move->dst, s);
}
-void
-nxm_format_reg_move(const struct nx_action_reg_move *move, struct ds *s)
+static void
+set_field_format(const struct ofpact_reg_load *load, struct ds *s)
{
- int n_bits = ntohs(move->n_bits);
- int src_ofs = ntohs(move->src_ofs);
- int dst_ofs = ntohs(move->dst_ofs);
- uint32_t src = ntohl(move->src);
- uint32_t dst = ntohl(move->dst);
+ const struct mf_field *mf = load->dst.field;
+ union mf_value value;
+
+ ovs_assert(load->ofpact.compat == OFPUTIL_OFPAT12_SET_FIELD);
+ ds_put_format(s, "set_field:");
+ memset(&value, 0, sizeof value);
+ bitwise_copy(&load->subvalue, sizeof load->subvalue, 0,
+ &value, mf->n_bytes, 0, load->dst.n_bits);
+ mf_format(mf, &value, NULL, s);
+ ds_put_format(s, "->%s", mf->name);
+}
- ds_put_format(s, "move:");
- format_nxm_field_bits(s, src, src_ofs, n_bits);
+static void
+load_format(const struct ofpact_reg_load *load, struct ds *s)
+{
+ ds_put_cstr(s, "load:");
+ mf_format_subvalue(&load->subvalue, s);
ds_put_cstr(s, "->");
- format_nxm_field_bits(s, dst, dst_ofs, n_bits);
+ mf_format_subfield(&load->dst, s);
}
void
-nxm_format_reg_load(const struct nx_action_reg_load *load, struct ds *s)
+nxm_format_reg_load(const struct ofpact_reg_load *load, struct ds *s)
{
- uint16_t ofs_nbits = ntohs(load->ofs_nbits);
- int ofs = ofs_nbits >> 6;
- int n_bits = (ofs_nbits & 0x3f) + 1;
- uint32_t dst = ntohl(load->dst);
- uint64_t value = ntohll(load->value);
-
- ds_put_format(s, "load:%"PRIu64"->", value);
- format_nxm_field_bits(s, dst, ofs, n_bits);
+ if (load->ofpact.compat == OFPUTIL_OFPAT12_SET_FIELD) {
+ set_field_format(load, s);
+ } else {
+ load_format(load, s);
+ }
}
\f
-/* nxm_check_reg_move(), nxm_check_reg_load(). */
-
-static bool
-field_ok(const struct nxm_field *f, const struct flow *flow, int size)
+enum ofperr
+nxm_reg_move_from_openflow(const struct nx_action_reg_move *narm,
+ struct ofpbuf *ofpacts)
{
- return (f && !NXM_HASMASK(f->header)
- && nxm_prereqs_ok(f, flow) && size <= nxm_field_bits(f->header));
+ struct ofpact_reg_move *move;
+
+ move = ofpact_put_REG_MOVE(ofpacts);
+ move->src.field = mf_from_nxm_header(ntohl(narm->src));
+ move->src.ofs = ntohs(narm->src_ofs);
+ move->src.n_bits = ntohs(narm->n_bits);
+ move->dst.field = mf_from_nxm_header(ntohl(narm->dst));
+ move->dst.ofs = ntohs(narm->dst_ofs);
+ move->dst.n_bits = ntohs(narm->n_bits);
+
+ return nxm_reg_move_check(move, NULL);
}
-int
-nxm_check_reg_move(const struct nx_action_reg_move *action,
- const struct flow *flow)
+enum ofperr
+nxm_reg_load_from_openflow(const struct nx_action_reg_load *narl,
+ struct ofpbuf *ofpacts)
{
- const struct nxm_field *src;
- const struct nxm_field *dst;
-
- if (action->n_bits == htons(0)) {
- return BAD_ARGUMENT;
+ struct ofpact_reg_load *load;
+
+ load = ofpact_put_REG_LOAD(ofpacts);
+ load->dst.field = mf_from_nxm_header(ntohl(narl->dst));
+ load->dst.ofs = nxm_decode_ofs(narl->ofs_nbits);
+ load->dst.n_bits = nxm_decode_n_bits(narl->ofs_nbits);
+ load->subvalue.be64[1] = narl->value;
+
+ /* Reject 'narl' if a bit numbered 'n_bits' or higher is set to 1 in
+ * narl->value. */
+ if (load->dst.n_bits < 64 &&
+ ntohll(narl->value) >> load->dst.n_bits) {
+ return OFPERR_OFPBAC_BAD_ARGUMENT;
}
- src = nxm_field_lookup(ntohl(action->src));
- if (!field_ok(src, flow, ntohs(action->src_ofs) + ntohs(action->n_bits))) {
- return BAD_ARGUMENT;
+ return nxm_reg_load_check(load, NULL);
+}
+
+enum ofperr
+nxm_reg_load_from_openflow12_set_field(
+ const struct ofp12_action_set_field * oasf, struct ofpbuf *ofpacts)
+{
+ uint16_t oasf_len = ntohs(oasf->len);
+ uint32_t oxm_header = ntohl(oasf->dst);
+ uint8_t oxm_length = NXM_LENGTH(oxm_header);
+ struct ofpact_reg_load *load;
+ const struct mf_field *mf;
+
+ /* ofp12_action_set_field is padded to 64 bits by zero */
+ if (oasf_len != ROUND_UP(sizeof(*oasf) + oxm_length, 8)) {
+ return OFPERR_OFPBAC_BAD_ARGUMENT;
+ }
+ if (!is_all_zeros((const uint8_t *)(oasf) + sizeof *oasf + oxm_length,
+ oasf_len - oxm_length - sizeof *oasf)) {
+ return OFPERR_OFPBAC_BAD_ARGUMENT;
}
- dst = nxm_field_lookup(ntohl(action->dst));
- if (!field_ok(dst, flow, ntohs(action->dst_ofs) + ntohs(action->n_bits))) {
- return BAD_ARGUMENT;
+ if (NXM_HASMASK(oxm_header)) {
+ return OFPERR_OFPBAC_BAD_ARGUMENT;
+ }
+ mf = mf_from_nxm_header(oxm_header);
+ if (!mf) {
+ return OFPERR_OFPBAC_BAD_ARGUMENT;
}
+ load = ofpact_put_REG_LOAD(ofpacts);
+ ofpact_set_field_init(load, mf, oasf + 1);
+
+ return nxm_reg_load_check(load, NULL);
+}
+\f
+enum ofperr
+nxm_reg_move_check(const struct ofpact_reg_move *move, const struct flow *flow)
+{
+ enum ofperr error;
- if (!NXM_IS_NX_REG(dst->header)
- && dst->header != NXM_OF_VLAN_TCI
- && dst->header != NXM_NX_TUN_ID) {
- return BAD_ARGUMENT;
+ error = mf_check_src(&move->src, flow);
+ if (error) {
+ return error;
}
- return 0;
+ return mf_check_dst(&move->dst, NULL);
}
-int
-nxm_check_reg_load(const struct nx_action_reg_load *action,
- const struct flow *flow)
+enum ofperr
+nxm_reg_load_check(const struct ofpact_reg_load *load, const struct flow *flow)
{
- const struct nxm_field *dst;
- int ofs, n_bits;
+ return mf_check_dst(&load->dst, flow);
+}
+\f
+void
+nxm_reg_move_to_nxast(const struct ofpact_reg_move *move,
+ struct ofpbuf *openflow)
+{
+ struct nx_action_reg_move *narm;
+
+ narm = ofputil_put_NXAST_REG_MOVE(openflow);
+ narm->n_bits = htons(move->dst.n_bits);
+ narm->src_ofs = htons(move->src.ofs);
+ narm->dst_ofs = htons(move->dst.ofs);
+ narm->src = htonl(move->src.field->nxm_header);
+ narm->dst = htonl(move->dst.field->nxm_header);
+}
- ofs = ntohs(action->ofs_nbits) >> 6;
- n_bits = (ntohs(action->ofs_nbits) & 0x3f) + 1;
- dst = nxm_field_lookup(ntohl(action->dst));
- if (!field_ok(dst, flow, ofs + n_bits)) {
- return BAD_ARGUMENT;
- }
+static void
+reg_load_to_nxast(const struct ofpact_reg_load *load, struct ofpbuf *openflow)
+{
+ struct nx_action_reg_load *narl;
- /* Reject 'action' if a bit numbered 'n_bits' or higher is set to 1 in
- * action->value. */
- if (n_bits < 64 && ntohll(action->value) >> n_bits) {
- return BAD_ARGUMENT;
- }
+ narl = ofputil_put_NXAST_REG_LOAD(openflow);
+ narl->ofs_nbits = nxm_encode_ofs_nbits(load->dst.ofs, load->dst.n_bits);
+ narl->dst = htonl(load->dst.field->nxm_header);
+ narl->value = load->subvalue.be64[1];
+}
- if (!NXM_IS_NX_REG(dst->header)) {
- return BAD_ARGUMENT;
- }
+static void
+set_field_to_ofast(const struct ofpact_reg_load *load,
+ struct ofpbuf *openflow)
+{
+ const struct mf_field *mf = load->dst.field;
+ uint16_t padded_value_len = ROUND_UP(mf->n_bytes, 8);
+ struct ofp12_action_set_field *oasf;
+ char *value;
+
+ /* Set field is the only action of variable length (so far),
+ * so handling the variable length portion is open-coded here */
+ oasf = ofputil_put_OFPAT12_SET_FIELD(openflow);
+ oasf->dst = htonl(mf->oxm_header);
+ oasf->len = htons(ntohs(oasf->len) + padded_value_len);
+
+ value = ofpbuf_put_zeros(openflow, padded_value_len);
+ bitwise_copy(&load->subvalue, sizeof load->subvalue, load->dst.ofs,
+ value, mf->n_bytes, load->dst.ofs, load->dst.n_bits);
+}
- return 0;
+void
+nxm_reg_load_to_nxast(const struct ofpact_reg_load *load,
+ struct ofpbuf *openflow)
+{
+
+ if (load->ofpact.compat == OFPUTIL_OFPAT12_SET_FIELD) {
+ struct ofp_header *oh = (struct ofp_header *)openflow->l2;
+
+ switch(oh->version) {
+ case OFP13_VERSION:
+ case OFP12_VERSION:
+ set_field_to_ofast(load, openflow);
+ break;
+
+ case OFP11_VERSION:
+ case OFP10_VERSION:
+ if (load->dst.n_bits < 64) {
+ reg_load_to_nxast(load, openflow);
+ } else {
+ /* Split into 64bit chunks */
+ int chunk, ofs;
+ for (ofs = 0; ofs < load->dst.n_bits; ofs += chunk) {
+ struct ofpact_reg_load subload = *load;
+
+ chunk = MIN(load->dst.n_bits - ofs, 64);
+
+ subload.dst.field = load->dst.field;
+ subload.dst.ofs = load->dst.ofs + ofs;
+ subload.dst.n_bits = chunk;
+ bitwise_copy(&load->subvalue, sizeof load->subvalue, ofs,
+ &subload.subvalue, sizeof subload.subvalue, 0,
+ chunk);
+ reg_load_to_nxast(&subload, openflow);
+ }
+ }
+ break;
+
+ default:
+ NOT_REACHED();
+ }
+ } else {
+ reg_load_to_nxast(load, openflow);
+ }
}
\f
/* nxm_execute_reg_move(), nxm_execute_reg_load(). */
-static uint64_t
-nxm_read_field(const struct nxm_field *src, const struct flow *flow)
-{
- switch (src->index) {
- case NFI_NXM_OF_IN_PORT:
- return flow->in_port == ODPP_LOCAL ? OFPP_LOCAL : flow->in_port;
-
- case NFI_NXM_OF_ETH_DST:
- return eth_addr_to_uint64(flow->dl_dst);
-
- case NFI_NXM_OF_ETH_SRC:
- return eth_addr_to_uint64(flow->dl_src);
-
- case NFI_NXM_OF_ETH_TYPE:
- return ntohs(flow->dl_type);
-
- case NFI_NXM_OF_VLAN_TCI:
- return ntohs(flow->vlan_tci);
-
- case NFI_NXM_OF_IP_TOS:
- return flow->nw_tos;
-
- case NFI_NXM_OF_IP_PROTO:
- case NFI_NXM_OF_ARP_OP:
- return flow->nw_proto;
-
- case NFI_NXM_OF_IP_SRC:
- case NFI_NXM_OF_ARP_SPA:
- return ntohl(flow->nw_src);
-
- case NFI_NXM_OF_IP_DST:
- case NFI_NXM_OF_ARP_TPA:
- return ntohl(flow->nw_dst);
-
- case NFI_NXM_OF_TCP_SRC:
- case NFI_NXM_OF_UDP_SRC:
- return ntohs(flow->tp_src);
-
- case NFI_NXM_OF_TCP_DST:
- case NFI_NXM_OF_UDP_DST:
- return ntohs(flow->tp_dst);
-
- case NFI_NXM_OF_ICMP_TYPE:
- return ntohs(flow->tp_src) & 0xff;
-
- case NFI_NXM_OF_ICMP_CODE:
- return ntohs(flow->tp_dst) & 0xff;
-
- case NFI_NXM_NX_TUN_ID:
- return ntohl(flow->tun_id);
-
-#define NXM_READ_REGISTER(IDX) \
- case NFI_NXM_NX_REG##IDX: \
- return flow->regs[IDX]; \
- case NFI_NXM_NX_REG##IDX##_W: \
- NOT_REACHED();
-
- NXM_READ_REGISTER(0);
-#if FLOW_N_REGS >= 2
- NXM_READ_REGISTER(1);
-#endif
-#if FLOW_N_REGS >= 3
- NXM_READ_REGISTER(2);
-#endif
-#if FLOW_N_REGS >= 4
- NXM_READ_REGISTER(3);
-#endif
-#if FLOW_N_REGS > 4
-#error
-#endif
-
- case NFI_NXM_OF_ETH_DST_W:
- case NFI_NXM_OF_VLAN_TCI_W:
- case NFI_NXM_OF_IP_SRC_W:
- case NFI_NXM_OF_IP_DST_W:
- case NFI_NXM_OF_ARP_SPA_W:
- case NFI_NXM_OF_ARP_TPA_W:
- case N_NXM_FIELDS:
- NOT_REACHED();
+void
+nxm_execute_reg_move(const struct ofpact_reg_move *move,
+ struct flow *flow)
+{
+ union mf_value src_value;
+ union mf_value dst_value;
+
+ mf_get_value(move->dst.field, flow, &dst_value);
+ mf_get_value(move->src.field, flow, &src_value);
+ bitwise_copy(&src_value, move->src.field->n_bytes, move->src.ofs,
+ &dst_value, move->dst.field->n_bytes, move->dst.ofs,
+ move->src.n_bits);
+ mf_set_flow_value(move->dst.field, &dst_value, flow);
+}
+
+void
+nxm_execute_reg_load(const struct ofpact_reg_load *load, struct flow *flow)
+{
+ mf_write_subfield_flow(&load->dst, &load->subvalue, flow);
+}
+
+void
+nxm_reg_load(const struct mf_subfield *dst, uint64_t src_data,
+ struct flow *flow)
+{
+ union mf_subvalue src_subvalue;
+ ovs_be64 src_data_be = htonll(src_data);
+
+ bitwise_copy(&src_data_be, sizeof src_data_be, 0,
+ &src_subvalue, sizeof src_subvalue, 0,
+ sizeof src_data_be * 8);
+ mf_write_subfield_flow(dst, &src_subvalue, flow);
+}
+\f
+/* nxm_parse_stack_action, works for both push() and pop(). */
+void
+nxm_parse_stack_action(struct ofpact_stack *stack_action, const char *s)
+{
+ s = mf_parse_subfield(&stack_action->subfield, s);
+ if (*s != '\0') {
+ ovs_fatal(0, "%s: trailing garbage following push or pop", s);
}
+}
- NOT_REACHED();
+void
+nxm_format_stack_push(const struct ofpact_stack *push, struct ds *s)
+{
+ ds_put_cstr(s, "push:");
+ mf_format_subfield(&push->subfield, s);
}
void
-nxm_execute_reg_move(const struct nx_action_reg_move *action,
- struct flow *flow)
+nxm_format_stack_pop(const struct ofpact_stack *pop, struct ds *s)
{
- /* Preparation. */
- int n_bits = ntohs(action->n_bits);
- uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1;
-
- /* Get the interesting bits of the source field. */
- const struct nxm_field *src = nxm_field_lookup(ntohl(action->src));
- int src_ofs = ntohs(action->src_ofs);
- uint64_t src_data = nxm_read_field(src, flow) & (mask << src_ofs);
-
- /* Get the remaining bits of the destination field. */
- const struct nxm_field *dst = nxm_field_lookup(ntohl(action->dst));
- int dst_ofs = ntohs(action->dst_ofs);
- uint64_t dst_data = nxm_read_field(dst, flow) & ~(mask << dst_ofs);
-
- /* Get the final value. */
- uint64_t new_data = dst_data | ((src_data >> src_ofs) << dst_ofs);
-
- /* Store the result. */
- if (NXM_IS_NX_REG(dst->header)) {
- flow->regs[NXM_NX_REG_IDX(dst->header)] = new_data;
- } else if (dst->header == NXM_OF_VLAN_TCI) {
- flow->vlan_tci = htons(new_data);
- } else if (dst->header == NXM_NX_TUN_ID) {
- flow->tun_id = htonl(new_data);
- } else {
- NOT_REACHED();
+ ds_put_cstr(s, "pop:");
+ mf_format_subfield(&pop->subfield, s);
+}
+
+/* Common set for both push and pop actions. */
+static void
+stack_action_from_openflow__(const struct nx_action_stack *nasp,
+ struct ofpact_stack *stack_action)
+{
+ stack_action->subfield.field = mf_from_nxm_header(ntohl(nasp->field));
+ stack_action->subfield.ofs = ntohs(nasp->offset);
+ stack_action->subfield.n_bits = ntohs(nasp->n_bits);
+}
+
+static void
+nxm_stack_to_nxast__(const struct ofpact_stack *stack_action,
+ struct nx_action_stack *nasp)
+{
+ nasp->offset = htons(stack_action->subfield.ofs);
+ nasp->n_bits = htons(stack_action->subfield.n_bits);
+ nasp->field = htonl(stack_action->subfield.field->nxm_header);
+}
+
+enum ofperr
+nxm_stack_push_from_openflow(const struct nx_action_stack *nasp,
+ struct ofpbuf *ofpacts)
+{
+ struct ofpact_stack *push;
+
+ push = ofpact_put_STACK_PUSH(ofpacts);
+ stack_action_from_openflow__(nasp, push);
+
+ return nxm_stack_push_check(push, NULL);
+}
+
+enum ofperr
+nxm_stack_pop_from_openflow(const struct nx_action_stack *nasp,
+ struct ofpbuf *ofpacts)
+{
+ struct ofpact_stack *pop;
+
+ pop = ofpact_put_STACK_POP(ofpacts);
+ stack_action_from_openflow__(nasp, pop);
+
+ return nxm_stack_pop_check(pop, NULL);
+}
+
+enum ofperr
+nxm_stack_push_check(const struct ofpact_stack *push,
+ const struct flow *flow)
+{
+ return mf_check_src(&push->subfield, flow);
+}
+
+enum ofperr
+nxm_stack_pop_check(const struct ofpact_stack *pop,
+ const struct flow *flow)
+{
+ return mf_check_dst(&pop->subfield, flow);
+}
+
/* Encodes 'stack' as an NXAST_STACK_PUSH action appended to 'openflow'. */
void
nxm_stack_push_to_nxast(const struct ofpact_stack *stack,
                        struct ofpbuf *openflow)
{
    nxm_stack_to_nxast__(stack, ofputil_put_NXAST_STACK_PUSH(openflow));
}
+
/* Encodes 'stack' as an NXAST_STACK_POP action appended to 'openflow'. */
void
nxm_stack_pop_to_nxast(const struct ofpact_stack *stack,
                       struct ofpbuf *openflow)
{
    nxm_stack_to_nxast__(stack, ofputil_put_NXAST_STACK_POP(openflow));
}
+
+/* nxm_execute_stack_push(), nxm_execute_stack_pop(). */
+static void
+nx_stack_push(struct ofpbuf *stack, union mf_subvalue *v)
+{
+ ofpbuf_put(stack, v, sizeof *v);
+}
+
+static union mf_subvalue *
+nx_stack_pop(struct ofpbuf *stack)
+{
+ union mf_subvalue *v = NULL;
+
+ if (stack->size) {
+ stack->size -= sizeof *v;
+ v = (union mf_subvalue *) ofpbuf_tail(stack);
}
+
+ return v;
}
void
-nxm_execute_reg_load(const struct nx_action_reg_load *action,
- struct flow *flow)
+nxm_execute_stack_push(const struct ofpact_stack *push,
+ const struct flow *flow, struct ofpbuf *stack)
{
- /* Preparation. */
- int n_bits = (ntohs(action->ofs_nbits) & 0x3f) + 1;
- uint32_t mask = n_bits == 32 ? UINT32_MAX : (UINT32_C(1) << n_bits) - 1;
- uint32_t *reg = &flow->regs[NXM_NX_REG_IDX(ntohl(action->dst))];
+ union mf_subvalue dst_value;
- /* Get source data. */
- uint32_t src_data = ntohll(action->value);
+ mf_read_subfield(&push->subfield, flow, &dst_value);
+ nx_stack_push(stack, &dst_value);
+}
+
+void
+nxm_execute_stack_pop(const struct ofpact_stack *pop,
+ struct flow *flow, struct ofpbuf *stack)
+{
+ union mf_subvalue *src_value;
- /* Get remaining bits of the destination field. */
- int dst_ofs = ntohs(action->ofs_nbits) >> 6;
- uint32_t dst_data = *reg & ~(mask << dst_ofs);
+ src_value = nx_stack_pop(stack);
- *reg = dst_data | (src_data << dst_ofs);
+ /* Only pop if stack is not empty. Otherwise, give warning. */
+ if (src_value) {
+ mf_write_subfield_flow(&pop->subfield, src_value, flow);
+ } else {
+ if (!VLOG_DROP_WARN(&rl)) {
+ char *flow_str = flow_to_string(flow);
+ VLOG_WARN_RL(&rl, "Failed to pop from an empty stack. On flow \n"
+ " %s", flow_str);
+ free(flow_str);
+ }
+ }
}