X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=lib%2Fnx-match.c;h=3839f19922159d0f6f5457b0ec41ce1779934e1e;hb=003ce655b7116d18c86a74c50391e54990346931;hp=ecc284e945b7f37e3ca9a482abb7c1df5a96583f;hpb=a877206f2f0ca10c625e311614d68a81bdc913ee;p=sliver-openvswitch.git diff --git a/lib/nx-match.c b/lib/nx-match.c index ecc284e94..3839f1992 100644 --- a/lib/nx-match.c +++ b/lib/nx-match.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010, 2011 Nicira Networks. + * Copyright (c) 2010, 2011, 2012, 2013, 2014 Nicira, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,11 +22,15 @@ #include "classifier.h" #include "dynamic-string.h" +#include "meta-flow.h" +#include "ofp-actions.h" +#include "ofp-errors.h" #include "ofp-util.h" #include "ofpbuf.h" #include "openflow/nicira-ext.h" #include "packets.h" #include "unaligned.h" +#include "util.h" #include "vlog.h" VLOG_DEFINE_THIS_MODULE(nx_match); @@ -35,88 +39,6 @@ VLOG_DEFINE_THIS_MODULE(nx_match); * peer and so there's not much point in showing a lot of them. */ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); -enum { - NXM_INVALID = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_INVALID), - NXM_BAD_TYPE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_TYPE), - NXM_BAD_VALUE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_VALUE), - NXM_BAD_MASK = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_MASK), - NXM_BAD_PREREQ = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_BAD_PREREQ), - NXM_DUP_TYPE = OFP_MKERR_NICIRA(OFPET_BAD_REQUEST, NXBRC_NXM_DUP_TYPE), - BAD_ARGUMENT = OFP_MKERR(OFPET_BAD_ACTION, OFPBAC_BAD_ARGUMENT) -}; - -/* For each NXM_* field, define NFI_NXM_* as consecutive integers starting from - * zero. */ -enum nxm_field_index { -#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPES, NW_PROTO, WRITABLE) \ - NFI_NXM_##HEADER, -#include "nx-match.def" - N_NXM_FIELDS -}; - -struct nxm_field { - struct hmap_node hmap_node; - enum nxm_field_index index; /* NFI_* value. */ - uint32_t header; /* NXM_* value. */ - flow_wildcards_t wildcard; /* FWW_* bit, if exactly one. */ - ovs_be16 dl_type[N_NXM_DL_TYPES]; /* dl_type prerequisites. */ - uint8_t nw_proto; /* nw_proto prerequisite, if nonzero. */ - const char *name; /* "NXM_*" string. */ - bool writable; /* Writable with NXAST_REG_{MOVE,LOAD}? */ -}; - - -/* All the known fields. */ -static struct nxm_field nxm_fields[N_NXM_FIELDS] = { -#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPES, NW_PROTO, WRITABLE) \ - { HMAP_NODE_NULL_INITIALIZER, NFI_NXM_##HEADER, NXM_##HEADER, WILDCARD, \ - DL_CONVERT DL_TYPES, NW_PROTO, "NXM_" #HEADER, WRITABLE }, -#define DL_CONVERT(T1, T2) { CONSTANT_HTONS(T1), CONSTANT_HTONS(T2) } -#include "nx-match.def" -}; - -/* Hash table of 'nxm_fields'. */ -static struct hmap all_nxm_fields = HMAP_INITIALIZER(&all_nxm_fields); - -static void -nxm_init(void) -{ - if (hmap_is_empty(&all_nxm_fields)) { - int i; - - for (i = 0; i < N_NXM_FIELDS; i++) { - struct nxm_field *f = &nxm_fields[i]; - hmap_insert(&all_nxm_fields, &f->hmap_node, - hash_int(f->header, 0)); - } - - /* Verify that the header values are unique (duplicate "case" values - * cause a compile error). 
*/ - switch (0) { -#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO, WRITABLE) \ - case NXM_##HEADER: break; -#include "nx-match.def" - } - } -} - -static const struct nxm_field * -nxm_field_lookup(uint32_t header) -{ - struct nxm_field *f; - - nxm_init(); - - HMAP_FOR_EACH_WITH_HASH (f, hmap_node, hash_int(header, 0), - &all_nxm_fields) { - if (f->header == header) { - return f; - } - } - - return NULL; -} - /* Returns the width of the data for a field with the given 'header', in * bytes. */ int @@ -136,316 +58,6 @@ nxm_field_bits(uint32_t header) /* nx_pull_match() and helpers. */ -static int -parse_nx_reg(const struct nxm_field *f, - struct flow *flow, struct flow_wildcards *wc, - const void *value, const void *maskp) -{ - int idx = NXM_NX_REG_IDX(f->header); - if (wc->reg_masks[idx]) { - return NXM_DUP_TYPE; - } else { - flow_wildcards_set_reg_mask(wc, idx, - (NXM_HASMASK(f->header) - ? ntohl(get_unaligned_be32(maskp)) - : UINT32_MAX)); - flow->regs[idx] = ntohl(get_unaligned_be32(value)); - flow->regs[idx] &= wc->reg_masks[idx]; - return 0; - } -} - -static int -parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f, - const void *value, const void *mask) -{ - struct flow_wildcards *wc = &rule->wc; - struct flow *flow = &rule->flow; - - BUILD_ASSERT_DECL(FLOW_WC_SEQ == 1); - - switch (f->index) { - /* Metadata. */ - case NFI_NXM_OF_IN_PORT: - flow->in_port = ntohs(get_unaligned_be16(value)); - return 0; - - /* Ethernet header. */ - case NFI_NXM_OF_ETH_DST: - if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST)) - != (FWW_DL_DST | FWW_ETH_MCAST)) { - return NXM_DUP_TYPE; - } else { - wc->wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST); - memcpy(flow->dl_dst, value, ETH_ADDR_LEN); - return 0; - } - case NFI_NXM_OF_ETH_DST_W: - if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST)) - != (FWW_DL_DST | FWW_ETH_MCAST)) { - return NXM_DUP_TYPE; - } else if (flow_wildcards_is_dl_dst_mask_valid(mask)) { - cls_rule_set_dl_dst_masked(rule, value, mask); - return 0; - } else { - return NXM_BAD_MASK; - } - case NFI_NXM_OF_ETH_SRC: - memcpy(flow->dl_src, value, ETH_ADDR_LEN); - return 0; - case NFI_NXM_OF_ETH_TYPE: - flow->dl_type = ofputil_dl_type_from_openflow(get_unaligned_be16(value)); - return 0; - - /* 802.1Q header. */ - case NFI_NXM_OF_VLAN_TCI: - if (wc->vlan_tci_mask) { - return NXM_DUP_TYPE; - } else { - cls_rule_set_dl_tci(rule, get_unaligned_be16(value)); - return 0; - } - case NFI_NXM_OF_VLAN_TCI_W: - if (wc->vlan_tci_mask) { - return NXM_DUP_TYPE; - } else { - cls_rule_set_dl_tci_masked(rule, get_unaligned_be16(value), - get_unaligned_be16(mask)); - return 0; - } - - /* IP header. */ - case NFI_NXM_OF_IP_TOS: - if (*(uint8_t *) value & 0x03) { - return NXM_BAD_VALUE; - } else { - flow->nw_tos = *(uint8_t *) value; - return 0; - } - case NFI_NXM_OF_IP_PROTO: - flow->nw_proto = *(uint8_t *) value; - return 0; - - /* IP addresses in IP and ARP headers. 
*/ - case NFI_NXM_OF_IP_SRC: - case NFI_NXM_OF_ARP_SPA: - if (wc->nw_src_mask) { - return NXM_DUP_TYPE; - } else { - cls_rule_set_nw_src(rule, get_unaligned_be32(value)); - return 0; - } - case NFI_NXM_OF_IP_SRC_W: - case NFI_NXM_OF_ARP_SPA_W: - if (wc->nw_src_mask) { - return NXM_DUP_TYPE; - } else { - ovs_be32 ip = get_unaligned_be32(value); - ovs_be32 netmask = get_unaligned_be32(mask); - if (!cls_rule_set_nw_src_masked(rule, ip, netmask)) { - return NXM_BAD_MASK; - } - return 0; - } - case NFI_NXM_OF_IP_DST: - case NFI_NXM_OF_ARP_TPA: - if (wc->nw_dst_mask) { - return NXM_DUP_TYPE; - } else { - cls_rule_set_nw_dst(rule, get_unaligned_be32(value)); - return 0; - } - case NFI_NXM_OF_IP_DST_W: - case NFI_NXM_OF_ARP_TPA_W: - if (wc->nw_dst_mask) { - return NXM_DUP_TYPE; - } else { - ovs_be32 ip = get_unaligned_be32(value); - ovs_be32 netmask = get_unaligned_be32(mask); - if (!cls_rule_set_nw_dst_masked(rule, ip, netmask)) { - return NXM_BAD_MASK; - } - return 0; - } - - /* IPv6 addresses. */ - case NFI_NXM_NX_IPV6_SRC: - if (!ipv6_mask_is_any(&wc->ipv6_src_mask)) { - return NXM_DUP_TYPE; - } else { - struct in6_addr ipv6; - memcpy(&ipv6, value, sizeof ipv6); - cls_rule_set_ipv6_src(rule, &ipv6); - return 0; - } - case NFI_NXM_NX_IPV6_SRC_W: - if (!ipv6_mask_is_any(&wc->ipv6_src_mask)) { - return NXM_DUP_TYPE; - } else { - struct in6_addr ipv6, netmask; - memcpy(&ipv6, value, sizeof ipv6); - memcpy(&netmask, mask, sizeof netmask); - if (!cls_rule_set_ipv6_src_masked(rule, &ipv6, &netmask)) { - return NXM_BAD_MASK; - } - return 0; - } - case NFI_NXM_NX_IPV6_DST: - if (!ipv6_mask_is_any(&wc->ipv6_dst_mask)) { - return NXM_DUP_TYPE; - } else { - struct in6_addr ipv6; - memcpy(&ipv6, value, sizeof ipv6); - cls_rule_set_ipv6_dst(rule, &ipv6); - return 0; - } - case NFI_NXM_NX_IPV6_DST_W: - if (!ipv6_mask_is_any(&wc->ipv6_dst_mask)) { - return NXM_DUP_TYPE; - } else { - struct in6_addr ipv6, netmask; - memcpy(&ipv6, value, sizeof ipv6); - memcpy(&netmask, mask, sizeof netmask); - if (!cls_rule_set_ipv6_dst_masked(rule, &ipv6, &netmask)) { - return NXM_BAD_MASK; - } - return 0; - } - - /* TCP header. */ - case NFI_NXM_OF_TCP_SRC: - flow->tp_src = get_unaligned_be16(value); - return 0; - case NFI_NXM_OF_TCP_DST: - flow->tp_dst = get_unaligned_be16(value); - return 0; - - /* UDP header. */ - case NFI_NXM_OF_UDP_SRC: - flow->tp_src = get_unaligned_be16(value); - return 0; - case NFI_NXM_OF_UDP_DST: - flow->tp_dst = get_unaligned_be16(value); - return 0; - - /* ICMP header. */ - case NFI_NXM_OF_ICMP_TYPE: - flow->tp_src = htons(*(uint8_t *) value); - return 0; - case NFI_NXM_OF_ICMP_CODE: - flow->tp_dst = htons(*(uint8_t *) value); - return 0; - - /* ICMPv6 header. */ - case NFI_NXM_NX_ICMPV6_TYPE: - flow->tp_src = htons(*(uint8_t *) value); - return 0; - case NFI_NXM_NX_ICMPV6_CODE: - flow->tp_dst = htons(*(uint8_t *) value); - return 0; - - /* IPv6 Neighbor Discovery. */ - case NFI_NXM_NX_ND_TARGET: - /* We've already verified that it's an ICMPv6 message. */ - if ((flow->tp_src != htons(ND_NEIGHBOR_SOLICIT)) - && (flow->tp_src != htons(ND_NEIGHBOR_ADVERT))) { - return NXM_BAD_PREREQ; - } - memcpy(&flow->nd_target, value, sizeof flow->nd_target); - return 0; - case NFI_NXM_NX_ND_SLL: - /* We've already verified that it's an ICMPv6 message. */ - if (flow->tp_src != htons(ND_NEIGHBOR_SOLICIT)) { - return NXM_BAD_PREREQ; - } - memcpy(flow->arp_sha, value, ETH_ADDR_LEN); - return 0; - case NFI_NXM_NX_ND_TLL: - /* We've already verified that it's an ICMPv6 message. 
*/ - if (flow->tp_src != htons(ND_NEIGHBOR_ADVERT)) { - return NXM_BAD_PREREQ; - } - memcpy(flow->arp_tha, value, ETH_ADDR_LEN); - return 0; - - /* ARP header. */ - case NFI_NXM_OF_ARP_OP: - if (ntohs(get_unaligned_be16(value)) > 255) { - return NXM_BAD_VALUE; - } else { - flow->nw_proto = ntohs(get_unaligned_be16(value)); - return 0; - } - - case NFI_NXM_NX_ARP_SHA: - memcpy(flow->arp_sha, value, ETH_ADDR_LEN); - return 0; - case NFI_NXM_NX_ARP_THA: - memcpy(flow->arp_tha, value, ETH_ADDR_LEN); - return 0; - - /* Tunnel ID. */ - case NFI_NXM_NX_TUN_ID: - if (wc->tun_id_mask) { - return NXM_DUP_TYPE; - } else { - cls_rule_set_tun_id(rule, get_unaligned_be64(value)); - return 0; - } - case NFI_NXM_NX_TUN_ID_W: - if (wc->tun_id_mask) { - return NXM_DUP_TYPE; - } else { - ovs_be64 tun_id = get_unaligned_be64(value); - ovs_be64 tun_mask = get_unaligned_be64(mask); - cls_rule_set_tun_id_masked(rule, tun_id, tun_mask); - return 0; - } - - /* Registers. */ - case NFI_NXM_NX_REG0: - case NFI_NXM_NX_REG0_W: -#if FLOW_N_REGS >= 2 - case NFI_NXM_NX_REG1: - case NFI_NXM_NX_REG1_W: -#endif -#if FLOW_N_REGS >= 3 - case NFI_NXM_NX_REG2: - case NFI_NXM_NX_REG2_W: -#endif -#if FLOW_N_REGS >= 4 - case NFI_NXM_NX_REG3: - case NFI_NXM_NX_REG3_W: -#endif -#if FLOW_N_REGS > 4 -#error -#endif - return parse_nx_reg(f, flow, wc, value, mask); - - case N_NXM_FIELDS: - NOT_REACHED(); - } - NOT_REACHED(); -} - -static bool -nxm_prereqs_ok(const struct nxm_field *field, const struct flow *flow) -{ - if (field->nw_proto && field->nw_proto != flow->nw_proto) { - return false; - } - - if (!field->dl_type[0]) { - return true; - } else if (field->dl_type[0] == flow->dl_type) { - return true; - } else if (field->dl_type[1] && field->dl_type[1] == flow->dl_type) { - return true; - } - - return false; -} - static uint32_t nx_entry_ok(const void *p, unsigned int match_len) { @@ -455,7 +67,8 @@ nx_entry_ok(const void *p, unsigned int match_len) if (match_len < 4) { if (match_len) { - VLOG_DBG_RL(&rl, "nx_match ends with partial nxm_header"); + VLOG_DBG_RL(&rl, "nx_match ends with partial (%u-byte) nxm_header", + match_len); } return 0; } @@ -477,57 +90,214 @@ nx_entry_ok(const void *p, unsigned int match_len) return header; } -int -nx_pull_match(struct ofpbuf *b, unsigned int match_len, uint16_t priority, - struct cls_rule *rule) +/* Given NXM/OXM value 'value' and mask 'mask', each 'width' bytes long, + * checks for any 1-bit in the value where there is a 0-bit in the mask. If it + * finds one, logs a warning. */ +static void +check_mask_consistency(const uint8_t *p, const struct mf_field *mf) +{ + unsigned int width = mf->n_bytes; + const uint8_t *value = p + 4; + const uint8_t *mask = p + 4 + width; + unsigned int i; + + for (i = 0; i < width; i++) { + if (value[i] & ~mask[i]) { + if (!VLOG_DROP_WARN(&rl)) { + char *s = nx_match_to_string(p, width * 2 + 4); + VLOG_WARN_RL(&rl, "NXM/OXM entry %s has 1-bits in value for " + "bits wildcarded by the mask. 
(Future versions " + "of OVS may report this as an OpenFlow error.)", + s); + break; + } + } + } +} + +static enum ofperr +nx_pull_raw(const uint8_t *p, unsigned int match_len, bool strict, + struct match *match, ovs_be64 *cookie, ovs_be64 *cookie_mask) { uint32_t header; - uint8_t *p; - p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8)); - if (!p) { - VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a " - "multiple of 8, is longer than space in message (max " - "length %zu)", match_len, b->size); - return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN); + ovs_assert((cookie != NULL) == (cookie_mask != NULL)); + + match_init_catchall(match); + if (cookie) { + *cookie = *cookie_mask = htonll(0); + } + if (!match_len) { + return 0; } - cls_rule_init_catchall(rule, priority); - while ((header = nx_entry_ok(p, match_len)) != 0) { - unsigned length = NXM_LENGTH(header); - const struct nxm_field *f; - int error; - - f = nxm_field_lookup(header); - if (!f) { - error = NXM_BAD_TYPE; - } else if (!nxm_prereqs_ok(f, &rule->flow)) { - error = NXM_BAD_PREREQ; - } else if (f->wildcard && !(rule->wc.wildcards & f->wildcard)) { - error = NXM_DUP_TYPE; + for (; + (header = nx_entry_ok(p, match_len)) != 0; + p += 4 + NXM_LENGTH(header), match_len -= 4 + NXM_LENGTH(header)) { + const struct mf_field *mf; + enum ofperr error; + + mf = mf_from_nxm_header(header); + if (!mf) { + if (strict) { + error = OFPERR_OFPBMC_BAD_FIELD; + } else { + continue; + } + } else if (!mf_are_prereqs_ok(mf, &match->flow)) { + error = OFPERR_OFPBMC_BAD_PREREQ; + } else if (!mf_is_all_wild(mf, &match->wc)) { + error = OFPERR_OFPBMC_DUP_FIELD; } else { - /* 'hasmask' and 'length' are known to be correct at this point - * because they are included in 'header' and nxm_field_lookup() - * checked them already. */ - rule->wc.wildcards &= ~f->wildcard; - error = parse_nxm_entry(rule, f, p + 4, p + 4 + length / 2); + unsigned int width = mf->n_bytes; + union mf_value value; + + memcpy(&value, p + 4, width); + if (!mf_is_value_valid(mf, &value)) { + error = OFPERR_OFPBMC_BAD_VALUE; + } else if (!NXM_HASMASK(header)) { + error = 0; + mf_set_value(mf, &value, match); + } else { + union mf_value mask; + + memcpy(&mask, p + 4 + width, width); + if (!mf_is_mask_valid(mf, &mask)) { + error = OFPERR_OFPBMC_BAD_MASK; + } else { + error = 0; + check_mask_consistency(p, mf); + mf_set(mf, &value, &mask, match); + } + } + } + + /* Check if the match is for a cookie rather than a classifier rule. */ + if ((header == NXM_NX_COOKIE || header == NXM_NX_COOKIE_W) && cookie) { + if (*cookie_mask) { + error = OFPERR_OFPBMC_DUP_FIELD; + } else { + unsigned int width = sizeof *cookie; + + memcpy(cookie, p + 4, width); + if (NXM_HASMASK(header)) { + memcpy(cookie_mask, p + 4 + width, width); + } else { + *cookie_mask = OVS_BE64_MAX; + } + error = 0; + } } + if (error) { - VLOG_DBG_RL(&rl, "bad nxm_entry with vendor=%"PRIu32", " - "field=%"PRIu32", hasmask=%"PRIu32", type=%"PRIu32" " - "(error %x)", + VLOG_DBG_RL(&rl, "bad nxm_entry %#08"PRIx32" (vendor=%"PRIu32", " + "field=%"PRIu32", hasmask=%"PRIu32", len=%"PRIu32"), " + "(%s)", header, NXM_VENDOR(header), NXM_FIELD(header), - NXM_HASMASK(header), NXM_TYPE(header), - error); + NXM_HASMASK(header), NXM_LENGTH(header), + ofperr_to_string(error)); return error; } + } + return match_len ? 
OFPERR_OFPBMC_BAD_LEN : 0; +} - p += 4 + length; - match_len -= 4 + length; +static enum ofperr +nx_pull_match__(struct ofpbuf *b, unsigned int match_len, bool strict, + struct match *match, + ovs_be64 *cookie, ovs_be64 *cookie_mask) +{ + uint8_t *p = NULL; + + if (match_len) { + p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8)); + if (!p) { + VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a " + "multiple of 8, is longer than space in message (max " + "length %"PRIu32")", match_len, ofpbuf_size(b)); + return OFPERR_OFPBMC_BAD_LEN; + } } - return match_len ? NXM_INVALID : 0; + return nx_pull_raw(p, match_len, strict, match, cookie, cookie_mask); +} + +/* Parses the nx_match formatted match description in 'b' with length + * 'match_len'. Stores the results in 'match'. If 'cookie' and 'cookie_mask' + * are valid pointers, then stores the cookie and mask in them if 'b' contains + * a "NXM_NX_COOKIE*" match. Otherwise, stores 0 in both. + * + * Fails with an error upon encountering an unknown NXM header. + * + * Returns 0 if successful, otherwise an OpenFlow error code. */ +enum ofperr +nx_pull_match(struct ofpbuf *b, unsigned int match_len, struct match *match, + ovs_be64 *cookie, ovs_be64 *cookie_mask) +{ + return nx_pull_match__(b, match_len, true, match, cookie, cookie_mask); +} + +/* Behaves the same as nx_pull_match(), but skips over unknown NXM headers, + * instead of failing with an error. */ +enum ofperr +nx_pull_match_loose(struct ofpbuf *b, unsigned int match_len, + struct match *match, + ovs_be64 *cookie, ovs_be64 *cookie_mask) +{ + return nx_pull_match__(b, match_len, false, match, cookie, cookie_mask); +} + +static enum ofperr +oxm_pull_match__(struct ofpbuf *b, bool strict, struct match *match) +{ + struct ofp11_match_header *omh = ofpbuf_data(b); + uint8_t *p; + uint16_t match_len; + + if (ofpbuf_size(b) < sizeof *omh) { + return OFPERR_OFPBMC_BAD_LEN; + } + + match_len = ntohs(omh->length); + if (match_len < sizeof *omh) { + return OFPERR_OFPBMC_BAD_LEN; + } + + if (omh->type != htons(OFPMT_OXM)) { + return OFPERR_OFPBMC_BAD_TYPE; + } + + p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8)); + if (!p) { + VLOG_DBG_RL(&rl, "oxm length %u, rounded up to a " + "multiple of 8, is longer than space in message (max " + "length %"PRIu32")", match_len, ofpbuf_size(b)); + return OFPERR_OFPBMC_BAD_LEN; + } + + return nx_pull_raw(p + sizeof *omh, match_len - sizeof *omh, + strict, match, NULL, NULL); +} + +/* Parses the oxm formatted match description preceded by a struct + * ofp11_match_header in 'b'. Stores the result in 'match'. + * + * Fails with an error when encountering unknown OXM headers. + * + * Returns 0 if successful, otherwise an OpenFlow error code. */ +enum ofperr +oxm_pull_match(struct ofpbuf *b, struct match *match) +{ + return oxm_pull_match__(b, true, match); +} + +/* Behaves the same as oxm_pull_match() with one exception. Skips over unknown + * OXM headers instead of failing with an error when they are encountered. */ +enum ofperr +oxm_pull_match_loose(struct ofpbuf *b, struct match *match) +{ + return oxm_pull_match__(b, false, match); } /* nx_put_match() and helpers. 
@@ -551,6 +321,24 @@ nxm_put_8(struct ofpbuf *b, uint32_t header, uint8_t value) ofpbuf_put(b, &value, sizeof value); } +static void +nxm_put_8m(struct ofpbuf *b, uint32_t header, uint8_t value, uint8_t mask) +{ + switch (mask) { + case 0: + break; + + case UINT8_MAX: + nxm_put_8(b, header, value); + break; + + default: + nxm_put_header(b, NXM_MAKE_WILD_HEADER(header)); + ofpbuf_put(b, &value, sizeof value); + ofpbuf_put(b, &mask, sizeof mask); + } +} + static void nxm_put_16(struct ofpbuf *b, uint32_t header, ovs_be16 value) { @@ -573,7 +361,7 @@ nxm_put_16m(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask) case 0: break; - case CONSTANT_HTONS(UINT16_MAX): + case OVS_BE16_MAX: nxm_put_16(b, header, value); break; @@ -605,7 +393,7 @@ nxm_put_32m(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask) case 0: break; - case CONSTANT_HTONL(UINT32_MAX): + case OVS_BE32_MAX: nxm_put_32(b, header, value); break; @@ -637,7 +425,7 @@ nxm_put_64m(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask) case 0: break; - case CONSTANT_HTONLL(UINT64_MAX): + case OVS_BE64_MAX: nxm_put_64(b, header, value); break; @@ -656,20 +444,18 @@ nxm_put_eth(struct ofpbuf *b, uint32_t header, } static void -nxm_put_eth_dst(struct ofpbuf *b, - flow_wildcards_t wc, const uint8_t value[ETH_ADDR_LEN]) +nxm_put_eth_masked(struct ofpbuf *b, uint32_t header, + const uint8_t value[ETH_ADDR_LEN], + const uint8_t mask[ETH_ADDR_LEN]) { - switch (wc & (FWW_DL_DST | FWW_ETH_MCAST)) { - case FWW_DL_DST | FWW_ETH_MCAST: - break; - default: - nxm_put_header(b, NXM_OF_ETH_DST_W); - ofpbuf_put(b, value, ETH_ADDR_LEN); - ofpbuf_put(b, flow_wildcards_to_dl_dst_mask(wc), ETH_ADDR_LEN); - break; - case 0: - nxm_put_eth(b, NXM_OF_ETH_DST, value); - break; + if (!eth_addr_is_zero(mask)) { + if (eth_mask_is_exact(mask)) { + nxm_put_eth(b, header, value); + } else { + nxm_put_header(b, NXM_MAKE_WILD_HEADER(header)); + ofpbuf_put(b, value, ETH_ADDR_LEN); + ofpbuf_put(b, mask, ETH_ADDR_LEN); + } } } @@ -689,176 +475,303 @@ nxm_put_ipv6(struct ofpbuf *b, uint32_t header, } } -/* Appends to 'b' the nx_match format that expresses 'cr' (except for - * 'cr->priority', because priority is not part of nx_match), plus enough - * zero bytes to pad the nx_match out to a multiple of 8. +static void +nxm_put_frag(struct ofpbuf *b, const struct match *match) +{ + uint8_t nw_frag = match->flow.nw_frag; + uint8_t nw_frag_mask = match->wc.masks.nw_frag; + + switch (nw_frag_mask) { + case 0: + break; + + case FLOW_NW_FRAG_MASK: + nxm_put_8(b, NXM_NX_IP_FRAG, nw_frag); + break; + + default: + nxm_put_8m(b, NXM_NX_IP_FRAG, nw_frag, + nw_frag_mask & FLOW_NW_FRAG_MASK); + break; + } +} + +static void +nxm_put_ip(struct ofpbuf *b, const struct match *match, + uint8_t icmp_proto, uint32_t icmp_type, uint32_t icmp_code, + bool oxm) +{ + const struct flow *flow = &match->flow; + + nxm_put_frag(b, match); + + if (match->wc.masks.nw_tos & IP_DSCP_MASK) { + if (oxm) { + nxm_put_8(b, OXM_OF_IP_DSCP, flow->nw_tos >> 2); + } else { + nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & IP_DSCP_MASK); + } + } + + if (match->wc.masks.nw_tos & IP_ECN_MASK) { + nxm_put_8(b, oxm ? OXM_OF_IP_ECN : NXM_NX_IP_ECN, + flow->nw_tos & IP_ECN_MASK); + } + + if (!oxm && match->wc.masks.nw_ttl) { + nxm_put_8(b, NXM_NX_IP_TTL, flow->nw_ttl); + } + + if (match->wc.masks.nw_proto) { + nxm_put_8(b, oxm ? OXM_OF_IP_PROTO : NXM_OF_IP_PROTO, flow->nw_proto); + + if (flow->nw_proto == IPPROTO_TCP) { + nxm_put_16m(b, oxm ? 
OXM_OF_TCP_SRC : NXM_OF_TCP_SRC, + flow->tp_src, match->wc.masks.tp_src); + nxm_put_16m(b, oxm ? OXM_OF_TCP_DST : NXM_OF_TCP_DST, + flow->tp_dst, match->wc.masks.tp_dst); + nxm_put_16m(b, NXM_NX_TCP_FLAGS, + flow->tcp_flags, match->wc.masks.tcp_flags); + } else if (flow->nw_proto == IPPROTO_UDP) { + nxm_put_16m(b, oxm ? OXM_OF_UDP_SRC : NXM_OF_UDP_SRC, + flow->tp_src, match->wc.masks.tp_src); + nxm_put_16m(b, oxm ? OXM_OF_UDP_DST : NXM_OF_UDP_DST, + flow->tp_dst, match->wc.masks.tp_dst); + } else if (flow->nw_proto == IPPROTO_SCTP) { + nxm_put_16m(b, OXM_OF_SCTP_SRC, flow->tp_src, + match->wc.masks.tp_src); + nxm_put_16m(b, OXM_OF_SCTP_DST, flow->tp_dst, + match->wc.masks.tp_dst); + } else if (flow->nw_proto == icmp_proto) { + if (match->wc.masks.tp_src) { + nxm_put_8(b, icmp_type, ntohs(flow->tp_src)); + } + if (match->wc.masks.tp_dst) { + nxm_put_8(b, icmp_code, ntohs(flow->tp_dst)); + } + } + } +} + +/* Appends to 'b' the nx_match format that expresses 'match'. For Flow Mod and + * Flow Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied. + * Otherwise, 'cookie_mask' should be zero. * * This function can cause 'b''s data to be reallocated. * * Returns the number of bytes appended to 'b', excluding padding. * - * If 'cr' is a catch-all rule that matches every packet, then this function + * If 'match' is a catch-all rule that matches every packet, then this function * appends nothing to 'b' and returns 0. */ -int -nx_put_match(struct ofpbuf *b, const struct cls_rule *cr) +static int +nx_put_raw(struct ofpbuf *b, bool oxm, const struct match *match, + ovs_be64 cookie, ovs_be64 cookie_mask) { - const flow_wildcards_t wc = cr->wc.wildcards; - const struct flow *flow = &cr->flow; - const size_t start_len = b->size; + const struct flow *flow = &match->flow; + const size_t start_len = ofpbuf_size(b); int match_len; int i; - BUILD_ASSERT_DECL(FLOW_WC_SEQ == 1); + BUILD_ASSERT_DECL(FLOW_WC_SEQ == 26); /* Metadata. */ - if (!(wc & FWW_IN_PORT)) { - uint16_t in_port = flow->in_port; - nxm_put_16(b, NXM_OF_IN_PORT, htons(in_port)); + if (match->wc.masks.dp_hash) { + if (!oxm) { + nxm_put_32m(b, NXM_NX_DP_HASH, htonl(flow->dp_hash), + htonl(match->wc.masks.dp_hash)); + } } - /* Ethernet. */ - nxm_put_eth_dst(b, wc, flow->dl_dst); - if (!(wc & FWW_DL_SRC)) { - nxm_put_eth(b, NXM_OF_ETH_SRC, flow->dl_src); + if (match->wc.masks.recirc_id) { + if (!oxm) { + nxm_put_32(b, NXM_NX_RECIRC_ID, htonl(flow->recirc_id)); + } } - if (!(wc & FWW_DL_TYPE)) { - nxm_put_16(b, NXM_OF_ETH_TYPE, - ofputil_dl_type_to_openflow(flow->dl_type)); + + if (match->wc.masks.in_port.ofp_port) { + ofp_port_t in_port = flow->in_port.ofp_port; + if (oxm) { + nxm_put_32(b, OXM_OF_IN_PORT, ofputil_port_to_ofp11(in_port)); + } else { + nxm_put_16(b, NXM_OF_IN_PORT, htons(ofp_to_u16(in_port))); + } } + /* Ethernet. */ + nxm_put_eth_masked(b, oxm ? OXM_OF_ETH_SRC : NXM_OF_ETH_SRC, + flow->dl_src, match->wc.masks.dl_src); + nxm_put_eth_masked(b, oxm ? OXM_OF_ETH_DST : NXM_OF_ETH_DST, + flow->dl_dst, match->wc.masks.dl_dst); + nxm_put_16m(b, oxm ? OXM_OF_ETH_TYPE : NXM_OF_ETH_TYPE, + ofputil_dl_type_to_openflow(flow->dl_type), + match->wc.masks.dl_type); + /* 802.1Q. 
*/ - nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci, cr->wc.vlan_tci_mask); + if (oxm) { + ovs_be16 VID_CFI_MASK = htons(VLAN_VID_MASK | VLAN_CFI); + ovs_be16 vid = flow->vlan_tci & VID_CFI_MASK; + ovs_be16 mask = match->wc.masks.vlan_tci & VID_CFI_MASK; + + if (mask == htons(VLAN_VID_MASK | VLAN_CFI)) { + nxm_put_16(b, OXM_OF_VLAN_VID, vid); + } else if (mask) { + nxm_put_16m(b, OXM_OF_VLAN_VID, vid, mask); + } - /* L3. */ - if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IP)) { - /* IP. */ - if (!(wc & FWW_NW_TOS)) { - nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & 0xfc); + if (vid && vlan_tci_to_pcp(match->wc.masks.vlan_tci)) { + nxm_put_8(b, OXM_OF_VLAN_PCP, vlan_tci_to_pcp(flow->vlan_tci)); } - nxm_put_32m(b, NXM_OF_IP_SRC, flow->nw_src, cr->wc.nw_src_mask); - nxm_put_32m(b, NXM_OF_IP_DST, flow->nw_dst, cr->wc.nw_dst_mask); - - if (!(wc & FWW_NW_PROTO)) { - nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto); - switch (flow->nw_proto) { - /* TCP. */ - case IPPROTO_TCP: - if (!(wc & FWW_TP_SRC)) { - nxm_put_16(b, NXM_OF_TCP_SRC, flow->tp_src); - } - if (!(wc & FWW_TP_DST)) { - nxm_put_16(b, NXM_OF_TCP_DST, flow->tp_dst); - } - break; - /* UDP. */ - case IPPROTO_UDP: - if (!(wc & FWW_TP_SRC)) { - nxm_put_16(b, NXM_OF_UDP_SRC, flow->tp_src); - } - if (!(wc & FWW_TP_DST)) { - nxm_put_16(b, NXM_OF_UDP_DST, flow->tp_dst); - } - break; + } else { + nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci, + match->wc.masks.vlan_tci); + } - /* ICMP. */ - case IPPROTO_ICMP: - if (!(wc & FWW_TP_SRC)) { - nxm_put_8(b, NXM_OF_ICMP_TYPE, ntohs(flow->tp_src)); - } - if (!(wc & FWW_TP_DST)) { - nxm_put_8(b, NXM_OF_ICMP_CODE, ntohs(flow->tp_dst)); - } - break; - } + /* MPLS. */ + if (eth_type_mpls(flow->dl_type)) { + if (match->wc.masks.mpls_lse[0] & htonl(MPLS_TC_MASK)) { + nxm_put_8(b, OXM_OF_MPLS_TC, mpls_lse_to_tc(flow->mpls_lse[0])); } - } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IPV6)) { - /* IPv6. */ - if (!(wc & FWW_NW_TOS)) { - nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & 0xfc); + if (match->wc.masks.mpls_lse[0] & htonl(MPLS_BOS_MASK)) { + nxm_put_8(b, OXM_OF_MPLS_BOS, mpls_lse_to_bos(flow->mpls_lse[0])); } - nxm_put_ipv6(b, NXM_NX_IPV6_SRC, &flow->ipv6_src, - &cr->wc.ipv6_src_mask); - nxm_put_ipv6(b, NXM_NX_IPV6_DST, &flow->ipv6_dst, - &cr->wc.ipv6_dst_mask); - - if (!(wc & FWW_NW_PROTO)) { - nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto); - switch (flow->nw_proto) { - /* TCP. */ - case IPPROTO_TCP: - if (!(wc & FWW_TP_SRC)) { - nxm_put_16(b, NXM_OF_TCP_SRC, flow->tp_src); - } - if (!(wc & FWW_TP_DST)) { - nxm_put_16(b, NXM_OF_TCP_DST, flow->tp_dst); - } - break; - /* UDP. */ - case IPPROTO_UDP: - if (!(wc & FWW_TP_SRC)) { - nxm_put_16(b, NXM_OF_UDP_SRC, flow->tp_src); - } - if (!(wc & FWW_TP_DST)) { - nxm_put_16(b, NXM_OF_UDP_DST, flow->tp_dst); - } - break; + if (match->wc.masks.mpls_lse[0] & htonl(MPLS_LABEL_MASK)) { + nxm_put_32(b, OXM_OF_MPLS_LABEL, + htonl(mpls_lse_to_label(flow->mpls_lse[0]))); + } + } - /* ICMPv6. 
*/ - case IPPROTO_ICMPV6: - if (!(wc & FWW_TP_SRC)) { - nxm_put_8(b, NXM_NX_ICMPV6_TYPE, ntohs(flow->tp_src)); - - if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) || - flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) { - if (!(wc & FWW_ND_TARGET)) { - nxm_put_ipv6(b, NXM_NX_ND_TARGET, &flow->nd_target, - &in6addr_exact); - } - if (!(wc & FWW_ARP_SHA) - && flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) { - nxm_put_eth(b, NXM_NX_ND_SLL, flow->arp_sha); - } - if (!(wc & FWW_ARP_THA) - && flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) { - nxm_put_eth(b, NXM_NX_ND_TLL, flow->arp_tha); - } - } - } - if (!(wc & FWW_TP_DST)) { - nxm_put_8(b, NXM_NX_ICMPV6_CODE, ntohs(flow->tp_dst)); - } - break; + /* L3. */ + if (flow->dl_type == htons(ETH_TYPE_IP)) { + /* IP. */ + nxm_put_32m(b, oxm ? OXM_OF_IPV4_SRC : NXM_OF_IP_SRC, + flow->nw_src, match->wc.masks.nw_src); + nxm_put_32m(b, oxm ? OXM_OF_IPV4_DST : NXM_OF_IP_DST, + flow->nw_dst, match->wc.masks.nw_dst); + nxm_put_ip(b, match, IPPROTO_ICMP, + oxm ? OXM_OF_ICMPV4_TYPE : NXM_OF_ICMP_TYPE, + oxm ? OXM_OF_ICMPV4_CODE : NXM_OF_ICMP_CODE, oxm); + } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) { + /* IPv6. */ + nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_SRC : NXM_NX_IPV6_SRC, + &flow->ipv6_src, &match->wc.masks.ipv6_src); + nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_DST : NXM_NX_IPV6_DST, + &flow->ipv6_dst, &match->wc.masks.ipv6_dst); + nxm_put_ip(b, match, IPPROTO_ICMPV6, + oxm ? OXM_OF_ICMPV6_TYPE : NXM_NX_ICMPV6_TYPE, + oxm ? OXM_OF_ICMPV6_CODE : NXM_NX_ICMPV6_CODE, oxm); + + nxm_put_32m(b, oxm ? OXM_OF_IPV6_FLABEL : NXM_NX_IPV6_LABEL, + flow->ipv6_label, match->wc.masks.ipv6_label); + + if (flow->nw_proto == IPPROTO_ICMPV6 + && (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) || + flow->tp_src == htons(ND_NEIGHBOR_ADVERT))) { + nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_ND_TARGET : NXM_NX_ND_TARGET, + &flow->nd_target, &match->wc.masks.nd_target); + if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) { + nxm_put_eth_masked(b, oxm ? OXM_OF_IPV6_ND_SLL : NXM_NX_ND_SLL, + flow->arp_sha, match->wc.masks.arp_sha); + } + if (flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) { + nxm_put_eth_masked(b, oxm ? OXM_OF_IPV6_ND_TLL : NXM_NX_ND_TLL, + flow->arp_tha, match->wc.masks.arp_tha); } } - } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_ARP)) { + } else if (flow->dl_type == htons(ETH_TYPE_ARP) || + flow->dl_type == htons(ETH_TYPE_RARP)) { /* ARP. */ - if (!(wc & FWW_NW_PROTO)) { - nxm_put_16(b, NXM_OF_ARP_OP, htons(flow->nw_proto)); - } - nxm_put_32m(b, NXM_OF_ARP_SPA, flow->nw_src, cr->wc.nw_src_mask); - nxm_put_32m(b, NXM_OF_ARP_TPA, flow->nw_dst, cr->wc.nw_dst_mask); - if (!(wc & FWW_ARP_SHA)) { - nxm_put_eth(b, NXM_NX_ARP_SHA, flow->arp_sha); - } - if (!(wc & FWW_ARP_THA)) { - nxm_put_eth(b, NXM_NX_ARP_THA, flow->arp_tha); + if (match->wc.masks.nw_proto) { + nxm_put_16(b, oxm ? OXM_OF_ARP_OP : NXM_OF_ARP_OP, + htons(flow->nw_proto)); } + nxm_put_32m(b, oxm ? OXM_OF_ARP_SPA : NXM_OF_ARP_SPA, + flow->nw_src, match->wc.masks.nw_src); + nxm_put_32m(b, oxm ? OXM_OF_ARP_TPA : NXM_OF_ARP_TPA, + flow->nw_dst, match->wc.masks.nw_dst); + nxm_put_eth_masked(b, oxm ? OXM_OF_ARP_SHA : NXM_NX_ARP_SHA, + flow->arp_sha, match->wc.masks.arp_sha); + nxm_put_eth_masked(b, oxm ? OXM_OF_ARP_THA : NXM_NX_ARP_THA, + flow->arp_tha, match->wc.masks.arp_tha); } /* Tunnel ID. */ - nxm_put_64m(b, NXM_NX_TUN_ID, flow->tun_id, cr->wc.tun_id_mask); + nxm_put_64m(b, oxm ? OXM_OF_TUNNEL_ID : NXM_NX_TUN_ID, + flow->tunnel.tun_id, match->wc.masks.tunnel.tun_id); + + /* Other tunnel metadata. 
*/ + nxm_put_32m(b, NXM_NX_TUN_IPV4_SRC, + flow->tunnel.ip_src, match->wc.masks.tunnel.ip_src); + nxm_put_32m(b, NXM_NX_TUN_IPV4_DST, + flow->tunnel.ip_dst, match->wc.masks.tunnel.ip_dst); /* Registers. */ for (i = 0; i < FLOW_N_REGS; i++) { nxm_put_32m(b, NXM_NX_REG(i), - htonl(flow->regs[i]), htonl(cr->wc.reg_masks[i])); + htonl(flow->regs[i]), htonl(match->wc.masks.regs[i])); } - match_len = b->size - start_len; - ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len); + /* Mark. */ + nxm_put_32m(b, NXM_NX_PKT_MARK, htonl(flow->pkt_mark), + htonl(match->wc.masks.pkt_mark)); + + /* OpenFlow 1.1+ Metadata. */ + nxm_put_64m(b, OXM_OF_METADATA, flow->metadata, match->wc.masks.metadata); + + /* Cookie. */ + nxm_put_64m(b, NXM_NX_COOKIE, cookie, cookie_mask); + + match_len = ofpbuf_size(b) - start_len; + return match_len; +} + +/* Appends to 'b' the nx_match format that expresses 'match', plus enough zero + * bytes to pad the nx_match out to a multiple of 8. For Flow Mod and Flow + * Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied. + * Otherwise, 'cookie_mask' should be zero. + * + * This function can cause 'b''s data to be reallocated. + * + * Returns the number of bytes appended to 'b', excluding padding. The return + * value can be zero if it appended nothing at all to 'b' (which happens if + * 'cr' is a catch-all rule that matches every packet). */ +int +nx_put_match(struct ofpbuf *b, const struct match *match, + ovs_be64 cookie, ovs_be64 cookie_mask) +{ + int match_len = nx_put_raw(b, false, match, cookie, cookie_mask); + + ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8)); + return match_len; +} + + +/* Appends to 'b' an struct ofp11_match_header followed by the oxm format that + * expresses 'cr', plus enough zero bytes to pad the data appended out to a + * multiple of 8. + * + * This function can cause 'b''s data to be reallocated. + * + * Returns the number of bytes appended to 'b', excluding the padding. Never + * returns zero. 
*/ +int +oxm_put_match(struct ofpbuf *b, const struct match *match) +{ + int match_len; + struct ofp11_match_header *omh; + size_t start_len = ofpbuf_size(b); + ovs_be64 cookie = htonll(0), cookie_mask = htonll(0); + + ofpbuf_put_uninit(b, sizeof *omh); + match_len = nx_put_raw(b, true, match, cookie, cookie_mask) + sizeof *omh; + ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8)); + + omh = ofpbuf_at(b, start_len, sizeof *omh); + omh->type = htons(OFPMT_OXM); + omh->length = htons(match_len); + return match_len; } @@ -917,12 +830,57 @@ nx_match_to_string(const uint8_t *p, unsigned int match_len) return ds_steal_cstr(&s); } +char * +oxm_match_to_string(const struct ofpbuf *p, unsigned int match_len) +{ + const struct ofp11_match_header *omh = ofpbuf_data(p); + uint16_t match_len_; + struct ds s; + + ds_init(&s); + + if (match_len < sizeof *omh) { + ds_put_format(&s, "", match_len); + goto err; + } + + if (omh->type != htons(OFPMT_OXM)) { + ds_put_format(&s, "", ntohs(omh->type)); + goto err; + } + + match_len_ = ntohs(omh->length); + if (match_len_ < sizeof *omh) { + ds_put_format(&s, "", match_len_); + goto err; + } + + if (match_len_ != match_len) { + ds_put_format(&s, "", + match_len_, match_len); + goto err; + } + + return nx_match_to_string(ofpbuf_at(p, sizeof *omh, 0), + match_len - sizeof *omh); + +err: + return ds_steal_cstr(&s); +} + static void format_nxm_field_name(struct ds *s, uint32_t header) { - const struct nxm_field *f = nxm_field_lookup(header); - if (f) { - ds_put_cstr(s, f->name); + const struct mf_field *mf = mf_from_nxm_header(header); + if (mf) { + ds_put_cstr(s, IS_OXM_HEADER(header) ? mf->oxm_name : mf->nxm_name); + if (NXM_HASMASK(header)) { + ds_put_cstr(s, "_W"); + } + } else if (header == NXM_NX_COOKIE) { + ds_put_cstr(s, "NXM_NX_COOKIE"); + } else if (header == NXM_NX_COOKIE_W) { + ds_put_cstr(s, "NXM_NX_COOKIE_W"); } else { ds_put_format(s, "%d:%d", NXM_VENDOR(header), NXM_FIELD(header)); } @@ -931,12 +889,44 @@ format_nxm_field_name(struct ds *s, uint32_t header) static uint32_t parse_nxm_field_name(const char *name, int name_len) { - const struct nxm_field *f; + bool wild; + int i; /* Check whether it's a field name. */ - for (f = nxm_fields; f < &nxm_fields[ARRAY_SIZE(nxm_fields)]; f++) { - if (!strncmp(f->name, name, name_len) && f->name[name_len] == '\0') { - return f->header; + wild = name_len > 2 && !memcmp(&name[name_len - 2], "_W", 2); + if (wild) { + name_len -= 2; + } + + for (i = 0; i < MFF_N_IDS; i++) { + const struct mf_field *mf = mf_from_id(i); + uint32_t header; + + if (mf->nxm_name && + !strncmp(mf->nxm_name, name, name_len) && + mf->nxm_name[name_len] == '\0') { + header = mf->nxm_header; + } else if (mf->oxm_name && + !strncmp(mf->oxm_name, name, name_len) && + mf->oxm_name[name_len] == '\0') { + header = mf->oxm_header; + } else { + continue; + } + + if (!wild) { + return header; + } else if (mf->maskable != MFM_NONE) { + return NXM_MAKE_WILD_HEADER(header); + } + } + + if (!strncmp("NXM_NX_COOKIE", name, name_len) && + (name_len == strlen("NXM_NX_COOKIE"))) { + if (!wild) { + return NXM_NX_COOKIE; + } else { + return NXM_NX_COOKIE_W; } } @@ -954,15 +944,14 @@ parse_nxm_field_name(const char *name, int name_len) /* nx_match_from_string(). 
*/ -int -nx_match_from_string(const char *s, struct ofpbuf *b) +static int +nx_match_from_string_raw(const char *s, struct ofpbuf *b) { const char *full_s = s; - const size_t start_len = b->size; - int match_len; + const size_t start_len = ofpbuf_size(b); if (!strcmp(s, "")) { - /* Ensure that 'b->data' isn't actually null. */ + /* Ensure that 'ofpbuf_data(b)' isn't actually null. */ ofpbuf_prealloc_tailroom(b, 1); return 0; } @@ -1011,496 +1000,437 @@ nx_match_from_string(const char *s, struct ofpbuf *b) s++; } - match_len = b->size - start_len; - ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len); + return ofpbuf_size(b) - start_len; +} + +int +nx_match_from_string(const char *s, struct ofpbuf *b) +{ + int match_len = nx_match_from_string_raw(s, b); + ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8)); return match_len; } - -const char * -nxm_parse_field_bits(const char *s, uint32_t *headerp, int *ofsp, int *n_bitsp) + +int +oxm_match_from_string(const char *s, struct ofpbuf *b) { - const char *full_s = s; - const char *name; - uint32_t header; - int start, end; - int name_len; - int width; - - name = s; - name_len = strcspn(s, "["); - if (s[name_len] != '[') { - ovs_fatal(0, "%s: missing [ looking for field name", full_s); - } + int match_len; + struct ofp11_match_header *omh; + size_t start_len = ofpbuf_size(b); - header = parse_nxm_field_name(name, name_len); - if (!header) { - ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s); - } - width = nxm_field_bits(header); - - s += name_len; - if (sscanf(s, "[%d..%d]", &start, &end) == 2) { - /* Nothing to do. */ - } else if (sscanf(s, "[%d]", &start) == 1) { - end = start; - } else if (!strncmp(s, "[]", 2)) { - start = 0; - end = width - 1; - } else { - ovs_fatal(0, "%s: syntax error expecting [] or [] or " - "[..]", full_s); - } - s = strchr(s, ']') + 1; - - if (start > end) { - ovs_fatal(0, "%s: starting bit %d is after ending bit %d", - full_s, start, end); - } else if (start >= width) { - ovs_fatal(0, "%s: starting bit %d is not valid because field is only " - "%d bits wide", full_s, start, width); - } else if (end >= width){ - ovs_fatal(0, "%s: ending bit %d is not valid because field is only " - "%d bits wide", full_s, end, width); - } + ofpbuf_put_uninit(b, sizeof *omh); + match_len = nx_match_from_string_raw(s, b) + sizeof *omh; + ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8)); - *headerp = header; - *ofsp = start; - *n_bitsp = end - start + 1; + omh = ofpbuf_at(b, start_len, sizeof *omh); + omh->type = htons(OFPMT_OXM); + omh->length = htons(match_len); - return s; + return match_len; } - -void -nxm_parse_reg_move(struct nx_action_reg_move *move, const char *s) + +/* Parses 's' as a "move" action, in the form described in ovs-ofctl(8), into + * '*move'. + * + * Returns NULL if successful, otherwise a malloc()'d string describing the + * error. The caller is responsible for freeing the returned string. 
*/ +char * WARN_UNUSED_RESULT +nxm_parse_reg_move(struct ofpact_reg_move *move, const char *s) { const char *full_s = s; - uint32_t src, dst; - int src_ofs, dst_ofs; - int src_n_bits, dst_n_bits; + char *error; - s = nxm_parse_field_bits(s, &src, &src_ofs, &src_n_bits); + error = mf_parse_subfield__(&move->src, &s); + if (error) { + return error; + } if (strncmp(s, "->", 2)) { - ovs_fatal(0, "%s: missing `->' following source", full_s); + return xasprintf("%s: missing `->' following source", full_s); } s += 2; - s = nxm_parse_field_bits(s, &dst, &dst_ofs, &dst_n_bits); - if (*s != '\0') { - ovs_fatal(0, "%s: trailing garbage following destination", full_s); + error = mf_parse_subfield(&move->dst, s); + if (error) { + return error; } - if (src_n_bits != dst_n_bits) { - ovs_fatal(0, "%s: source field is %d bits wide but destination is " - "%d bits wide", full_s, src_n_bits, dst_n_bits); + if (move->src.n_bits != move->dst.n_bits) { + return xasprintf("%s: source field is %d bits wide but destination is " + "%d bits wide", full_s, + move->src.n_bits, move->dst.n_bits); } - - move->type = htons(OFPAT_VENDOR); - move->len = htons(sizeof *move); - move->vendor = htonl(NX_VENDOR_ID); - move->subtype = htons(NXAST_REG_MOVE); - move->n_bits = htons(src_n_bits); - move->src_ofs = htons(src_ofs); - move->dst_ofs = htons(dst_ofs); - move->src = htonl(src); - move->dst = htonl(dst); + return NULL; } -void -nxm_parse_reg_load(struct nx_action_reg_load *load, const char *s) +/* Parses 's' as a "load" action, in the form described in ovs-ofctl(8), into + * '*load'. + * + * Returns NULL if successful, otherwise a malloc()'d string describing the + * error. The caller is responsible for freeing the returned string. */ +char * WARN_UNUSED_RESULT +nxm_parse_reg_load(struct ofpact_reg_load *load, const char *s) { const char *full_s = s; - uint32_t dst; - int ofs, n_bits; - uint64_t value; + uint64_t value = strtoull(s, (char **) &s, 0); + char *error; - value = strtoull(s, (char **) &s, 0); if (strncmp(s, "->", 2)) { - ovs_fatal(0, "%s: missing `->' following value", full_s); + return xasprintf("%s: missing `->' following value", full_s); } s += 2; - s = nxm_parse_field_bits(s, &dst, &ofs, &n_bits); - if (*s != '\0') { - ovs_fatal(0, "%s: trailing garbage following destination", full_s); + error = mf_parse_subfield(&load->dst, s); + if (error) { + return error; } - if (n_bits < 64 && (value >> n_bits) != 0) { - ovs_fatal(0, "%s: value %"PRIu64" does not fit into %d bits", - full_s, value, n_bits); + if (load->dst.n_bits < 64 && (value >> load->dst.n_bits) != 0) { + return xasprintf("%s: value %"PRIu64" does not fit into %d bits", + full_s, value, load->dst.n_bits); } - load->type = htons(OFPAT_VENDOR); - load->len = htons(sizeof *load); - load->vendor = htonl(NX_VENDOR_ID); - load->subtype = htons(NXAST_REG_LOAD); - load->ofs_nbits = nxm_encode_ofs_nbits(ofs, n_bits); - load->dst = htonl(dst); - load->value = htonll(value); + load->subvalue.be64[0] = htonll(0); + load->subvalue.be64[1] = htonll(value); + return NULL; } /* nxm_format_reg_move(), nxm_format_reg_load(). 
*/ void -nxm_format_field_bits(struct ds *s, uint32_t header, int ofs, int n_bits) +nxm_format_reg_move(const struct ofpact_reg_move *move, struct ds *s) { - format_nxm_field_name(s, header); - if (ofs == 0 && n_bits == nxm_field_bits(header)) { - ds_put_cstr(s, "[]"); - } else if (n_bits == 1) { - ds_put_format(s, "[%d]", ofs); - } else { - ds_put_format(s, "[%d..%d]", ofs, ofs + n_bits - 1); - } + ds_put_format(s, "move:"); + mf_format_subfield(&move->src, s); + ds_put_cstr(s, "->"); + mf_format_subfield(&move->dst, s); } void -nxm_format_reg_move(const struct nx_action_reg_move *move, struct ds *s) +nxm_format_reg_load(const struct ofpact_reg_load *load, struct ds *s) { - int n_bits = ntohs(move->n_bits); - int src_ofs = ntohs(move->src_ofs); - int dst_ofs = ntohs(move->dst_ofs); - uint32_t src = ntohl(move->src); - uint32_t dst = ntohl(move->dst); - - ds_put_format(s, "move:"); - nxm_format_field_bits(s, src, src_ofs, n_bits); + ds_put_cstr(s, "load:"); + mf_format_subvalue(&load->subvalue, s); ds_put_cstr(s, "->"); - nxm_format_field_bits(s, dst, dst_ofs, n_bits); + mf_format_subfield(&load->dst, s); } - -void -nxm_format_reg_load(const struct nx_action_reg_load *load, struct ds *s) + +enum ofperr +nxm_reg_move_from_openflow(const struct nx_action_reg_move *narm, + struct ofpbuf *ofpacts) { - int ofs = nxm_decode_ofs(load->ofs_nbits); - int n_bits = nxm_decode_n_bits(load->ofs_nbits); - uint32_t dst = ntohl(load->dst); - uint64_t value = ntohll(load->value); + struct ofpact_reg_move *move; - ds_put_format(s, "load:%#"PRIx64"->", value); - nxm_format_field_bits(s, dst, ofs, n_bits); + move = ofpact_put_REG_MOVE(ofpacts); + move->src.field = mf_from_nxm_header(ntohl(narm->src)); + move->src.ofs = ntohs(narm->src_ofs); + move->src.n_bits = ntohs(narm->n_bits); + move->dst.field = mf_from_nxm_header(ntohl(narm->dst)); + move->dst.ofs = ntohs(narm->dst_ofs); + move->dst.n_bits = ntohs(narm->n_bits); + + return nxm_reg_move_check(move, NULL); } - -/* nxm_check_reg_move(), nxm_check_reg_load(). */ -static bool -field_ok(const struct nxm_field *f, const struct flow *flow, int size) +enum ofperr +nxm_reg_load_from_openflow(const struct nx_action_reg_load *narl, + struct ofpbuf *ofpacts) { - return (f && !NXM_HASMASK(f->header) - && nxm_prereqs_ok(f, flow) && size <= nxm_field_bits(f->header)); -} + struct ofpact_reg_load *load; + + load = ofpact_put_REG_LOAD(ofpacts); + load->dst.field = mf_from_nxm_header(ntohl(narl->dst)); + load->dst.ofs = nxm_decode_ofs(narl->ofs_nbits); + load->dst.n_bits = nxm_decode_n_bits(narl->ofs_nbits); + load->subvalue.be64[1] = narl->value; + + /* Reject 'narl' if a bit numbered 'n_bits' or higher is set to 1 in + * narl->value. 
*/ + if (load->dst.n_bits < 64 && + ntohll(narl->value) >> load->dst.n_bits) { + return OFPERR_OFPBAC_BAD_ARGUMENT; + } -int -nxm_check_reg_move(const struct nx_action_reg_move *action, - const struct flow *flow) + return nxm_reg_load_check(load, NULL); +} + +enum ofperr +nxm_reg_move_check(const struct ofpact_reg_move *move, const struct flow *flow) { - const struct nxm_field *src; - const struct nxm_field *dst; + enum ofperr error; - if (action->n_bits == htons(0)) { - return BAD_ARGUMENT; + error = mf_check_src(&move->src, flow); + if (error) { + return error; } - src = nxm_field_lookup(ntohl(action->src)); - if (!field_ok(src, flow, ntohs(action->src_ofs) + ntohs(action->n_bits))) { - return BAD_ARGUMENT; - } + return mf_check_dst(&move->dst, NULL); +} - dst = nxm_field_lookup(ntohl(action->dst)); - if (!field_ok(dst, flow, ntohs(action->dst_ofs) + ntohs(action->n_bits))) { - return BAD_ARGUMENT; - } +enum ofperr +nxm_reg_load_check(const struct ofpact_reg_load *load, const struct flow *flow) +{ + return mf_check_dst(&load->dst, flow); +} + +void +nxm_reg_move_to_nxast(const struct ofpact_reg_move *move, + struct ofpbuf *openflow) +{ + struct nx_action_reg_move *narm; + + narm = ofputil_put_NXAST_REG_MOVE(openflow); + narm->n_bits = htons(move->dst.n_bits); + narm->src_ofs = htons(move->src.ofs); + narm->dst_ofs = htons(move->dst.ofs); + narm->src = htonl(move->src.field->nxm_header); + narm->dst = htonl(move->dst.field->nxm_header); +} - if (!dst->writable) { - return BAD_ARGUMENT; - } +void +nxm_reg_load_to_nxast(const struct ofpact_reg_load *load, + struct ofpbuf *openflow) +{ + struct nx_action_reg_load *narl; - return 0; + narl = ofputil_put_NXAST_REG_LOAD(openflow); + narl->ofs_nbits = nxm_encode_ofs_nbits(load->dst.ofs, load->dst.n_bits); + narl->dst = htonl(load->dst.field->nxm_header); + narl->value = load->subvalue.be64[1]; } + +/* nxm_execute_reg_move(), nxm_execute_reg_load(). */ -/* Given a flow, checks that the destination field represented by 'dst_header' - * and 'ofs_nbits' is valid and large enough for 'min_n_bits' bits of data. 
*/ -int -nxm_dst_check(ovs_be32 dst_header, ovs_be16 ofs_nbits, size_t min_n_bits, - const struct flow *flow) -{ - const struct nxm_field *dst; - int ofs, n_bits; - - ofs = nxm_decode_ofs(ofs_nbits); - n_bits = nxm_decode_n_bits(ofs_nbits); - dst = nxm_field_lookup(ntohl(dst_header)); - - if (!field_ok(dst, flow, ofs + n_bits)) { - VLOG_WARN_RL(&rl, "invalid destination field"); - } else if (!dst->writable) { - VLOG_WARN_RL(&rl, "destination field is not writable"); - } else if (n_bits < min_n_bits) { - VLOG_WARN_RL(&rl, "insufficient bits in destination"); - } else { - return 0; - } +void +nxm_execute_reg_move(const struct ofpact_reg_move *move, + struct flow *flow, struct flow_wildcards *wc) +{ + union mf_value src_value; + union mf_value dst_value; + + mf_mask_field_and_prereqs(move->dst.field, &wc->masks); + mf_mask_field_and_prereqs(move->src.field, &wc->masks); + + mf_get_value(move->dst.field, flow, &dst_value); + mf_get_value(move->src.field, flow, &src_value); + bitwise_copy(&src_value, move->src.field->n_bytes, move->src.ofs, + &dst_value, move->dst.field->n_bytes, move->dst.ofs, + move->src.n_bits); + mf_set_flow_value(move->dst.field, &dst_value, flow); +} - return BAD_ARGUMENT; +void +nxm_execute_reg_load(const struct ofpact_reg_load *load, struct flow *flow, + struct flow_wildcards *wc) +{ + /* Since at the datapath interface we do not have set actions for + * individual fields, but larger sets of fields for a given protocol + * layer, the set action will in practice only ever apply to exactly + * matched flows for the given protocol layer. For example, if the + * reg_load changes the IP TTL, the corresponding datapath action will + * rewrite also the IP addresses and TOS byte. Since these other field + * values may not be explicitly set, they depend on the incoming flow field + * values, and are hence all of them are set in the wildcards masks, when + * the action is committed to the datapath. For the rare case, where the + * reg_load action does not actually change the value, and no other flow + * field values are set (or loaded), the datapath action is skipped, and + * no mask bits are set. Such a datapath flow should, however, be + * dependent on the specific field value, so the corresponding wildcard + * mask bits must be set, lest the datapath flow be applied to packets + * containing some other value in the field and the field value remain + * unchanged regardless of the incoming value. + * + * We set the masks here for the whole fields, and their prerequisities. + * Even if only the lower byte of a TCP destination port is set, + * we set the mask for the whole field, and also the ip_proto in the IP + * header, so that the kernel flow would not be applied on, e.g., a UDP + * packet, or any other IP protocol in addition to TCP packets. 
+ */ + mf_mask_field_and_prereqs(load->dst.field, &wc->masks); + mf_write_subfield_flow(&load->dst, &load->subvalue, flow); } -int -nxm_check_reg_load(const struct nx_action_reg_load *action, - const struct flow *flow) +void +nxm_reg_load(const struct mf_subfield *dst, uint64_t src_data, + struct flow *flow, struct flow_wildcards *wc) +{ + union mf_subvalue src_subvalue; + union mf_subvalue mask_value; + ovs_be64 src_data_be = htonll(src_data); + + memset(&mask_value, 0xff, sizeof mask_value); + mf_write_subfield_flow(dst, &mask_value, &wc->masks); + + bitwise_copy(&src_data_be, sizeof src_data_be, 0, + &src_subvalue, sizeof src_subvalue, 0, + sizeof src_data_be * 8); + mf_write_subfield_flow(dst, &src_subvalue, flow); +} + +/* nxm_parse_stack_action, works for both push() and pop(). */ + +/* Parses 's' as a "push" or "pop" action, in the form described in + * ovs-ofctl(8), into '*stack_action'. + * + * Returns NULL if successful, otherwise a malloc()'d string describing the + * error. The caller is responsible for freeing the returned string. */ +char * WARN_UNUSED_RESULT +nxm_parse_stack_action(struct ofpact_stack *stack_action, const char *s) { - int n_bits; - int error; + char *error; - error = nxm_dst_check(action->dst, action->ofs_nbits, 0, flow); + error = mf_parse_subfield__(&stack_action->subfield, &s); if (error) { return error; } - /* Reject 'action' if a bit numbered 'n_bits' or higher is set to 1 in - * action->value. */ - n_bits = nxm_decode_n_bits(action->ofs_nbits); - if (n_bits < 64 && ntohll(action->value) >> n_bits) { - return BAD_ARGUMENT; + if (*s != '\0') { + return xasprintf("%s: trailing garbage following push or pop", s); } - return 0; + return NULL; } - -/* nxm_execute_reg_move(), nxm_execute_reg_load(). */ -static uint64_t -nxm_read_field(const struct nxm_field *src, const struct flow *flow) -{ - switch (src->index) { - case NFI_NXM_OF_IN_PORT: - return flow->in_port; - - case NFI_NXM_OF_ETH_DST: - return eth_addr_to_uint64(flow->dl_dst); - - case NFI_NXM_OF_ETH_SRC: - return eth_addr_to_uint64(flow->dl_src); - - case NFI_NXM_OF_ETH_TYPE: - return ntohs(ofputil_dl_type_to_openflow(flow->dl_type)); - - case NFI_NXM_OF_VLAN_TCI: - return ntohs(flow->vlan_tci); - - case NFI_NXM_OF_IP_TOS: - return flow->nw_tos; - - case NFI_NXM_OF_IP_PROTO: - case NFI_NXM_OF_ARP_OP: - return flow->nw_proto; - - case NFI_NXM_OF_IP_SRC: - case NFI_NXM_OF_ARP_SPA: - return ntohl(flow->nw_src); - - case NFI_NXM_OF_IP_DST: - case NFI_NXM_OF_ARP_TPA: - return ntohl(flow->nw_dst); - - case NFI_NXM_OF_TCP_SRC: - case NFI_NXM_OF_UDP_SRC: - return ntohs(flow->tp_src); - - case NFI_NXM_OF_TCP_DST: - case NFI_NXM_OF_UDP_DST: - return ntohs(flow->tp_dst); - - case NFI_NXM_OF_ICMP_TYPE: - case NFI_NXM_NX_ICMPV6_TYPE: - return ntohs(flow->tp_src) & 0xff; - - case NFI_NXM_OF_ICMP_CODE: - case NFI_NXM_NX_ICMPV6_CODE: - return ntohs(flow->tp_dst) & 0xff; - - case NFI_NXM_NX_TUN_ID: - return ntohll(flow->tun_id); - -#define NXM_READ_REGISTER(IDX) \ - case NFI_NXM_NX_REG##IDX: \ - return flow->regs[IDX]; \ - case NFI_NXM_NX_REG##IDX##_W: \ - NOT_REACHED(); - - NXM_READ_REGISTER(0); -#if FLOW_N_REGS >= 2 - NXM_READ_REGISTER(1); -#endif -#if FLOW_N_REGS >= 3 - NXM_READ_REGISTER(2); -#endif -#if FLOW_N_REGS >= 4 - NXM_READ_REGISTER(3); -#endif -#if FLOW_N_REGS > 4 -#error -#endif - - case NFI_NXM_NX_ARP_SHA: - case NFI_NXM_NX_ND_SLL: - return eth_addr_to_uint64(flow->arp_sha); - - case NFI_NXM_NX_ARP_THA: - case NFI_NXM_NX_ND_TLL: - return eth_addr_to_uint64(flow->arp_tha); - - case NFI_NXM_NX_TUN_ID_W: - 
case NFI_NXM_OF_ETH_DST_W: - case NFI_NXM_OF_VLAN_TCI_W: - case NFI_NXM_OF_IP_SRC_W: - case NFI_NXM_OF_IP_DST_W: - case NFI_NXM_OF_ARP_SPA_W: - case NFI_NXM_OF_ARP_TPA_W: - case NFI_NXM_NX_IPV6_SRC: - case NFI_NXM_NX_IPV6_SRC_W: - case NFI_NXM_NX_IPV6_DST: - case NFI_NXM_NX_IPV6_DST_W: - case NFI_NXM_NX_ND_TARGET: - case N_NXM_FIELDS: - NOT_REACHED(); - } +void +nxm_format_stack_push(const struct ofpact_stack *push, struct ds *s) +{ + ds_put_cstr(s, "push:"); + mf_format_subfield(&push->subfield, s); +} - NOT_REACHED(); +void +nxm_format_stack_pop(const struct ofpact_stack *pop, struct ds *s) +{ + ds_put_cstr(s, "pop:"); + mf_format_subfield(&pop->subfield, s); } +/* Common set for both push and pop actions. */ static void -nxm_write_field(const struct nxm_field *dst, struct flow *flow, - uint64_t new_value) +stack_action_from_openflow__(const struct nx_action_stack *nasp, + struct ofpact_stack *stack_action) { - switch (dst->index) { - case NFI_NXM_OF_ETH_DST: - eth_addr_from_uint64(new_value, flow->dl_dst); - break; + stack_action->subfield.field = mf_from_nxm_header(ntohl(nasp->field)); + stack_action->subfield.ofs = ntohs(nasp->offset); + stack_action->subfield.n_bits = ntohs(nasp->n_bits); +} - case NFI_NXM_OF_ETH_SRC: - eth_addr_from_uint64(new_value, flow->dl_src); - break; +static void +nxm_stack_to_nxast__(const struct ofpact_stack *stack_action, + struct nx_action_stack *nasp) +{ + nasp->offset = htons(stack_action->subfield.ofs); + nasp->n_bits = htons(stack_action->subfield.n_bits); + nasp->field = htonl(stack_action->subfield.field->nxm_header); +} - case NFI_NXM_OF_VLAN_TCI: - flow->vlan_tci = htons(new_value); - break; +enum ofperr +nxm_stack_push_from_openflow(const struct nx_action_stack *nasp, + struct ofpbuf *ofpacts) +{ + struct ofpact_stack *push; - case NFI_NXM_NX_TUN_ID: - flow->tun_id = htonll(new_value); - break; + push = ofpact_put_STACK_PUSH(ofpacts); + stack_action_from_openflow__(nasp, push); -#define NXM_WRITE_REGISTER(IDX) \ - case NFI_NXM_NX_REG##IDX: \ - flow->regs[IDX] = new_value; \ - break; \ - case NFI_NXM_NX_REG##IDX##_W: \ - NOT_REACHED(); - - NXM_WRITE_REGISTER(0); -#if FLOW_N_REGS >= 2 - NXM_WRITE_REGISTER(1); -#endif -#if FLOW_N_REGS >= 3 - NXM_WRITE_REGISTER(2); -#endif -#if FLOW_N_REGS >= 4 - NXM_WRITE_REGISTER(3); -#endif -#if FLOW_N_REGS > 4 -#error -#endif - - case NFI_NXM_OF_IP_TOS: - flow->nw_tos = new_value & IP_DSCP_MASK; - break; + return nxm_stack_push_check(push, NULL); +} - case NFI_NXM_OF_IP_SRC: - flow->nw_src = htonl(new_value); - break; +enum ofperr +nxm_stack_pop_from_openflow(const struct nx_action_stack *nasp, + struct ofpbuf *ofpacts) +{ + struct ofpact_stack *pop; - case NFI_NXM_OF_IP_DST: - flow->nw_dst = htonl(new_value); - break; + pop = ofpact_put_STACK_POP(ofpacts); + stack_action_from_openflow__(nasp, pop); - case NFI_NXM_OF_TCP_SRC: - case NFI_NXM_OF_UDP_SRC: - flow->tp_src = htons(new_value); - break; + return nxm_stack_pop_check(pop, NULL); +} - case NFI_NXM_OF_TCP_DST: - case NFI_NXM_OF_UDP_DST: - flow->tp_dst = htons(new_value); - break; +enum ofperr +nxm_stack_push_check(const struct ofpact_stack *push, + const struct flow *flow) +{ + return mf_check_src(&push->subfield, flow); +} - case NFI_NXM_OF_IN_PORT: - case NFI_NXM_OF_ETH_TYPE: - case NFI_NXM_OF_IP_PROTO: - case NFI_NXM_OF_ARP_OP: - case NFI_NXM_OF_ARP_SPA: - case NFI_NXM_OF_ARP_TPA: - case NFI_NXM_OF_ICMP_TYPE: - case NFI_NXM_OF_ICMP_CODE: - case NFI_NXM_NX_TUN_ID_W: - case NFI_NXM_OF_ETH_DST_W: - case NFI_NXM_OF_VLAN_TCI_W: - case NFI_NXM_OF_IP_SRC_W: - 
case NFI_NXM_OF_IP_DST_W: - case NFI_NXM_OF_ARP_SPA_W: - case NFI_NXM_OF_ARP_TPA_W: - case NFI_NXM_NX_ARP_SHA: - case NFI_NXM_NX_ARP_THA: - case NFI_NXM_NX_IPV6_SRC: - case NFI_NXM_NX_IPV6_SRC_W: - case NFI_NXM_NX_IPV6_DST: - case NFI_NXM_NX_IPV6_DST_W: - case NFI_NXM_NX_ICMPV6_TYPE: - case NFI_NXM_NX_ICMPV6_CODE: - case NFI_NXM_NX_ND_TARGET: - case NFI_NXM_NX_ND_SLL: - case NFI_NXM_NX_ND_TLL: - case N_NXM_FIELDS: - NOT_REACHED(); - } +enum ofperr +nxm_stack_pop_check(const struct ofpact_stack *pop, + const struct flow *flow) +{ + return mf_check_dst(&pop->subfield, flow); } void -nxm_execute_reg_move(const struct nx_action_reg_move *action, - struct flow *flow) +nxm_stack_push_to_nxast(const struct ofpact_stack *stack, + struct ofpbuf *openflow) { - /* Preparation. */ - int n_bits = ntohs(action->n_bits); - uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1; + nxm_stack_to_nxast__(stack, ofputil_put_NXAST_STACK_PUSH(openflow)); +} - /* Get the interesting bits of the source field. */ - const struct nxm_field *src = nxm_field_lookup(ntohl(action->src)); - int src_ofs = ntohs(action->src_ofs); - uint64_t src_data = nxm_read_field(src, flow) & (mask << src_ofs); +void +nxm_stack_pop_to_nxast(const struct ofpact_stack *stack, + struct ofpbuf *openflow) +{ + nxm_stack_to_nxast__(stack, ofputil_put_NXAST_STACK_POP(openflow)); +} - nxm_reg_load(action->dst, - nxm_encode_ofs_nbits(ntohs(action->dst_ofs), n_bits), - src_data, flow); +/* nxm_execute_stack_push(), nxm_execute_stack_pop(). */ +static void +nx_stack_push(struct ofpbuf *stack, union mf_subvalue *v) +{ + ofpbuf_put(stack, v, sizeof *v); +} + +static union mf_subvalue * +nx_stack_pop(struct ofpbuf *stack) +{ + union mf_subvalue *v = NULL; + + if (ofpbuf_size(stack)) { + + ofpbuf_set_size(stack, ofpbuf_size(stack) - sizeof *v); + v = (union mf_subvalue *) ofpbuf_tail(stack); + } + + return v; } void -nxm_execute_reg_load(const struct nx_action_reg_load *action, - struct flow *flow) +nxm_execute_stack_push(const struct ofpact_stack *push, + const struct flow *flow, struct flow_wildcards *wc, + struct ofpbuf *stack) { - nxm_reg_load(action->dst, action->ofs_nbits, ntohll(action->value), flow); + union mf_subvalue mask_value; + union mf_subvalue dst_value; + + memset(&mask_value, 0xff, sizeof mask_value); + mf_write_subfield_flow(&push->subfield, &mask_value, &wc->masks); + + mf_read_subfield(&push->subfield, flow, &dst_value); + nx_stack_push(stack, &dst_value); } -/* Calculates ofs and n_bits from the given 'ofs_nbits' parameter, and copies - * 'src_data'[0:n_bits] to 'dst_header'[ofs:ofs+n_bits] in the given 'flow'. */ void -nxm_reg_load(ovs_be32 dst_header, ovs_be16 ofs_nbits, uint64_t src_data, - struct flow *flow) +nxm_execute_stack_pop(const struct ofpact_stack *pop, + struct flow *flow, struct flow_wildcards *wc, + struct ofpbuf *stack) { - int n_bits = nxm_decode_n_bits(ofs_nbits); - int dst_ofs = nxm_decode_ofs(ofs_nbits); - uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1; + union mf_subvalue *src_value; - /* Get remaining bits of the destination field. */ - const struct nxm_field *dst = nxm_field_lookup(ntohl(dst_header)); - uint64_t dst_data = nxm_read_field(dst, flow) & ~(mask << dst_ofs); + src_value = nx_stack_pop(stack); - /* Get the final value. */ - uint64_t new_data = dst_data | (src_data << dst_ofs); + /* Only pop if stack is not empty. Otherwise, give warning. 
*/ + if (src_value) { + union mf_subvalue mask_value; - nxm_write_field(dst, flow, new_data); + memset(&mask_value, 0xff, sizeof mask_value); + mf_write_subfield_flow(&pop->subfield, &mask_value, &wc->masks); + mf_write_subfield_flow(&pop->subfield, src_value, flow); + } else { + if (!VLOG_DROP_WARN(&rl)) { + char *flow_str = flow_to_string(flow); + VLOG_WARN_RL(&rl, "Failed to pop from an empty stack. On flow \n" + " %s", flow_str); + free(flow_str); + } + } }
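
Illustrative usage sketch (not part of the patch above): these hunks replace the old private nxm_field table with the generic meta-flow (mf_field) layer and add OXM encoding alongside NXM. The sketch below shows, very roughly, how the reworked serialization API documented in the new comments, nx_put_match() followed by nx_pull_match(), might be exercised in a round trip. It is a minimal, hypothetical example assuming OVS headers of the same era (match.h, nx-match.h, ofpbuf.h, packets.h, byte-order.h, util.h) and helpers such as match_set_dl_type(), match_set_nw_proto(), and match_equal(); it is not code from the tree.

/* Round-trip sketch: serialize a match to NXM, then parse it back.
 * Assumes OVS headers contemporary with this patch; error handling is
 * reduced to assertions for brevity. */
#include <netinet/in.h>
#include "byte-order.h"
#include "match.h"
#include "nx-match.h"
#include "ofpbuf.h"
#include "packets.h"
#include "util.h"

static void
nx_match_round_trip_example(void)
{
    struct match match, reparsed;
    struct ofpbuf b;
    ovs_be64 cookie, cookie_mask;
    enum ofperr error;
    int match_len;

    /* Build a simple match: IPv4 over Ethernet, TCP. */
    match_init_catchall(&match);
    match_set_dl_type(&match, htons(ETH_TYPE_IP));
    match_set_nw_proto(&match, IPPROTO_TCP);

    /* nx_put_match() appends the NXM encoding plus zero padding to a
     * multiple of 8 bytes and returns the unpadded length. */
    ofpbuf_init(&b, 64);
    match_len = nx_put_match(&b, &match, htonll(0), htonll(0));

    /* Strict parse: unknown NXM headers are rejected; the loose variant
     * (nx_pull_match_loose) would skip them instead. */
    error = nx_pull_match(&b, match_len, &reparsed, &cookie, &cookie_mask);
    ovs_assert(!error);
    ovs_assert(match_equal(&match, &reparsed));

    ofpbuf_uninit(&b);
}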