X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=lib%2Fnx-match.c;h=eaccce12fdb5fff261f09f777d9d784983d8ecdc;hb=524e1b1c3dbf4f88c00c4b6c2544a334d48dab0a;hp=5fc6aa24942f1253b95e1099ef70a14956790589;hpb=d31f1109f10e5ffb9bf266306b913ebf23781666;p=sliver-openvswitch.git diff --git a/lib/nx-match.c b/lib/nx-match.c index 5fc6aa249..eaccce12f 100644 --- a/lib/nx-match.c +++ b/lib/nx-match.c @@ -18,8 +18,11 @@ #include "nx-match.h" +#include + #include "classifier.h" #include "dynamic-string.h" +#include "meta-flow.h" #include "ofp-util.h" #include "ofpbuf.h" #include "openflow/nicira-ext.h" @@ -43,88 +46,6 @@ enum { BAD_ARGUMENT = OFP_MKERR(OFPET_BAD_ACTION, OFPBAC_BAD_ARGUMENT) }; -/* For each NXM_* field, define NFI_NXM_* as consecutive integers starting from - * zero. */ -enum nxm_field_index { -#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPES, NW_PROTO, WRITABLE) \ - NFI_NXM_##HEADER, -#include "nx-match.def" - N_NXM_FIELDS -}; - -struct nxm_field { - struct hmap_node hmap_node; - enum nxm_field_index index; /* NFI_* value. */ - uint32_t header; /* NXM_* value. */ - flow_wildcards_t wildcard; /* FWW_* bit, if exactly one. */ - ovs_be16 dl_type[N_NXM_DL_TYPES]; /* dl_type prerequisites. */ - uint8_t nw_proto; /* nw_proto prerequisite, if nonzero. */ - const char *name; /* "NXM_*" string. */ - bool writable; /* Writable with NXAST_REG_{MOVE,LOAD}? */ -}; - - -/* All the known fields. */ -static struct nxm_field nxm_fields[N_NXM_FIELDS] = { -#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPES, NW_PROTO, WRITABLE) \ - { HMAP_NODE_NULL_INITIALIZER, NFI_NXM_##HEADER, NXM_##HEADER, WILDCARD, \ - DL_CONVERT DL_TYPES, NW_PROTO, "NXM_" #HEADER, WRITABLE }, -#define DL_CONVERT(T1, T2) { CONSTANT_HTONS(T1), CONSTANT_HTONS(T2) } -#include "nx-match.def" -}; - -/* Hash table of 'nxm_fields'. */ -static struct hmap all_nxm_fields = HMAP_INITIALIZER(&all_nxm_fields); - -/* Possible masks for NXM_OF_ETH_DST_W. */ -static const uint8_t eth_all_0s[ETH_ADDR_LEN] - = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; -static const uint8_t eth_all_1s[ETH_ADDR_LEN] - = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; -static const uint8_t eth_mcast_1[ETH_ADDR_LEN] - = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00}; -static const uint8_t eth_mcast_0[ETH_ADDR_LEN] - = {0xfe, 0xff, 0xff, 0xff, 0xff, 0xff}; - -static void -nxm_init(void) -{ - if (hmap_is_empty(&all_nxm_fields)) { - int i; - - for (i = 0; i < N_NXM_FIELDS; i++) { - struct nxm_field *f = &nxm_fields[i]; - hmap_insert(&all_nxm_fields, &f->hmap_node, - hash_int(f->header, 0)); - } - - /* Verify that the header values are unique (duplicate "case" values - * cause a compile error). */ - switch (0) { -#define DEFINE_FIELD(HEADER, WILDCARD, DL_TYPE, NW_PROTO, WRITABLE) \ - case NXM_##HEADER: break; -#include "nx-match.def" - } - } -} - -static const struct nxm_field * -nxm_field_lookup(uint32_t header) -{ - struct nxm_field *f; - - nxm_init(); - - HMAP_FOR_EACH_WITH_HASH (f, hmap_node, hash_int(header, 0), - &all_nxm_fields) { - if (f->header == header) { - return f; - } - } - - return NULL; -} - /* Returns the width of the data for a field with the given 'header', in * bytes. */ int @@ -144,303 +65,6 @@ nxm_field_bits(uint32_t header) /* nx_pull_match() and helpers. */ -static int -parse_nx_reg(const struct nxm_field *f, - struct flow *flow, struct flow_wildcards *wc, - const void *value, const void *maskp) -{ - int idx = NXM_NX_REG_IDX(f->header); - if (wc->reg_masks[idx]) { - return NXM_DUP_TYPE; - } else { - flow_wildcards_set_reg_mask(wc, idx, - (NXM_HASMASK(f->header) - ? 
ntohl(get_unaligned_be32(maskp)) - : UINT32_MAX)); - flow->regs[idx] = ntohl(get_unaligned_be32(value)); - flow->regs[idx] &= wc->reg_masks[idx]; - return 0; - } -} - -static int -parse_nxm_entry(struct cls_rule *rule, const struct nxm_field *f, - const void *value, const void *mask) -{ - struct flow_wildcards *wc = &rule->wc; - struct flow *flow = &rule->flow; - - switch (f->index) { - /* Metadata. */ - case NFI_NXM_OF_IN_PORT: - flow->in_port = ntohs(get_unaligned_be16(value)); - if (flow->in_port == OFPP_LOCAL) { - flow->in_port = ODPP_LOCAL; - } - return 0; - - /* Ethernet header. */ - case NFI_NXM_OF_ETH_DST: - if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST)) - != (FWW_DL_DST | FWW_ETH_MCAST)) { - return NXM_DUP_TYPE; - } else { - wc->wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST); - memcpy(flow->dl_dst, value, ETH_ADDR_LEN); - return 0; - } - case NFI_NXM_OF_ETH_DST_W: - if ((wc->wildcards & (FWW_DL_DST | FWW_ETH_MCAST)) - != (FWW_DL_DST | FWW_ETH_MCAST)) { - return NXM_DUP_TYPE; - } else if (eth_addr_equals(mask, eth_mcast_1)) { - wc->wildcards &= ~FWW_ETH_MCAST; - flow->dl_dst[0] = *(uint8_t *) value & 0x01; - } else if (eth_addr_equals(mask, eth_mcast_0)) { - wc->wildcards &= ~FWW_DL_DST; - memcpy(flow->dl_dst, value, ETH_ADDR_LEN); - flow->dl_dst[0] &= 0xfe; - } else if (eth_addr_equals(mask, eth_all_0s)) { - return 0; - } else if (eth_addr_equals(mask, eth_all_1s)) { - wc->wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST); - memcpy(flow->dl_dst, value, ETH_ADDR_LEN); - return 0; - } else { - return NXM_BAD_MASK; - } - case NFI_NXM_OF_ETH_SRC: - memcpy(flow->dl_src, value, ETH_ADDR_LEN); - return 0; - case NFI_NXM_OF_ETH_TYPE: - flow->dl_type = ofputil_dl_type_from_openflow(get_unaligned_be16(value)); - return 0; - - /* 802.1Q header. */ - case NFI_NXM_OF_VLAN_TCI: - if (wc->vlan_tci_mask) { - return NXM_DUP_TYPE; - } else { - cls_rule_set_dl_tci(rule, get_unaligned_be16(value)); - return 0; - } - case NFI_NXM_OF_VLAN_TCI_W: - if (wc->vlan_tci_mask) { - return NXM_DUP_TYPE; - } else { - cls_rule_set_dl_tci_masked(rule, get_unaligned_be16(value), - get_unaligned_be16(mask)); - return 0; - } - - /* IP header. */ - case NFI_NXM_OF_IP_TOS: - if (*(uint8_t *) value & 0x03) { - return NXM_BAD_VALUE; - } else { - flow->nw_tos = *(uint8_t *) value; - return 0; - } - case NFI_NXM_OF_IP_PROTO: - flow->nw_proto = *(uint8_t *) value; - return 0; - - /* IP addresses in IP and ARP headers. */ - case NFI_NXM_OF_IP_SRC: - case NFI_NXM_OF_ARP_SPA: - if (wc->nw_src_mask) { - return NXM_DUP_TYPE; - } else { - cls_rule_set_nw_src(rule, get_unaligned_be32(value)); - return 0; - } - case NFI_NXM_OF_IP_SRC_W: - case NFI_NXM_OF_ARP_SPA_W: - if (wc->nw_src_mask) { - return NXM_DUP_TYPE; - } else { - ovs_be32 ip = get_unaligned_be32(value); - ovs_be32 netmask = get_unaligned_be32(mask); - if (!cls_rule_set_nw_src_masked(rule, ip, netmask)) { - return NXM_BAD_MASK; - } - return 0; - } - case NFI_NXM_OF_IP_DST: - case NFI_NXM_OF_ARP_TPA: - if (wc->nw_dst_mask) { - return NXM_DUP_TYPE; - } else { - cls_rule_set_nw_dst(rule, get_unaligned_be32(value)); - return 0; - } - case NFI_NXM_OF_IP_DST_W: - case NFI_NXM_OF_ARP_TPA_W: - if (wc->nw_dst_mask) { - return NXM_DUP_TYPE; - } else { - ovs_be32 ip = get_unaligned_be32(value); - ovs_be32 netmask = get_unaligned_be32(mask); - if (!cls_rule_set_nw_dst_masked(rule, ip, netmask)) { - return NXM_BAD_MASK; - } - return 0; - } - - /* IPv6 addresses. 
*/ - case NFI_NXM_NX_IPV6_SRC: - if (!ipv6_mask_is_any(&wc->ipv6_src_mask)) { - return NXM_DUP_TYPE; - } else { - struct in6_addr ipv6; - memcpy(&ipv6, value, sizeof ipv6); - cls_rule_set_ipv6_src(rule, &ipv6); - return 0; - } - case NFI_NXM_NX_IPV6_SRC_W: - if (!ipv6_mask_is_any(&wc->ipv6_src_mask)) { - return NXM_DUP_TYPE; - } else { - struct in6_addr ipv6, netmask; - memcpy(&ipv6, value, sizeof ipv6); - memcpy(&netmask, mask, sizeof netmask); - if (!cls_rule_set_ipv6_src_masked(rule, &ipv6, &netmask)) { - return NXM_BAD_MASK; - } - return 0; - } - case NFI_NXM_NX_IPV6_DST: - if (!ipv6_mask_is_any(&wc->ipv6_dst_mask)) { - return NXM_DUP_TYPE; - } else { - struct in6_addr ipv6; - memcpy(&ipv6, value, sizeof ipv6); - cls_rule_set_ipv6_dst(rule, &ipv6); - return 0; - } - case NFI_NXM_NX_IPV6_DST_W: - if (!ipv6_mask_is_any(&wc->ipv6_dst_mask)) { - return NXM_DUP_TYPE; - } else { - struct in6_addr ipv6, netmask; - memcpy(&ipv6, value, sizeof ipv6); - memcpy(&netmask, mask, sizeof netmask); - if (!cls_rule_set_ipv6_dst_masked(rule, &ipv6, &netmask)) { - return NXM_BAD_MASK; - } - return 0; - } - - /* TCP header. */ - case NFI_NXM_OF_TCP_SRC: - flow->tp_src = get_unaligned_be16(value); - return 0; - case NFI_NXM_OF_TCP_DST: - flow->tp_dst = get_unaligned_be16(value); - return 0; - - /* UDP header. */ - case NFI_NXM_OF_UDP_SRC: - flow->tp_src = get_unaligned_be16(value); - return 0; - case NFI_NXM_OF_UDP_DST: - flow->tp_dst = get_unaligned_be16(value); - return 0; - - /* ICMP header. */ - case NFI_NXM_OF_ICMP_TYPE: - flow->tp_src = htons(*(uint8_t *) value); - return 0; - case NFI_NXM_OF_ICMP_CODE: - flow->tp_dst = htons(*(uint8_t *) value); - return 0; - - /* ICMPv6 header. */ - case NFI_NXM_NX_ICMPV6_TYPE: - flow->tp_src = htons(*(uint8_t *) value); - return 0; - case NFI_NXM_NX_ICMPV6_CODE: - flow->tp_dst = htons(*(uint8_t *) value); - return 0; - - /* ARP header. */ - case NFI_NXM_OF_ARP_OP: - if (ntohs(get_unaligned_be16(value)) > 255) { - return NXM_BAD_VALUE; - } else { - flow->nw_proto = ntohs(get_unaligned_be16(value)); - return 0; - } - - case NFI_NXM_NX_ARP_SHA: - memcpy(flow->arp_sha, value, ETH_ADDR_LEN); - return 0; - case NFI_NXM_NX_ARP_THA: - memcpy(flow->arp_tha, value, ETH_ADDR_LEN); - return 0; - - /* Tunnel ID. */ - case NFI_NXM_NX_TUN_ID: - if (wc->tun_id_mask) { - return NXM_DUP_TYPE; - } else { - cls_rule_set_tun_id(rule, get_unaligned_be64(value)); - return 0; - } - case NFI_NXM_NX_TUN_ID_W: - if (wc->tun_id_mask) { - return NXM_DUP_TYPE; - } else { - ovs_be64 tun_id = get_unaligned_be64(value); - ovs_be64 tun_mask = get_unaligned_be64(mask); - cls_rule_set_tun_id_masked(rule, tun_id, tun_mask); - return 0; - } - - /* Registers. 
*/ - case NFI_NXM_NX_REG0: - case NFI_NXM_NX_REG0_W: -#if FLOW_N_REGS >= 2 - case NFI_NXM_NX_REG1: - case NFI_NXM_NX_REG1_W: -#endif -#if FLOW_N_REGS >= 3 - case NFI_NXM_NX_REG2: - case NFI_NXM_NX_REG2_W: -#endif -#if FLOW_N_REGS >= 4 - case NFI_NXM_NX_REG3: - case NFI_NXM_NX_REG3_W: -#endif -#if FLOW_N_REGS > 4 -#error -#endif - return parse_nx_reg(f, flow, wc, value, mask); - - case N_NXM_FIELDS: - NOT_REACHED(); - } - NOT_REACHED(); -} - -static bool -nxm_prereqs_ok(const struct nxm_field *field, const struct flow *flow) -{ - if (field->nw_proto && field->nw_proto != flow->nw_proto) { - return false; - } - - if (!field->dl_type[0]) { - return true; - } else if (field->dl_type[0] == flow->dl_type) { - return true; - } else if (field->dl_type[1] && field->dl_type[1] == flow->dl_type) { - return true; - } - - return false; -} - static uint32_t nx_entry_ok(const void *p, unsigned int match_len) { @@ -472,13 +96,16 @@ nx_entry_ok(const void *p, unsigned int match_len) return header; } -int -nx_pull_match(struct ofpbuf *b, unsigned int match_len, uint16_t priority, - struct cls_rule *rule) +static int +nx_pull_match__(struct ofpbuf *b, unsigned int match_len, bool strict, + uint16_t priority, struct cls_rule *rule, + ovs_be64 *cookie, ovs_be64 *cookie_mask) { uint32_t header; uint8_t *p; + assert((cookie != NULL) == (cookie_mask != NULL)); + p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8)); if (!p) { VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a " @@ -488,42 +115,111 @@ nx_pull_match(struct ofpbuf *b, unsigned int match_len, uint16_t priority, } cls_rule_init_catchall(rule, priority); - while ((header = nx_entry_ok(p, match_len)) != 0) { - unsigned length = NXM_LENGTH(header); - const struct nxm_field *f; + if (cookie) { + *cookie = *cookie_mask = htonll(0); + } + for (; + (header = nx_entry_ok(p, match_len)) != 0; + p += 4 + NXM_LENGTH(header), match_len -= 4 + NXM_LENGTH(header)) { + const struct mf_field *mf; int error; - f = nxm_field_lookup(header); - if (!f) { - error = NXM_BAD_TYPE; - } else if (!nxm_prereqs_ok(f, &rule->flow)) { + mf = mf_from_nxm_header(header); + if (!mf) { + if (strict) { + error = NXM_BAD_TYPE; + } else { + continue; + } + } else if (!mf_are_prereqs_ok(mf, &rule->flow)) { error = NXM_BAD_PREREQ; - } else if (f->wildcard && !(rule->wc.wildcards & f->wildcard)) { + } else if (!mf_is_all_wild(mf, &rule->wc)) { error = NXM_DUP_TYPE; } else { - /* 'hasmask' and 'length' are known to be correct at this point - * because they are included in 'header' and nxm_field_lookup() - * checked them already. */ - rule->wc.wildcards &= ~f->wildcard; - error = parse_nxm_entry(rule, f, p + 4, p + 4 + length / 2); + unsigned int width = mf->n_bytes; + union mf_value value; + + memcpy(&value, p + 4, width); + if (!mf_is_value_valid(mf, &value)) { + error = NXM_BAD_VALUE; + } else if (!NXM_HASMASK(header)) { + error = 0; + mf_set_value(mf, &value, rule); + } else { + union mf_value mask; + + memcpy(&mask, p + 4 + width, width); + if (!mf_is_mask_valid(mf, &mask)) { + error = NXM_BAD_MASK; + } else { + error = 0; + mf_set(mf, &value, &mask, rule); + } + } } + + /* Check if the match is for a cookie rather than a classifier rule. 
*/ + if ((header == NXM_NX_COOKIE || header == NXM_NX_COOKIE_W) && cookie) { + if (*cookie_mask) { + error = NXM_DUP_TYPE; + } else { + unsigned int width = sizeof *cookie; + + memcpy(cookie, p + 4, width); + if (NXM_HASMASK(header)) { + memcpy(cookie_mask, p + 4 + width, width); + } else { + *cookie_mask = htonll(UINT64_MAX); + } + error = 0; + } + } + if (error) { - VLOG_DBG_RL(&rl, "bad nxm_entry with vendor=%"PRIu32", " - "field=%"PRIu32", hasmask=%"PRIu32", type=%"PRIu32" " - "(error %x)", + char *msg = ofputil_error_to_string(error); + VLOG_DBG_RL(&rl, "bad nxm_entry %#08"PRIx32" (vendor=%"PRIu32", " + "field=%"PRIu32", hasmask=%"PRIu32", len=%"PRIu32"), " + "(%s)", header, NXM_VENDOR(header), NXM_FIELD(header), - NXM_HASMASK(header), NXM_TYPE(header), - error); + NXM_HASMASK(header), NXM_LENGTH(header), + msg); + free(msg); + return error; } - - - p += 4 + length; - match_len -= 4 + length; } return match_len ? NXM_INVALID : 0; } + +/* Parses the nx_match formatted match description in 'b' with length + * 'match_len'. The results are stored in 'rule', which is initialized with + * 'priority'. If 'cookie' and 'cookie_mask' contain valid pointers, then the + * cookie and mask will be stored in them if a "NXM_NX_COOKIE*" match is + * defined. Otherwise, 0 is stored in both. + * + * Fails with an error when encountering unknown NXM headers. + * + * Returns 0 if successful, otherwise an OpenFlow error code. */ +int +nx_pull_match(struct ofpbuf *b, unsigned int match_len, + uint16_t priority, struct cls_rule *rule, + ovs_be64 *cookie, ovs_be64 *cookie_mask) +{ + return nx_pull_match__(b, match_len, true, priority, rule, cookie, + cookie_mask); +} + +/* Behaves the same as nx_pull_match() with one exception. Skips over unknown + * NXM headers instead of failing with an error when they are encountered. */ +int +nx_pull_match_loose(struct ofpbuf *b, unsigned int match_len, + uint16_t priority, struct cls_rule *rule, + ovs_be64 *cookie, ovs_be64 *cookie_mask) +{ + return nx_pull_match__(b, match_len, false, priority, rule, cookie, + cookie_mask); +} /* nx_put_match() and helpers. 
* @@ -546,6 +242,24 @@ nxm_put_8(struct ofpbuf *b, uint32_t header, uint8_t value) ofpbuf_put(b, &value, sizeof value); } +static void +nxm_put_8m(struct ofpbuf *b, uint32_t header, uint8_t value, uint8_t mask) +{ + switch (mask) { + case 0: + break; + + case UINT8_MAX: + nxm_put_8(b, header, value); + break; + + default: + nxm_put_header(b, NXM_MAKE_WILD_HEADER(header)); + ofpbuf_put(b, &value, sizeof value); + ofpbuf_put(b, &mask, sizeof mask); + } +} + static void nxm_put_16(struct ofpbuf *b, uint32_t header, ovs_be16 value) { @@ -652,20 +366,15 @@ nxm_put_eth(struct ofpbuf *b, uint32_t header, static void nxm_put_eth_dst(struct ofpbuf *b, - uint32_t wc, const uint8_t value[ETH_ADDR_LEN]) + flow_wildcards_t wc, const uint8_t value[ETH_ADDR_LEN]) { switch (wc & (FWW_DL_DST | FWW_ETH_MCAST)) { case FWW_DL_DST | FWW_ETH_MCAST: break; - case FWW_DL_DST: - nxm_put_header(b, NXM_OF_ETH_DST_W); - ofpbuf_put(b, value, ETH_ADDR_LEN); - ofpbuf_put(b, eth_mcast_1, ETH_ADDR_LEN); - break; - case FWW_ETH_MCAST: + default: nxm_put_header(b, NXM_OF_ETH_DST_W); ofpbuf_put(b, value, ETH_ADDR_LEN); - ofpbuf_put(b, eth_mcast_0, ETH_ADDR_LEN); + ofpbuf_put(b, flow_wildcards_to_dl_dst_mask(wc), ETH_ADDR_LEN); break; case 0: nxm_put_eth(b, NXM_OF_ETH_DST, value); @@ -689,9 +398,32 @@ nxm_put_ipv6(struct ofpbuf *b, uint32_t header, } } +static void +nxm_put_frag(struct ofpbuf *b, const struct cls_rule *cr) +{ + uint8_t nw_frag = cr->flow.nw_frag; + uint8_t nw_frag_mask = cr->wc.nw_frag_mask; + + switch (nw_frag_mask) { + case 0: + break; + + case FLOW_NW_FRAG_MASK: + nxm_put_8(b, NXM_NX_IP_FRAG, nw_frag); + break; + + default: + nxm_put_8m(b, NXM_NX_IP_FRAG, nw_frag, + nw_frag_mask & FLOW_NW_FRAG_MASK); + break; + } +} + /* Appends to 'b' the nx_match format that expresses 'cr' (except for * 'cr->priority', because priority is not part of nx_match), plus enough - * zero bytes to pad the nx_match out to a multiple of 8. + * zero bytes to pad the nx_match out to a multiple of 8. For Flow Mod + * and Flow Stats Requests messages, a 'cookie' and 'cookie_mask' may be + * supplied. Otherwise, 'cookie_mask' should be zero. * * This function can cause 'b''s data to be reallocated. * @@ -700,7 +432,8 @@ nxm_put_ipv6(struct ofpbuf *b, uint32_t header, * If 'cr' is a catch-all rule that matches every packet, then this function * appends nothing to 'b' and returns 0. */ int -nx_put_match(struct ofpbuf *b, const struct cls_rule *cr) +nx_put_match(struct ofpbuf *b, const struct cls_rule *cr, + ovs_be64 cookie, ovs_be64 cookie_mask) { const flow_wildcards_t wc = cr->wc.wildcards; const struct flow *flow = &cr->flow; @@ -708,12 +441,11 @@ nx_put_match(struct ofpbuf *b, const struct cls_rule *cr) int match_len; int i; + BUILD_ASSERT_DECL(FLOW_WC_SEQ == 7); + /* Metadata. */ if (!(wc & FWW_IN_PORT)) { uint16_t in_port = flow->in_port; - if (in_port == ODPP_LOCAL) { - in_port = OFPP_LOCAL; - } nxm_put_16(b, NXM_OF_IN_PORT, htons(in_port)); } @@ -733,11 +465,21 @@ nx_put_match(struct ofpbuf *b, const struct cls_rule *cr) /* L3. */ if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IP)) { /* IP. 
*/ - if (!(wc & FWW_NW_TOS)) { - nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & 0xfc); - } nxm_put_32m(b, NXM_OF_IP_SRC, flow->nw_src, cr->wc.nw_src_mask); nxm_put_32m(b, NXM_OF_IP_DST, flow->nw_dst, cr->wc.nw_dst_mask); + nxm_put_frag(b, cr); + + if (!(wc & FWW_NW_DSCP)) { + nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & IP_DSCP_MASK); + } + + if (!(wc & FWW_NW_ECN)) { + nxm_put_8(b, NXM_NX_IP_ECN, flow->nw_tos & IP_ECN_MASK); + } + + if (!(wc & FWW_NW_TTL)) { + nxm_put_8(b, NXM_NX_IP_TTL, flow->nw_ttl); + } if (!(wc & FWW_NW_PROTO)) { nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto); @@ -775,14 +517,27 @@ nx_put_match(struct ofpbuf *b, const struct cls_rule *cr) } } else if (!(wc & FWW_DL_TYPE) && flow->dl_type == htons(ETH_TYPE_IPV6)) { /* IPv6. */ - - if (!(wc & FWW_NW_TOS)) { - nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & 0xfc); - } nxm_put_ipv6(b, NXM_NX_IPV6_SRC, &flow->ipv6_src, &cr->wc.ipv6_src_mask); nxm_put_ipv6(b, NXM_NX_IPV6_DST, &flow->ipv6_dst, &cr->wc.ipv6_dst_mask); + nxm_put_frag(b, cr); + + if (!(wc & FWW_IPV6_LABEL)) { + nxm_put_32(b, NXM_NX_IPV6_LABEL, flow->ipv6_label); + } + + if (!(wc & FWW_NW_DSCP)) { + nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & IP_DSCP_MASK); + } + + if (!(wc & FWW_NW_ECN)) { + nxm_put_8(b, NXM_NX_IP_ECN, flow->nw_tos & IP_ECN_MASK); + } + + if (!(wc & FWW_NW_TTL)) { + nxm_put_8(b, NXM_NX_IP_TTL, flow->nw_ttl); + } if (!(wc & FWW_NW_PROTO)) { nxm_put_8(b, NXM_OF_IP_PROTO, flow->nw_proto); @@ -811,6 +566,22 @@ nx_put_match(struct ofpbuf *b, const struct cls_rule *cr) case IPPROTO_ICMPV6: if (!(wc & FWW_TP_SRC)) { nxm_put_8(b, NXM_NX_ICMPV6_TYPE, ntohs(flow->tp_src)); + + if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) || + flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) { + if (!(wc & FWW_ND_TARGET)) { + nxm_put_ipv6(b, NXM_NX_ND_TARGET, &flow->nd_target, + &in6addr_exact); + } + if (!(wc & FWW_ARP_SHA) + && flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) { + nxm_put_eth(b, NXM_NX_ND_SLL, flow->arp_sha); + } + if (!(wc & FWW_ARP_THA) + && flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) { + nxm_put_eth(b, NXM_NX_ND_TLL, flow->arp_tha); + } + } } if (!(wc & FWW_TP_DST)) { nxm_put_8(b, NXM_NX_ICMPV6_CODE, ntohs(flow->tp_dst)); @@ -842,6 +613,9 @@ nx_put_match(struct ofpbuf *b, const struct cls_rule *cr) htonl(flow->regs[i]), htonl(cr->wc.reg_masks[i])); } + /* Cookie. */ + nxm_put_64m(b, NXM_NX_COOKIE, cookie, cookie_mask); + match_len = b->size - start_len; ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len); return match_len; @@ -905,9 +679,16 @@ nx_match_to_string(const uint8_t *p, unsigned int match_len) static void format_nxm_field_name(struct ds *s, uint32_t header) { - const struct nxm_field *f = nxm_field_lookup(header); - if (f) { - ds_put_cstr(s, f->name); + const struct mf_field *mf = mf_from_nxm_header(header); + if (mf) { + ds_put_cstr(s, mf->nxm_name); + if (NXM_HASMASK(header)) { + ds_put_cstr(s, "_W"); + } + } else if (header == NXM_NX_COOKIE) { + ds_put_cstr(s, "NXM_NX_COOKIE"); + } else if (header == NXM_NX_COOKIE_W) { + ds_put_cstr(s, "NXM_NX_COOKIE_W"); } else { ds_put_format(s, "%d:%d", NXM_VENDOR(header), NXM_FIELD(header)); } @@ -916,12 +697,35 @@ format_nxm_field_name(struct ds *s, uint32_t header) static uint32_t parse_nxm_field_name(const char *name, int name_len) { - const struct nxm_field *f; + bool wild; + int i; /* Check whether it's a field name. 
*/ - for (f = nxm_fields; f < &nxm_fields[ARRAY_SIZE(nxm_fields)]; f++) { - if (!strncmp(f->name, name, name_len) && f->name[name_len] == '\0') { - return f->header; + wild = name_len > 2 && !memcmp(&name[name_len - 2], "_W", 2); + if (wild) { + name_len -= 2; + } + + for (i = 0; i < MFF_N_IDS; i++) { + const struct mf_field *mf = mf_from_id(i); + + if (mf->nxm_name + && !strncmp(mf->nxm_name, name, name_len) + && mf->nxm_name[name_len] == '\0') { + if (!wild) { + return mf->nxm_header; + } else if (mf->maskable != MFM_NONE) { + return NXM_MAKE_WILD_HEADER(mf->nxm_header); + } + } + } + + if (!strncmp("NXM_NX_COOKIE", name, name_len) + && (name_len == strlen("NXM_NX_COOKIE"))) { + if (!wild) { + return NXM_NX_COOKIE; + } else { + return NXM_NX_COOKIE_W; } } @@ -1078,10 +882,7 @@ nxm_parse_reg_move(struct nx_action_reg_move *move, const char *s) "%d bits wide", full_s, src_n_bits, dst_n_bits); } - move->type = htons(OFPAT_VENDOR); - move->len = htons(sizeof *move); - move->vendor = htonl(NX_VENDOR_ID); - move->subtype = htons(NXAST_REG_MOVE); + ofputil_init_NXAST_REG_MOVE(move); move->n_bits = htons(src_n_bits); move->src_ofs = htons(src_ofs); move->dst_ofs = htons(dst_ofs); @@ -1112,10 +913,7 @@ nxm_parse_reg_load(struct nx_action_reg_load *load, const char *s) full_s, value, n_bits); } - load->type = htons(OFPAT_VENDOR); - load->len = htons(sizeof *load); - load->vendor = htonl(NX_VENDOR_ID); - load->subtype = htons(NXAST_REG_LOAD); + ofputil_init_NXAST_REG_LOAD(load); load->ofs_nbits = nxm_encode_ofs_nbits(ofs, n_bits); load->dst = htonl(dst); load->value = htonll(value); @@ -1166,52 +964,85 @@ nxm_format_reg_load(const struct nx_action_reg_load *load, struct ds *s) /* nxm_check_reg_move(), nxm_check_reg_load(). */ static bool -field_ok(const struct nxm_field *f, const struct flow *flow, int size) +field_ok(const struct mf_field *mf, const struct flow *flow, int size) { - return (f && !NXM_HASMASK(f->header) - && nxm_prereqs_ok(f, flow) && size <= nxm_field_bits(f->header)); + return (mf + && mf_are_prereqs_ok(mf, flow) + && size <= nxm_field_bits(mf->nxm_header)); } int nxm_check_reg_move(const struct nx_action_reg_move *action, const struct flow *flow) { - const struct nxm_field *src; - const struct nxm_field *dst; + int src_ofs, dst_ofs, n_bits; + int error; - if (action->n_bits == htons(0)) { - return BAD_ARGUMENT; - } + n_bits = ntohs(action->n_bits); + src_ofs = ntohs(action->src_ofs); + dst_ofs = ntohs(action->dst_ofs); - src = nxm_field_lookup(ntohl(action->src)); - if (!field_ok(src, flow, ntohs(action->src_ofs) + ntohs(action->n_bits))) { - return BAD_ARGUMENT; + error = nxm_src_check(action->src, src_ofs, n_bits, flow); + if (error) { + return error; } - dst = nxm_field_lookup(ntohl(action->dst)); - if (!field_ok(dst, flow, ntohs(action->dst_ofs) + ntohs(action->n_bits))) { - return BAD_ARGUMENT; + return nxm_dst_check(action->dst, dst_ofs, n_bits, flow); +} + +/* Given a flow, checks that the source field represented by 'src_header' + * in the range ['ofs', 'ofs' + 'n_bits') is valid. 
*/ +int +nxm_src_check(ovs_be32 src_header_, unsigned int ofs, unsigned int n_bits, + const struct flow *flow) +{ + uint32_t src_header = ntohl(src_header_); + const struct mf_field *src = mf_from_nxm_header(src_header); + + if (!n_bits) { + VLOG_WARN_RL(&rl, "zero bit source field"); + } else if (NXM_HASMASK(src_header) || !field_ok(src, flow, ofs + n_bits)) { + VLOG_WARN_RL(&rl, "invalid source field"); + } else { + return 0; } - if (!dst->writable) { - return BAD_ARGUMENT; + return BAD_ARGUMENT; +} + +/* Given a flow, checks that the destination field represented by 'dst_header' + * in the range ['ofs', 'ofs' + 'n_bits') is valid. */ +int +nxm_dst_check(ovs_be32 dst_header_, unsigned int ofs, unsigned int n_bits, + const struct flow *flow) +{ + uint32_t dst_header = ntohl(dst_header_); + const struct mf_field *dst = mf_from_nxm_header(dst_header); + + if (!n_bits) { + VLOG_WARN_RL(&rl, "zero bit destination field"); + } else if (NXM_HASMASK(dst_header) || !field_ok(dst, flow, ofs + n_bits)) { + VLOG_WARN_RL(&rl, "invalid destination field"); + } else if (!dst->writable) { + VLOG_WARN_RL(&rl, "destination field is not writable"); + } else { + return 0; } - return 0; + return BAD_ARGUMENT; } int nxm_check_reg_load(const struct nx_action_reg_load *action, const struct flow *flow) { - const struct nxm_field *dst; - int ofs, n_bits; + unsigned int ofs = nxm_decode_ofs(action->ofs_nbits); + unsigned int n_bits = nxm_decode_n_bits(action->ofs_nbits); + int error; - ofs = nxm_decode_ofs(action->ofs_nbits); - n_bits = nxm_decode_n_bits(action->ofs_nbits); - dst = nxm_field_lookup(ntohl(action->dst)); - if (!field_ok(dst, flow, ofs + n_bits)) { - return BAD_ARGUMENT; + error = nxm_dst_check(action->dst, ofs, n_bits, flow); + if (error) { + return error; } /* Reject 'action' if a bit numbered 'n_bits' or higher is set to 1 in @@ -1220,225 +1051,122 @@ nxm_check_reg_load(const struct nx_action_reg_load *action, return BAD_ARGUMENT; } - if (!dst->writable) { - return BAD_ARGUMENT; - } - return 0; } /* nxm_execute_reg_move(), nxm_execute_reg_load(). */ -static uint64_t -nxm_read_field(const struct nxm_field *src, const struct flow *flow) +static void +bitwise_copy(const void *src_, unsigned int src_len, unsigned int src_ofs, + void *dst_, unsigned int dst_len, unsigned int dst_ofs, + unsigned int n_bits) { - switch (src->index) { - case NFI_NXM_OF_IN_PORT: - return flow->in_port == ODPP_LOCAL ? 
OFPP_LOCAL : flow->in_port; - - case NFI_NXM_OF_ETH_DST: - return eth_addr_to_uint64(flow->dl_dst); - - case NFI_NXM_OF_ETH_SRC: - return eth_addr_to_uint64(flow->dl_src); - - case NFI_NXM_OF_ETH_TYPE: - return ntohs(ofputil_dl_type_to_openflow(flow->dl_type)); - - case NFI_NXM_OF_VLAN_TCI: - return ntohs(flow->vlan_tci); - - case NFI_NXM_OF_IP_TOS: - return flow->nw_tos; - - case NFI_NXM_OF_IP_PROTO: - case NFI_NXM_OF_ARP_OP: - return flow->nw_proto; - - case NFI_NXM_OF_IP_SRC: - case NFI_NXM_OF_ARP_SPA: - return ntohl(flow->nw_src); - - case NFI_NXM_OF_IP_DST: - case NFI_NXM_OF_ARP_TPA: - return ntohl(flow->nw_dst); - - case NFI_NXM_OF_TCP_SRC: - case NFI_NXM_OF_UDP_SRC: - return ntohs(flow->tp_src); - - case NFI_NXM_OF_TCP_DST: - case NFI_NXM_OF_UDP_DST: - return ntohs(flow->tp_dst); - - case NFI_NXM_OF_ICMP_TYPE: - case NFI_NXM_NX_ICMPV6_TYPE: - return ntohs(flow->tp_src) & 0xff; - - case NFI_NXM_OF_ICMP_CODE: - case NFI_NXM_NX_ICMPV6_CODE: - return ntohs(flow->tp_dst) & 0xff; - - case NFI_NXM_NX_TUN_ID: - return ntohll(flow->tun_id); - -#define NXM_READ_REGISTER(IDX) \ - case NFI_NXM_NX_REG##IDX: \ - return flow->regs[IDX]; \ - case NFI_NXM_NX_REG##IDX##_W: \ - NOT_REACHED(); - - NXM_READ_REGISTER(0); -#if FLOW_N_REGS >= 2 - NXM_READ_REGISTER(1); -#endif -#if FLOW_N_REGS >= 3 - NXM_READ_REGISTER(2); -#endif -#if FLOW_N_REGS >= 4 - NXM_READ_REGISTER(3); -#endif -#if FLOW_N_REGS > 4 -#error -#endif - - case NFI_NXM_NX_ARP_SHA: - return eth_addr_to_uint64(flow->arp_sha); - - case NFI_NXM_NX_ARP_THA: - return eth_addr_to_uint64(flow->arp_tha); - - case NFI_NXM_NX_TUN_ID_W: - case NFI_NXM_OF_ETH_DST_W: - case NFI_NXM_OF_VLAN_TCI_W: - case NFI_NXM_OF_IP_SRC_W: - case NFI_NXM_OF_IP_DST_W: - case NFI_NXM_OF_ARP_SPA_W: - case NFI_NXM_OF_ARP_TPA_W: - case NFI_NXM_NX_IPV6_SRC: - case NFI_NXM_NX_IPV6_SRC_W: - case NFI_NXM_NX_IPV6_DST: - case NFI_NXM_NX_IPV6_DST_W: - case N_NXM_FIELDS: - NOT_REACHED(); - } + const uint8_t *src = src_; + uint8_t *dst = dst_; - NOT_REACHED(); -} + src += src_len - (src_ofs / 8 + 1); + src_ofs %= 8; -static void -nxm_write_field(const struct nxm_field *dst, struct flow *flow, - uint64_t new_value) -{ - switch (dst->index) { - case NFI_NXM_OF_VLAN_TCI: - flow->vlan_tci = htons(new_value); - break; + dst += dst_len - (dst_ofs / 8 + 1); + dst_ofs %= 8; - case NFI_NXM_NX_TUN_ID: - flow->tun_id = htonll(new_value); - break; + if (src_ofs == 0 && dst_ofs == 0) { + unsigned int n_bytes = n_bits / 8; + if (n_bytes) { + dst -= n_bytes - 1; + src -= n_bytes - 1; + memcpy(dst, src, n_bytes); -#define NXM_WRITE_REGISTER(IDX) \ - case NFI_NXM_NX_REG##IDX: \ - flow->regs[IDX] = new_value; \ - break; \ - case NFI_NXM_NX_REG##IDX##_W: \ - NOT_REACHED(); - - NXM_WRITE_REGISTER(0); -#if FLOW_N_REGS >= 2 - NXM_WRITE_REGISTER(1); -#endif -#if FLOW_N_REGS >= 3 - NXM_WRITE_REGISTER(2); -#endif -#if FLOW_N_REGS >= 4 - NXM_WRITE_REGISTER(3); -#endif -#if FLOW_N_REGS > 4 -#error -#endif - - case NFI_NXM_OF_IN_PORT: - case NFI_NXM_OF_ETH_DST: - case NFI_NXM_OF_ETH_SRC: - case NFI_NXM_OF_ETH_TYPE: - case NFI_NXM_OF_IP_TOS: - case NFI_NXM_OF_IP_PROTO: - case NFI_NXM_OF_ARP_OP: - case NFI_NXM_OF_IP_SRC: - case NFI_NXM_OF_ARP_SPA: - case NFI_NXM_OF_IP_DST: - case NFI_NXM_OF_ARP_TPA: - case NFI_NXM_OF_TCP_SRC: - case NFI_NXM_OF_UDP_SRC: - case NFI_NXM_OF_TCP_DST: - case NFI_NXM_OF_UDP_DST: - case NFI_NXM_OF_ICMP_TYPE: - case NFI_NXM_OF_ICMP_CODE: - case NFI_NXM_NX_TUN_ID_W: - case NFI_NXM_OF_ETH_DST_W: - case NFI_NXM_OF_VLAN_TCI_W: - case NFI_NXM_OF_IP_SRC_W: - case NFI_NXM_OF_IP_DST_W: - case 
NFI_NXM_OF_ARP_SPA_W: - case NFI_NXM_OF_ARP_TPA_W: - case NFI_NXM_NX_ARP_SHA: - case NFI_NXM_NX_ARP_THA: - case NFI_NXM_NX_IPV6_SRC: - case NFI_NXM_NX_IPV6_SRC_W: - case NFI_NXM_NX_IPV6_DST: - case NFI_NXM_NX_IPV6_DST_W: - case NFI_NXM_NX_ICMPV6_TYPE: - case NFI_NXM_NX_ICMPV6_CODE: - case N_NXM_FIELDS: - NOT_REACHED(); + n_bits %= 8; + src--; + dst--; + } + if (n_bits) { + uint8_t mask = (1 << n_bits) - 1; + *dst = (*dst & ~mask) | (*src & mask); + } + } else { + while (n_bits > 0) { + unsigned int max_copy = 8 - MAX(src_ofs, dst_ofs); + unsigned int chunk = MIN(n_bits, max_copy); + uint8_t mask = ((1 << chunk) - 1) << dst_ofs; + + *dst &= ~mask; + *dst |= ((*src >> src_ofs) << dst_ofs) & mask; + + src_ofs += chunk; + if (src_ofs == 8) { + src--; + src_ofs = 0; + } + dst_ofs += chunk; + if (dst_ofs == 8) { + dst--; + dst_ofs = 0; + } + n_bits -= chunk; + } } } +/* Returns the value of the NXM field corresponding to 'header' at 'ofs_nbits' + * in 'flow'. */ +uint64_t +nxm_read_field_bits(ovs_be32 header, ovs_be16 ofs_nbits, + const struct flow *flow) +{ + const struct mf_field *field = mf_from_nxm_header(ntohl(header)); + union mf_value value; + union mf_value bits; + + mf_get_value(field, flow, &value); + bits.be64 = htonll(0); + bitwise_copy(&value, field->n_bytes, nxm_decode_ofs(ofs_nbits), + &bits, sizeof bits.be64, 0, + nxm_decode_n_bits(ofs_nbits)); + return ntohll(bits.be64); +} + void nxm_execute_reg_move(const struct nx_action_reg_move *action, struct flow *flow) { - /* Preparation. */ - int n_bits = ntohs(action->n_bits); - uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1; - - /* Get the interesting bits of the source field. */ - const struct nxm_field *src = nxm_field_lookup(ntohl(action->src)); - int src_ofs = ntohs(action->src_ofs); - uint64_t src_data = nxm_read_field(src, flow) & (mask << src_ofs); - - /* Get the remaining bits of the destination field. */ - const struct nxm_field *dst = nxm_field_lookup(ntohl(action->dst)); - int dst_ofs = ntohs(action->dst_ofs); - uint64_t dst_data = nxm_read_field(dst, flow) & ~(mask << dst_ofs); - - /* Get the final value. */ - uint64_t new_data = dst_data | ((src_data >> src_ofs) << dst_ofs); - - nxm_write_field(dst, flow, new_data); + const struct mf_field *src = mf_from_nxm_header(ntohl(action->src)); + const struct mf_field *dst = mf_from_nxm_header(ntohl(action->dst)); + union mf_value src_value; + union mf_value dst_value; + + mf_get_value(dst, flow, &dst_value); + mf_get_value(src, flow, &src_value); + bitwise_copy(&src_value, src->n_bytes, ntohs(action->src_ofs), + &dst_value, dst->n_bytes, ntohs(action->dst_ofs), + ntohs(action->n_bits)); + mf_set_flow_value(dst, &dst_value, flow); } void nxm_execute_reg_load(const struct nx_action_reg_load *action, struct flow *flow) { - /* Preparation. */ - int n_bits = nxm_decode_n_bits(action->ofs_nbits); - uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1; - - /* Get source data. */ - uint64_t src_data = ntohll(action->value); - - /* Get remaining bits of the destination field. */ - const struct nxm_field *dst = nxm_field_lookup(ntohl(action->dst)); - int dst_ofs = nxm_decode_ofs(action->ofs_nbits); - uint64_t dst_data = nxm_read_field(dst, flow) & ~(mask << dst_ofs); - - /* Get the final value. 
*/ - uint64_t new_data = dst_data | (src_data << dst_ofs); + nxm_reg_load(action->dst, action->ofs_nbits, ntohll(action->value), flow); +} - nxm_write_field(dst, flow, new_data); +/* Calculates ofs and n_bits from the given 'ofs_nbits' parameter, and copies + * 'src_data'[0:n_bits] to 'dst_header'[ofs:ofs+n_bits] in the given 'flow'. */ +void +nxm_reg_load(ovs_be32 dst_header, ovs_be16 ofs_nbits, uint64_t src_data, + struct flow *flow) +{ + const struct mf_field *dst = mf_from_nxm_header(ntohl(dst_header)); + int n_bits = nxm_decode_n_bits(ofs_nbits); + int dst_ofs = nxm_decode_ofs(ofs_nbits); + union mf_value dst_value; + union mf_value src_value; + + mf_get_value(dst, flow, &dst_value); + src_value.be64 = htonll(src_data); + bitwise_copy(&src_value, sizeof src_value.be64, 0, + &dst_value, dst->n_bytes, dst_ofs, + n_bits); + mf_set_flow_value(dst, &dst_value, flow); }
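Editor's note, not part of the patch: a minimal standalone sketch of what nxm_reg_load() computes when the destination field is at most 64 bits wide. It restates, in plain arithmetic, the open-coded mask-and-shift that this hunk removes from nxm_execute_reg_load(); the demo values in main() (ofs = 4, n_bits = 12, value = 0xabc, register contents 0x1) are made up purely for illustration.

    /* Standalone sketch (not from the patch): the register-load semantics that
     * nxm_reg_load() implements, written with 64-bit arithmetic.  The patched
     * code instead routes through bitwise_copy() so the same path also handles
     * fields that are not 64 bits wide. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        unsigned int ofs = 4, n_bits = 12;  /* as decoded from 'ofs_nbits'. */
        uint64_t value = 0xabc;             /* NXAST_REG_LOAD immediate. */
        uint64_t reg = 0x1;                 /* current field contents. */

        /* Clear bits [ofs, ofs + n_bits) of the field, then splice in 'value'. */
        uint64_t mask = n_bits == 64 ? UINT64_MAX : (UINT64_C(1) << n_bits) - 1;
        reg = (reg & ~(mask << ofs)) | ((value & mask) << ofs);

        printf("reg = %#"PRIx64"\n", reg);  /* prints reg = 0xabc1 */
        return 0;
    }

Going through bitwise_copy() instead of this 64-bit formula is what lets the rewritten load/move code operate on any maskable mf_field, including ones wider than 64 bits such as IPv6 addresses.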