2 * Copyright (c) 2010, 2011, 2012, 2013 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
21 #include <netinet/icmp6.h>
23 #include "classifier.h"
24 #include "dynamic-string.h"
25 #include "meta-flow.h"
26 #include "ofp-actions.h"
27 #include "ofp-errors.h"
30 #include "openflow/nicira-ext.h"
32 #include "unaligned.h"
36 VLOG_DEFINE_THIS_MODULE(nx_match);
38 /* Rate limit for nx_match parse errors. These always indicate a bug in the
39 * peer and so there's not much point in showing a lot of them. */
40 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
42 /* Returns the width of the data for a field with the given 'header', in
45 nxm_field_bytes(uint32_t header)
47 unsigned int length = NXM_LENGTH(header);
48 return NXM_HASMASK(header) ? length / 2 : length;
51 /* Returns the width of the data for a field with the given 'header', in
54 nxm_field_bits(uint32_t header)
56 return nxm_field_bytes(header) * 8;
59 /* nx_pull_match() and helpers. */
62 nx_entry_ok(const void *p, unsigned int match_len)
64 unsigned int payload_len;
70 VLOG_DBG_RL(&rl, "nx_match ends with partial (%u-byte) nxm_header",
75 memcpy(&header_be, p, 4);
76 header = ntohl(header_be);
78 payload_len = NXM_LENGTH(header);
80 VLOG_DBG_RL(&rl, "nxm_entry %08"PRIx32" has invalid payload "
84 if (match_len < payload_len + 4) {
85 VLOG_DBG_RL(&rl, "%"PRIu32"-byte nxm_entry but only "
86 "%u bytes left in nx_match", payload_len + 4, match_len);
93 /* Given NXM/OXM value 'value' and mask 'mask', each 'width' bytes long,
94 * checks for any 1-bit in the value where there is a 0-bit in the mask. If it
95 * finds one, logs a warning. */
97 check_mask_consistency(const uint8_t *p, const struct mf_field *mf)
99 unsigned int width = mf->n_bytes;
100 const uint8_t *value = p + 4;
101 const uint8_t *mask = p + 4 + width;
104 for (i = 0; i < width; i++) {
105 if (value[i] & ~mask[i]) {
106 if (!VLOG_DROP_WARN(&rl)) {
107 char *s = nx_match_to_string(p, width * 2 + 4);
108 VLOG_WARN_RL(&rl, "NXM/OXM entry %s has 1-bits in value for "
109 "bits wildcarded by the mask. (Future versions "
110 "of OVS may report this as an OpenFlow error.)",
/* Parses the NXM/OXM entries in the 'match_len' bytes at 'p' into 'match'.
 * When 'cookie'/'cookie_mask' are nonnull (they must be nonnull together,
 * per the assertion below), a NXM_NX_COOKIE* entry is extracted into them
 * instead of the match.  In 'strict' mode an unknown field is an error;
 * otherwise it appears to be skipped.  Returns 0 on success or an
 * OpenFlow OFPERR_* value.
 *
 * NOTE(review): garbled extract -- original line numbers are fused into
 * each line and some source lines are missing; code kept byte-identical. */
119 nx_pull_raw(const uint8_t *p, unsigned int match_len, bool strict,
120 struct match *match, ovs_be64 *cookie, ovs_be64 *cookie_mask)
124 ovs_assert((cookie != NULL) == (cookie_mask != NULL));
126 match_init_catchall(match);
128 *cookie = *cookie_mask = htonll(0);
/* Iterate over each well-formed nxm_entry; each is 4 header bytes plus
 * NXM_LENGTH(header) payload bytes. */
135 (header = nx_entry_ok(p, match_len)) != 0;
136 p += 4 + NXM_LENGTH(header), match_len -= 4 + NXM_LENGTH(header)) {
137 const struct mf_field *mf;
140 mf = mf_from_nxm_header(header);
143 error = OFPERR_OFPBMC_BAD_FIELD;
147 } else if (!mf_are_prereqs_ok(mf, &match->flow)) {
148 error = OFPERR_OFPBMC_BAD_PREREQ;
149 } else if (!mf_is_all_wild(mf, &match->wc)) {
/* A field may appear at most once in an nx_match. */
150 error = OFPERR_OFPBMC_DUP_FIELD;
152 unsigned int width = mf->n_bytes;
153 union mf_value value;
155 memcpy(&value, p + 4, width);
156 if (!mf_is_value_valid(mf, &value)) {
157 error = OFPERR_OFPBMC_BAD_VALUE;
158 } else if (!NXM_HASMASK(header)) {
160 mf_set_value(mf, &value, match);
164 memcpy(&mask, p + 4 + width, width);
165 if (!mf_is_mask_valid(mf, &mask)) {
166 error = OFPERR_OFPBMC_BAD_MASK;
169 check_mask_consistency(p, mf);
170 mf_set(mf, &value, &mask, match);
175 /* Check if the match is for a cookie rather than a classifier rule. */
176 if ((header == NXM_NX_COOKIE || header == NXM_NX_COOKIE_W) && cookie) {
178 error = OFPERR_OFPBMC_DUP_FIELD;
180 unsigned int width = sizeof *cookie;
182 memcpy(cookie, p + 4, width);
183 if (NXM_HASMASK(header)) {
184 memcpy(cookie_mask, p + 4 + width, width);
/* Unmasked cookie: match on all 64 bits. */
186 *cookie_mask = OVS_BE64_MAX;
193 VLOG_DBG_RL(&rl, "bad nxm_entry %#08"PRIx32" (vendor=%"PRIu32", "
194 "field=%"PRIu32", hasmask=%"PRIu32", len=%"PRIu32"), "
196 NXM_VENDOR(header), NXM_FIELD(header),
197 NXM_HASMASK(header), NXM_LENGTH(header),
198 ofperr_to_string(error));
/* Leftover bytes that do not form a complete entry are an error. */
203 return match_len ? OFPERR_OFPBMC_BAD_LEN : 0;
207 nx_pull_match__(struct ofpbuf *b, unsigned int match_len, bool strict,
209 ovs_be64 *cookie, ovs_be64 *cookie_mask)
214 p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
216 VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a "
217 "multiple of 8, is longer than space in message (max "
218 "length %"PRIuSIZE")", match_len, b->size);
219 return OFPERR_OFPBMC_BAD_LEN;
223 return nx_pull_raw(p, match_len, strict, match, cookie, cookie_mask);
226 /* Parses the nx_match formatted match description in 'b' with length
227 * 'match_len'. Stores the results in 'match'. If 'cookie' and 'cookie_mask'
228 * are valid pointers, then stores the cookie and mask in them if 'b' contains
229 * a "NXM_NX_COOKIE*" match. Otherwise, stores 0 in both.
231 * Fails with an error upon encountering an unknown NXM header.
233 * Returns 0 if successful, otherwise an OpenFlow error code. */
235 nx_pull_match(struct ofpbuf *b, unsigned int match_len, struct match *match,
236 ovs_be64 *cookie, ovs_be64 *cookie_mask)
238 return nx_pull_match__(b, match_len, true, match, cookie, cookie_mask);
241 /* Behaves the same as nx_pull_match(), but skips over unknown NXM headers,
242 * instead of failing with an error. */
244 nx_pull_match_loose(struct ofpbuf *b, unsigned int match_len,
246 ovs_be64 *cookie, ovs_be64 *cookie_mask)
248 return nx_pull_match__(b, match_len, false, match, cookie, cookie_mask);
252 oxm_pull_match__(struct ofpbuf *b, bool strict, struct match *match)
254 struct ofp11_match_header *omh = b->data;
258 if (b->size < sizeof *omh) {
259 return OFPERR_OFPBMC_BAD_LEN;
262 match_len = ntohs(omh->length);
263 if (match_len < sizeof *omh) {
264 return OFPERR_OFPBMC_BAD_LEN;
267 if (omh->type != htons(OFPMT_OXM)) {
268 return OFPERR_OFPBMC_BAD_TYPE;
271 p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
273 VLOG_DBG_RL(&rl, "oxm length %u, rounded up to a "
274 "multiple of 8, is longer than space in message (max "
275 "length %"PRIuSIZE")", match_len, b->size);
276 return OFPERR_OFPBMC_BAD_LEN;
279 return nx_pull_raw(p + sizeof *omh, match_len - sizeof *omh,
280 strict, match, NULL, NULL);
283 /* Parses the oxm formatted match description preceded by a struct
284 * ofp11_match_header in 'b'. Stores the result in 'match'.
286 * Fails with an error when encountering unknown OXM headers.
288 * Returns 0 if successful, otherwise an OpenFlow error code. */
290 oxm_pull_match(struct ofpbuf *b, struct match *match)
292 return oxm_pull_match__(b, true, match);
295 /* Behaves the same as oxm_pull_match() with one exception. Skips over unknown
296 * OXM headers instead of failing with an error when they are encountered. */
298 oxm_pull_match_loose(struct ofpbuf *b, struct match *match)
300 return oxm_pull_match__(b, false, match);
303 /* nx_put_match() and helpers.
305 * 'put' functions whose names end in 'w' add a wildcarded field.
306 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
307 * Other 'put' functions add exact-match fields.
311 nxm_put_header(struct ofpbuf *b, uint32_t header)
313 ovs_be32 n_header = htonl(header);
314 ofpbuf_put(b, &n_header, sizeof n_header);
318 nxm_put_8(struct ofpbuf *b, uint32_t header, uint8_t value)
320 nxm_put_header(b, header);
321 ofpbuf_put(b, &value, sizeof value);
325 nxm_put_8m(struct ofpbuf *b, uint32_t header, uint8_t value, uint8_t mask)
332 nxm_put_8(b, header, value);
336 nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
337 ofpbuf_put(b, &value, sizeof value);
338 ofpbuf_put(b, &mask, sizeof mask);
343 nxm_put_16(struct ofpbuf *b, uint32_t header, ovs_be16 value)
345 nxm_put_header(b, header);
346 ofpbuf_put(b, &value, sizeof value);
350 nxm_put_16w(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
352 nxm_put_header(b, header);
353 ofpbuf_put(b, &value, sizeof value);
354 ofpbuf_put(b, &mask, sizeof mask);
358 nxm_put_16m(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
365 nxm_put_16(b, header, value);
369 nxm_put_16w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
375 nxm_put_32(struct ofpbuf *b, uint32_t header, ovs_be32 value)
377 nxm_put_header(b, header);
378 ofpbuf_put(b, &value, sizeof value);
382 nxm_put_32w(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
384 nxm_put_header(b, header);
385 ofpbuf_put(b, &value, sizeof value);
386 ofpbuf_put(b, &mask, sizeof mask);
390 nxm_put_32m(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
397 nxm_put_32(b, header, value);
401 nxm_put_32w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
407 nxm_put_64(struct ofpbuf *b, uint32_t header, ovs_be64 value)
409 nxm_put_header(b, header);
410 ofpbuf_put(b, &value, sizeof value);
414 nxm_put_64w(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
416 nxm_put_header(b, header);
417 ofpbuf_put(b, &value, sizeof value);
418 ofpbuf_put(b, &mask, sizeof mask);
422 nxm_put_64m(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
429 nxm_put_64(b, header, value);
433 nxm_put_64w(b, NXM_MAKE_WILD_HEADER(header), value, mask);
439 nxm_put_eth(struct ofpbuf *b, uint32_t header,
440 const uint8_t value[ETH_ADDR_LEN])
442 nxm_put_header(b, header);
443 ofpbuf_put(b, value, ETH_ADDR_LEN);
447 nxm_put_eth_masked(struct ofpbuf *b, uint32_t header,
448 const uint8_t value[ETH_ADDR_LEN],
449 const uint8_t mask[ETH_ADDR_LEN])
451 if (!eth_addr_is_zero(mask)) {
452 if (eth_mask_is_exact(mask)) {
453 nxm_put_eth(b, header, value);
455 nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
456 ofpbuf_put(b, value, ETH_ADDR_LEN);
457 ofpbuf_put(b, mask, ETH_ADDR_LEN);
463 nxm_put_ipv6(struct ofpbuf *b, uint32_t header,
464 const struct in6_addr *value, const struct in6_addr *mask)
466 if (ipv6_mask_is_any(mask)) {
468 } else if (ipv6_mask_is_exact(mask)) {
469 nxm_put_header(b, header);
470 ofpbuf_put(b, value, sizeof *value);
472 nxm_put_header(b, NXM_MAKE_WILD_HEADER(header));
473 ofpbuf_put(b, value, sizeof *value);
474 ofpbuf_put(b, mask, sizeof *mask);
479 nxm_put_frag(struct ofpbuf *b, const struct match *match)
481 uint8_t nw_frag = match->flow.nw_frag;
482 uint8_t nw_frag_mask = match->wc.masks.nw_frag;
484 switch (nw_frag_mask) {
488 case FLOW_NW_FRAG_MASK:
489 nxm_put_8(b, NXM_NX_IP_FRAG, nw_frag);
493 nxm_put_8m(b, NXM_NX_IP_FRAG, nw_frag,
494 nw_frag_mask & FLOW_NW_FRAG_MASK);
/* Appends to 'b' the L3/L4 portions of 'match': fragmentation bits, DSCP,
 * ECN, TTL (NXM only), IP protocol, and then the transport fields for TCP,
 * UDP, SCTP, or ICMP ('icmp_proto'/'icmp_type'/'icmp_code' select the
 * ICMPv4 vs. ICMPv6 variants).  'oxm' selects OXM vs. NXM headers.
 *
 * NOTE(review): garbled extract -- original line numbers are fused into
 * each line and some source lines are missing; code kept byte-identical. */
500 nxm_put_ip(struct ofpbuf *b, const struct match *match,
501 uint8_t icmp_proto, uint32_t icmp_type, uint32_t icmp_code,
504 const struct flow *flow = &match->flow;
506 nxm_put_frag(b, match);
508 if (match->wc.masks.nw_tos & IP_DSCP_MASK) {
/* OXM carries the 6-bit DSCP right-aligned; NXM carries it in place. */
510 nxm_put_8(b, OXM_OF_IP_DSCP, flow->nw_tos >> 2);
512 nxm_put_8(b, NXM_OF_IP_TOS, flow->nw_tos & IP_DSCP_MASK);
516 if (match->wc.masks.nw_tos & IP_ECN_MASK) {
517 nxm_put_8(b, oxm ? OXM_OF_IP_ECN : NXM_NX_IP_ECN,
518 flow->nw_tos & IP_ECN_MASK);
/* TTL is a Nicira extension only; OXM has no standard TTL match here. */
521 if (!oxm && match->wc.masks.nw_ttl) {
522 nxm_put_8(b, NXM_NX_IP_TTL, flow->nw_ttl);
525 if (match->wc.masks.nw_proto) {
526 nxm_put_8(b, oxm ? OXM_OF_IP_PROTO : NXM_OF_IP_PROTO, flow->nw_proto);
528 if (flow->nw_proto == IPPROTO_TCP) {
529 nxm_put_16m(b, oxm ? OXM_OF_TCP_SRC : NXM_OF_TCP_SRC,
530 flow->tp_src, match->wc.masks.tp_src);
531 nxm_put_16m(b, oxm ? OXM_OF_TCP_DST : NXM_OF_TCP_DST,
532 flow->tp_dst, match->wc.masks.tp_dst);
533 nxm_put_16m(b, NXM_NX_TCP_FLAGS,
534 flow->tcp_flags, match->wc.masks.tcp_flags);
535 } else if (flow->nw_proto == IPPROTO_UDP) {
536 nxm_put_16m(b, oxm ? OXM_OF_UDP_SRC : NXM_OF_UDP_SRC,
537 flow->tp_src, match->wc.masks.tp_src);
538 nxm_put_16m(b, oxm ? OXM_OF_UDP_DST : NXM_OF_UDP_DST,
539 flow->tp_dst, match->wc.masks.tp_dst);
540 } else if (flow->nw_proto == IPPROTO_SCTP) {
/* SCTP has only OXM headers here; no NXM alternative is chosen. */
541 nxm_put_16m(b, OXM_OF_SCTP_SRC, flow->tp_src,
542 match->wc.masks.tp_src);
543 nxm_put_16m(b, OXM_OF_SCTP_DST, flow->tp_dst,
544 match->wc.masks.tp_dst);
545 } else if (flow->nw_proto == icmp_proto) {
/* ICMP type/code are stored in tp_src/tp_dst as network-order 16-bit
 * values; only the low byte is emitted. */
546 if (match->wc.masks.tp_src) {
547 nxm_put_8(b, icmp_type, ntohs(flow->tp_src));
549 if (match->wc.masks.tp_dst) {
550 nxm_put_8(b, icmp_code, ntohs(flow->tp_dst));
556 /* Appends to 'b' the nx_match format that expresses 'match'. For Flow Mod and
557 * Flow Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied.
558 * Otherwise, 'cookie_mask' should be zero.
560 * This function can cause 'b''s data to be reallocated.
562 * Returns the number of bytes appended to 'b', excluding padding.
564 * If 'match' is a catch-all rule that matches every packet, then this function
565 * appends nothing to 'b' and returns 0. */
/* NOTE(review): garbled extract -- original line numbers are fused into
 * each line and some source lines are missing; code kept byte-identical. */
567 nx_put_raw(struct ofpbuf *b, bool oxm, const struct match *match,
568 ovs_be64 cookie, ovs_be64 cookie_mask)
570 const struct flow *flow = &match->flow;
571 const size_t start_len = b->size;
/* Reminder to update this function when struct flow changes. */
575 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 24);
578 if (match->wc.masks.in_port.ofp_port) {
579 ofp_port_t in_port = flow->in_port.ofp_port;
581 nxm_put_32(b, OXM_OF_IN_PORT, ofputil_port_to_ofp11(in_port));
583 nxm_put_16(b, NXM_OF_IN_PORT, htons(ofp_to_u16(in_port)));
/* Ethernet. */
588 nxm_put_eth_masked(b, oxm ? OXM_OF_ETH_SRC : NXM_OF_ETH_SRC,
589 flow->dl_src, match->wc.masks.dl_src);
590 nxm_put_eth_masked(b, oxm ? OXM_OF_ETH_DST : NXM_OF_ETH_DST,
591 flow->dl_dst, match->wc.masks.dl_dst);
592 nxm_put_16m(b, oxm ? OXM_OF_ETH_TYPE : NXM_OF_ETH_TYPE,
593 ofputil_dl_type_to_openflow(flow->dl_type),
594 match->wc.masks.dl_type);
/* 802.1Q: OXM splits VID/CFI and PCP; NXM carries the whole TCI. */
598 ovs_be16 VID_CFI_MASK = htons(VLAN_VID_MASK | VLAN_CFI);
599 ovs_be16 vid = flow->vlan_tci & VID_CFI_MASK;
600 ovs_be16 mask = match->wc.masks.vlan_tci & VID_CFI_MASK;
602 if (mask == htons(VLAN_VID_MASK | VLAN_CFI)) {
603 nxm_put_16(b, OXM_OF_VLAN_VID, vid);
605 nxm_put_16m(b, OXM_OF_VLAN_VID, vid, mask);
608 if (vid && vlan_tci_to_pcp(match->wc.masks.vlan_tci)) {
609 nxm_put_8(b, OXM_OF_VLAN_PCP, vlan_tci_to_pcp(flow->vlan_tci));
613 nxm_put_16m(b, NXM_OF_VLAN_TCI, flow->vlan_tci,
614 match->wc.masks.vlan_tci);
/* MPLS (first label-stack entry only). */
618 if (eth_type_mpls(flow->dl_type)) {
619 if (match->wc.masks.mpls_lse[0] & htonl(MPLS_TC_MASK)) {
620 nxm_put_8(b, OXM_OF_MPLS_TC, mpls_lse_to_tc(flow->mpls_lse[0]));
623 if (match->wc.masks.mpls_lse[0] & htonl(MPLS_BOS_MASK)) {
624 nxm_put_8(b, OXM_OF_MPLS_BOS, mpls_lse_to_bos(flow->mpls_lse[0]));
627 if (match->wc.masks.mpls_lse[0] & htonl(MPLS_LABEL_MASK)) {
628 nxm_put_32(b, OXM_OF_MPLS_LABEL,
629 htonl(mpls_lse_to_label(flow->mpls_lse[0])));
/* L3: IPv4 / IPv6 / (R)ARP. */
634 if (flow->dl_type == htons(ETH_TYPE_IP)) {
636 nxm_put_32m(b, oxm ? OXM_OF_IPV4_SRC : NXM_OF_IP_SRC,
637 flow->nw_src, match->wc.masks.nw_src);
638 nxm_put_32m(b, oxm ? OXM_OF_IPV4_DST : NXM_OF_IP_DST,
639 flow->nw_dst, match->wc.masks.nw_dst);
640 nxm_put_ip(b, match, IPPROTO_ICMP,
641 oxm ? OXM_OF_ICMPV4_TYPE : NXM_OF_ICMP_TYPE,
642 oxm ? OXM_OF_ICMPV4_CODE : NXM_OF_ICMP_CODE, oxm);
643 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
645 nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_SRC : NXM_NX_IPV6_SRC,
646 &flow->ipv6_src, &match->wc.masks.ipv6_src);
647 nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_DST : NXM_NX_IPV6_DST,
648 &flow->ipv6_dst, &match->wc.masks.ipv6_dst);
649 nxm_put_ip(b, match, IPPROTO_ICMPV6,
650 oxm ? OXM_OF_ICMPV6_TYPE : NXM_NX_ICMPV6_TYPE,
651 oxm ? OXM_OF_ICMPV6_CODE : NXM_NX_ICMPV6_CODE, oxm);
653 nxm_put_32m(b, oxm ? OXM_OF_IPV6_FLABEL : NXM_NX_IPV6_LABEL,
654 flow->ipv6_label, match->wc.masks.ipv6_label);
/* IPv6 Neighbor Discovery target and link-layer addresses. */
656 if (flow->nw_proto == IPPROTO_ICMPV6
657 && (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) ||
658 flow->tp_src == htons(ND_NEIGHBOR_ADVERT))) {
659 nxm_put_ipv6(b, oxm ? OXM_OF_IPV6_ND_TARGET : NXM_NX_ND_TARGET,
660 &flow->nd_target, &match->wc.masks.nd_target);
661 if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
662 nxm_put_eth_masked(b, oxm ? OXM_OF_IPV6_ND_SLL : NXM_NX_ND_SLL,
663 flow->arp_sha, match->wc.masks.arp_sha);
665 if (flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
666 nxm_put_eth_masked(b, oxm ? OXM_OF_IPV6_ND_TLL : NXM_NX_ND_TLL,
667 flow->arp_tha, match->wc.masks.arp_tha);
670 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
671 flow->dl_type == htons(ETH_TYPE_RARP)) {
/* ARP opcode lives in nw_proto. */
673 if (match->wc.masks.nw_proto) {
674 nxm_put_16(b, oxm ? OXM_OF_ARP_OP : NXM_OF_ARP_OP,
675 htons(flow->nw_proto));
677 nxm_put_32m(b, oxm ? OXM_OF_ARP_SPA : NXM_OF_ARP_SPA,
678 flow->nw_src, match->wc.masks.nw_src);
679 nxm_put_32m(b, oxm ? OXM_OF_ARP_TPA : NXM_OF_ARP_TPA,
680 flow->nw_dst, match->wc.masks.nw_dst);
681 nxm_put_eth_masked(b, oxm ? OXM_OF_ARP_SHA : NXM_NX_ARP_SHA,
682 flow->arp_sha, match->wc.masks.arp_sha);
683 nxm_put_eth_masked(b, oxm ? OXM_OF_ARP_THA : NXM_NX_ARP_THA,
684 flow->arp_tha, match->wc.masks.arp_tha);
/* Tunnel ID and metadata. */
688 nxm_put_64m(b, oxm ? OXM_OF_TUNNEL_ID : NXM_NX_TUN_ID,
689 flow->tunnel.tun_id, match->wc.masks.tunnel.tun_id);
691 /* Other tunnel metadata. */
692 nxm_put_32m(b, NXM_NX_TUN_IPV4_SRC,
693 flow->tunnel.ip_src, match->wc.masks.tunnel.ip_src);
694 nxm_put_32m(b, NXM_NX_TUN_IPV4_DST,
695 flow->tunnel.ip_dst, match->wc.masks.tunnel.ip_dst);
/* Nicira registers. */
698 for (i = 0; i < FLOW_N_REGS; i++) {
699 nxm_put_32m(b, NXM_NX_REG(i),
700 htonl(flow->regs[i]), htonl(match->wc.masks.regs[i]));
/* Packet mark. */
704 nxm_put_32m(b, NXM_NX_PKT_MARK, htonl(flow->pkt_mark),
705 htonl(match->wc.masks.pkt_mark));
707 /* OpenFlow 1.1+ Metadata. */
708 nxm_put_64m(b, OXM_OF_METADATA, flow->metadata, match->wc.masks.metadata);
/* Cookie (NXM only; masked away when cookie_mask is zero). */
711 nxm_put_64m(b, NXM_NX_COOKIE, cookie, cookie_mask);
713 match_len = b->size - start_len;
717 /* Appends to 'b' the nx_match format that expresses 'match', plus enough zero
718 * bytes to pad the nx_match out to a multiple of 8. For Flow Mod and Flow
719 * Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied.
720 * Otherwise, 'cookie_mask' should be zero.
722 * This function can cause 'b''s data to be reallocated.
724 * Returns the number of bytes appended to 'b', excluding padding. The return
725 * value can be zero if it appended nothing at all to 'b' (which happens if
726 * 'cr' is a catch-all rule that matches every packet). */
728 nx_put_match(struct ofpbuf *b, const struct match *match,
729 ovs_be64 cookie, ovs_be64 cookie_mask)
731 int match_len = nx_put_raw(b, false, match, cookie, cookie_mask);
733 ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
738 /* Appends to 'b' an struct ofp11_match_header followed by the oxm format that
739 * expresses 'cr', plus enough zero bytes to pad the data appended out to a
742 * This function can cause 'b''s data to be reallocated.
744 * Returns the number of bytes appended to 'b', excluding the padding. Never
747 oxm_put_match(struct ofpbuf *b, const struct match *match)
750 struct ofp11_match_header *omh;
751 size_t start_len = b->size;
752 ovs_be64 cookie = htonll(0), cookie_mask = htonll(0);
754 ofpbuf_put_uninit(b, sizeof *omh);
755 match_len = nx_put_raw(b, true, match, cookie, cookie_mask) + sizeof *omh;
756 ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
758 omh = ofpbuf_at(b, start_len, sizeof *omh);
759 omh->type = htons(OFPMT_OXM);
760 omh->length = htons(match_len);
765 /* nx_match_to_string() and helpers. */
767 static void format_nxm_field_name(struct ds *, uint32_t header);
/* Returns a malloc()'d, human-readable string for the 'match_len' bytes of
 * nx_match at 'p' (e.g. "NXM_OF_IN_PORT(0001), ...").  The caller must
 * free the result.
 *
 * NOTE(review): garbled extract -- original line numbers are fused into
 * each line and some source lines are missing; code kept byte-identical. */
770 nx_match_to_string(const uint8_t *p, unsigned int match_len)
/* An empty match is the catch-all. */
776 return xstrdup("<any>");
780 while ((header = nx_entry_ok(p, match_len)) != 0) {
781 unsigned int length = NXM_LENGTH(header);
782 unsigned int value_len = nxm_field_bytes(header);
783 const uint8_t *value = p + 4;
784 const uint8_t *mask = value + value_len;
788 ds_put_cstr(&s, ", ");
791 format_nxm_field_name(&s, header);
792 ds_put_char(&s, '(');
/* Hex-dump the value, then "/<mask>" if the entry is masked. */
794 for (i = 0; i < value_len; i++) {
795 ds_put_format(&s, "%02x", value[i]);
797 if (NXM_HASMASK(header)) {
798 ds_put_char(&s, '/');
799 for (i = 0; i < value_len; i++) {
800 ds_put_format(&s, "%02x", mask[i]);
803 ds_put_char(&s, ')');
806 match_len -= 4 + length;
/* Any trailing bytes that do not form a valid entry are reported. */
811 ds_put_cstr(&s, ", ");
814 ds_put_format(&s, "<%u invalid bytes>", match_len);
817 return ds_steal_cstr(&s);
/* Returns a malloc()'d, human-readable string for the OXM match in 'p'
 * (header plus body), validating the ofp11_match_header first.  The caller
 * must free the result.
 *
 * NOTE(review): garbled extract -- original line numbers are fused into
 * each line and some source lines are missing; code kept byte-identical. */
821 oxm_match_to_string(const struct ofpbuf *p, unsigned int match_len)
823 const struct ofp11_match_header *omh = p->data;
829 if (match_len < sizeof *omh) {
830 ds_put_format(&s, "<match too short: %u>", match_len);
834 if (omh->type != htons(OFPMT_OXM)) {
835 ds_put_format(&s, "<bad match type field: %u>", ntohs(omh->type));
839 match_len_ = ntohs(omh->length);
840 if (match_len_ < sizeof *omh) {
841 ds_put_format(&s, "<match length field too short: %u>", match_len_);
/* The header's length field must agree with the caller-supplied length. */
845 if (match_len_ != match_len) {
846 ds_put_format(&s, "<match length field incorrect: %u != %u>",
847 match_len_, match_len);
/* Valid header: delegate to the generic NXM/OXM body formatter. */
851 return nx_match_to_string(ofpbuf_at(p, sizeof *omh, 0),
852 match_len - sizeof *omh);
/* Error path: return the diagnostic built above. */
855 return ds_steal_cstr(&s);
/* Appends to 's' a symbolic name for NXM/OXM 'header': the mf_field's
 * oxm/nxm name (with a "_W" suffix when masked), the special cookie names,
 * or a numeric "vendor:field" fallback for unknown headers.
 *
 * NOTE(review): garbled extract -- original line numbers are fused into
 * each line and some source lines are missing; code kept byte-identical. */
859 format_nxm_field_name(struct ds *s, uint32_t header)
861 const struct mf_field *mf = mf_from_nxm_header(header);
863 ds_put_cstr(s, IS_OXM_HEADER(header) ? mf->oxm_name : mf->nxm_name);
864 if (NXM_HASMASK(header)) {
865 ds_put_cstr(s, "_W");
867 } else if (header == NXM_NX_COOKIE) {
868 ds_put_cstr(s, "NXM_NX_COOKIE");
869 } else if (header == NXM_NX_COOKIE_W) {
870 ds_put_cstr(s, "NXM_NX_COOKIE_W");
/* Unknown header: print vendor and field numerically. */
872 ds_put_format(s, "%d:%d", NXM_VENDOR(header), NXM_FIELD(header));
/* Parses 'name' (of length 'name_len') as an NXM/OXM field name -- with an
 * optional "_W" suffix requesting the masked variant -- the special cookie
 * names, or a raw 32-bit hex header value.  Appears to return the matching
 * header, presumably 0 on failure (return paths are missing from this
 * extract -- TODO confirm).
 *
 * NOTE(review): garbled extract -- original line numbers are fused into
 * each line and some source lines are missing; code kept byte-identical. */
877 parse_nxm_field_name(const char *name, int name_len)
882 /* Check whether it's a field name. */
883 wild = name_len > 2 && !memcmp(&name[name_len - 2], "_W", 2);
888 for (i = 0; i < MFF_N_IDS; i++) {
889 const struct mf_field *mf = mf_from_id(i);
893 !strncmp(mf->nxm_name, name, name_len) &&
894 mf->nxm_name[name_len] == '\0') {
895 header = mf->nxm_header;
896 } else if (mf->oxm_name &&
897 !strncmp(mf->oxm_name, name, name_len) &&
898 mf->oxm_name[name_len] == '\0') {
899 header = mf->oxm_header;
/* "_W" suffix requests a masked header, which needs a maskable field. */
906 } else if (mf->maskable != MFM_NONE) {
907 return NXM_MAKE_WILD_HEADER(header);
911 if (!strncmp("NXM_NX_COOKIE", name, name_len) &&
912 (name_len == strlen("NXM_NX_COOKIE"))) {
914 return NXM_NX_COOKIE;
916 return NXM_NX_COOKIE_W;
920 /* Check whether it's a 32-bit field header value as hex.
921 * (This isn't ordinarily useful except for testing error behavior.) */
923 uint32_t header = hexits_value(name, name_len, NULL);
924 if (header != UINT_MAX) {
932 /* nx_match_from_string(). */
/* Parses the string 's' -- "<any>" or a comma-separated list of
 * "FIELD(hexvalue)" / "FIELD_W(hexvalue/hexmask)" entries -- and appends
 * the corresponding binary nx_match to 'b'.  Returns the number of bytes
 * appended.  Calls ovs_fatal() on any parse error (test/utility use only).
 *
 * NOTE(review): garbled extract -- original line numbers are fused into
 * each line and some source lines are missing; code kept byte-identical. */
935 nx_match_from_string_raw(const char *s, struct ofpbuf *b)
937 const char *full_s = s;
938 const size_t start_len = b->size;
940 if (!strcmp(s, "<any>")) {
941 /* Ensure that 'b->data' isn't actually null. */
942 ofpbuf_prealloc_tailroom(b, 1);
/* One "NAME(value[/mask])" entry per iteration, skipping separators. */
946 for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) {
953 name_len = strcspn(s, "(");
954 if (s[name_len] != '(') {
955 ovs_fatal(0, "%s: missing ( at end of nx_match", full_s);
958 header = parse_nxm_field_name(name, name_len);
960 ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
965 nxm_put_header(b, header);
966 s = ofpbuf_put_hex(b, s, &n);
967 if (n != nxm_field_bytes(header)) {
968 ovs_fatal(0, "%.2s: hex digits expected", s);
/* Masked headers additionally require a "/mask" of the same width. */
970 if (NXM_HASMASK(header)) {
973 ovs_fatal(0, "%s: missing / in masked field %.*s",
974 full_s, name_len, name);
976 s = ofpbuf_put_hex(b, s + 1, &n);
977 if (n != nxm_field_bytes(header)) {
978 ovs_fatal(0, "%.2s: hex digits expected", s);
984 ovs_fatal(0, "%s: missing ) following field %.*s",
985 full_s, name_len, name);
990 return b->size - start_len;
/* Parses 's' as an nx_match, appends it to 'b' padded with zeros to a
 * multiple of 8 bytes, and returns the unpadded length.
 *
 * NOTE(review): reconstructed from a garbled extract -- verify against the
 * pristine file. */
int
nx_match_from_string(const char *s, struct ofpbuf *b)
{
    int match_len = nx_match_from_string_raw(s, b);

    ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
    return match_len;
}
1002 oxm_match_from_string(const char *s, struct ofpbuf *b)
1005 struct ofp11_match_header *omh;
1006 size_t start_len = b->size;
1008 ofpbuf_put_uninit(b, sizeof *omh);
1009 match_len = nx_match_from_string_raw(s, b) + sizeof *omh;
1010 ofpbuf_put_zeros(b, ROUND_UP(match_len, 8) - match_len);
1012 omh = ofpbuf_at(b, start_len, sizeof *omh);
1013 omh->type = htons(OFPMT_OXM);
1014 omh->length = htons(match_len);
1019 /* Parses 's' as a "move" action, in the form described in ovs-ofctl(8), into
1022 * Returns NULL if successful, otherwise a malloc()'d string describing the
1023 * error. The caller is responsible for freeing the returned string. */
1024 char * WARN_UNUSED_RESULT
1025 nxm_parse_reg_move(struct ofpact_reg_move *move, const char *s)
1027 const char *full_s = s;
1030 error = mf_parse_subfield__(&move->src, &s);
1034 if (strncmp(s, "->", 2)) {
1035 return xasprintf("%s: missing `->' following source", full_s);
1038 error = mf_parse_subfield(&move->dst, s);
1043 if (move->src.n_bits != move->dst.n_bits) {
1044 return xasprintf("%s: source field is %d bits wide but destination is "
1045 "%d bits wide", full_s,
1046 move->src.n_bits, move->dst.n_bits);
1051 /* Parses 's' as a "load" action, in the form described in ovs-ofctl(8), into
1054 * Returns NULL if successful, otherwise a malloc()'d string describing the
1055 * error. The caller is responsible for freeing the returned string. */
1056 char * WARN_UNUSED_RESULT
1057 nxm_parse_reg_load(struct ofpact_reg_load *load, const char *s)
1059 const char *full_s = s;
1060 uint64_t value = strtoull(s, (char **) &s, 0);
1063 if (strncmp(s, "->", 2)) {
1064 return xasprintf("%s: missing `->' following value", full_s);
1067 error = mf_parse_subfield(&load->dst, s);
1072 if (load->dst.n_bits < 64 && (value >> load->dst.n_bits) != 0) {
1073 return xasprintf("%s: value %"PRIu64" does not fit into %d bits",
1074 full_s, value, load->dst.n_bits);
1077 load->subvalue.be64[0] = htonll(0);
1078 load->subvalue.be64[1] = htonll(value);
1082 /* nxm_format_reg_move(), nxm_format_reg_load(). */
1085 nxm_format_reg_move(const struct ofpact_reg_move *move, struct ds *s)
1087 ds_put_format(s, "move:");
1088 mf_format_subfield(&move->src, s);
1089 ds_put_cstr(s, "->");
1090 mf_format_subfield(&move->dst, s);
1094 nxm_format_reg_load(const struct ofpact_reg_load *load, struct ds *s)
1096 ds_put_cstr(s, "load:");
1097 mf_format_subvalue(&load->subvalue, s);
1098 ds_put_cstr(s, "->");
1099 mf_format_subfield(&load->dst, s);
1103 nxm_reg_move_from_openflow(const struct nx_action_reg_move *narm,
1104 struct ofpbuf *ofpacts)
1106 struct ofpact_reg_move *move;
1108 move = ofpact_put_REG_MOVE(ofpacts);
1109 move->src.field = mf_from_nxm_header(ntohl(narm->src));
1110 move->src.ofs = ntohs(narm->src_ofs);
1111 move->src.n_bits = ntohs(narm->n_bits);
1112 move->dst.field = mf_from_nxm_header(ntohl(narm->dst));
1113 move->dst.ofs = ntohs(narm->dst_ofs);
1114 move->dst.n_bits = ntohs(narm->n_bits);
1116 return nxm_reg_move_check(move, NULL);
1120 nxm_reg_load_from_openflow(const struct nx_action_reg_load *narl,
1121 struct ofpbuf *ofpacts)
1123 struct ofpact_reg_load *load;
1125 load = ofpact_put_REG_LOAD(ofpacts);
1126 load->dst.field = mf_from_nxm_header(ntohl(narl->dst));
1127 load->dst.ofs = nxm_decode_ofs(narl->ofs_nbits);
1128 load->dst.n_bits = nxm_decode_n_bits(narl->ofs_nbits);
1129 load->subvalue.be64[1] = narl->value;
1131 /* Reject 'narl' if a bit numbered 'n_bits' or higher is set to 1 in
1133 if (load->dst.n_bits < 64 &&
1134 ntohll(narl->value) >> load->dst.n_bits) {
1135 return OFPERR_OFPBAC_BAD_ARGUMENT;
1138 return nxm_reg_load_check(load, NULL);
1142 nxm_reg_move_check(const struct ofpact_reg_move *move, const struct flow *flow)
1146 error = mf_check_src(&move->src, flow);
1151 return mf_check_dst(&move->dst, NULL);
1155 nxm_reg_load_check(const struct ofpact_reg_load *load, const struct flow *flow)
1157 return mf_check_dst(&load->dst, flow);
1161 nxm_reg_move_to_nxast(const struct ofpact_reg_move *move,
1162 struct ofpbuf *openflow)
1164 struct nx_action_reg_move *narm;
1166 narm = ofputil_put_NXAST_REG_MOVE(openflow);
1167 narm->n_bits = htons(move->dst.n_bits);
1168 narm->src_ofs = htons(move->src.ofs);
1169 narm->dst_ofs = htons(move->dst.ofs);
1170 narm->src = htonl(move->src.field->nxm_header);
1171 narm->dst = htonl(move->dst.field->nxm_header);
1175 nxm_reg_load_to_nxast(const struct ofpact_reg_load *load,
1176 struct ofpbuf *openflow)
1178 struct nx_action_reg_load *narl;
1180 narl = ofputil_put_NXAST_REG_LOAD(openflow);
1181 narl->ofs_nbits = nxm_encode_ofs_nbits(load->dst.ofs, load->dst.n_bits);
1182 narl->dst = htonl(load->dst.field->nxm_header);
1183 narl->value = load->subvalue.be64[1];
1186 /* nxm_execute_reg_move(), nxm_execute_reg_load(). */
1189 nxm_execute_reg_move(const struct ofpact_reg_move *move,
1190 struct flow *flow, struct flow_wildcards *wc)
1192 union mf_value src_value;
1193 union mf_value dst_value;
1195 mf_mask_field_and_prereqs(move->dst.field, &wc->masks);
1196 mf_mask_field_and_prereqs(move->src.field, &wc->masks);
1198 mf_get_value(move->dst.field, flow, &dst_value);
1199 mf_get_value(move->src.field, flow, &src_value);
1200 bitwise_copy(&src_value, move->src.field->n_bytes, move->src.ofs,
1201 &dst_value, move->dst.field->n_bytes, move->dst.ofs,
1203 mf_set_flow_value(move->dst.field, &dst_value, flow);
1207 nxm_execute_reg_load(const struct ofpact_reg_load *load, struct flow *flow,
1208 struct flow_wildcards *wc)
1210 /* Since at the datapath interface we do not have set actions for
1211 * individual fields, but larger sets of fields for a given protocol
1212 * layer, the set action will in practice only ever apply to exactly
1213 * matched flows for the given protocol layer. For example, if the
1214 * reg_load changes the IP TTL, the corresponding datapath action will
1215 * rewrite also the IP addresses and TOS byte. Since these other field
1216 * values may not be explicitly set, they depend on the incoming flow field
1217 * values, and are hence all of them are set in the wildcards masks, when
1218 * the action is committed to the datapath. For the rare case, where the
1219 * reg_load action does not actually change the value, and no other flow
1220 * field values are set (or loaded), the datapath action is skipped, and
1221 * no mask bits are set. Such a datapath flow should, however, be
1222 * dependent on the specific field value, so the corresponding wildcard
1223 * mask bits must be set, lest the datapath flow be applied to packets
1224 * containing some other value in the field and the field value remain
1225 * unchanged regardless of the incoming value.
1227 * We set the masks here for the whole fields, and their prerequisities.
1228 * Even if only the lower byte of a TCP destination port is set,
1229 * we set the mask for the whole field, and also the ip_proto in the IP
1230 * header, so that the kernel flow would not be applied on, e.g., a UDP
1231 * packet, or any other IP protocol in addition to TCP packets.
1233 mf_mask_field_and_prereqs(load->dst.field, &wc->masks);
1234 mf_write_subfield_flow(&load->dst, &load->subvalue, flow);
1238 nxm_reg_load(const struct mf_subfield *dst, uint64_t src_data,
1239 struct flow *flow, struct flow_wildcards *wc)
1241 union mf_subvalue src_subvalue;
1242 union mf_subvalue mask_value;
1243 ovs_be64 src_data_be = htonll(src_data);
1245 memset(&mask_value, 0xff, sizeof mask_value);
1246 mf_write_subfield_flow(dst, &mask_value, &wc->masks);
1248 bitwise_copy(&src_data_be, sizeof src_data_be, 0,
1249 &src_subvalue, sizeof src_subvalue, 0,
1250 sizeof src_data_be * 8);
1251 mf_write_subfield_flow(dst, &src_subvalue, flow);
1254 /* nxm_parse_stack_action, works for both push() and pop(). */
1256 /* Parses 's' as a "push" or "pop" action, in the form described in
1257 * ovs-ofctl(8), into '*stack_action'.
1259 * Returns NULL if successful, otherwise a malloc()'d string describing the
1260 * error. The caller is responsible for freeing the returned string. */
1261 char * WARN_UNUSED_RESULT
1262 nxm_parse_stack_action(struct ofpact_stack *stack_action, const char *s)
1266 error = mf_parse_subfield__(&stack_action->subfield, &s);
1272 return xasprintf("%s: trailing garbage following push or pop", s);
1279 nxm_format_stack_push(const struct ofpact_stack *push, struct ds *s)
1281 ds_put_cstr(s, "push:");
1282 mf_format_subfield(&push->subfield, s);
1286 nxm_format_stack_pop(const struct ofpact_stack *pop, struct ds *s)
1288 ds_put_cstr(s, "pop:");
1289 mf_format_subfield(&pop->subfield, s);
1292 /* Common set for both push and pop actions. */
1294 stack_action_from_openflow__(const struct nx_action_stack *nasp,
1295 struct ofpact_stack *stack_action)
1297 stack_action->subfield.field = mf_from_nxm_header(ntohl(nasp->field));
1298 stack_action->subfield.ofs = ntohs(nasp->offset);
1299 stack_action->subfield.n_bits = ntohs(nasp->n_bits);
1303 nxm_stack_to_nxast__(const struct ofpact_stack *stack_action,
1304 struct nx_action_stack *nasp)
1306 nasp->offset = htons(stack_action->subfield.ofs);
1307 nasp->n_bits = htons(stack_action->subfield.n_bits);
1308 nasp->field = htonl(stack_action->subfield.field->nxm_header);
1312 nxm_stack_push_from_openflow(const struct nx_action_stack *nasp,
1313 struct ofpbuf *ofpacts)
1315 struct ofpact_stack *push;
1317 push = ofpact_put_STACK_PUSH(ofpacts);
1318 stack_action_from_openflow__(nasp, push);
1320 return nxm_stack_push_check(push, NULL);
1324 nxm_stack_pop_from_openflow(const struct nx_action_stack *nasp,
1325 struct ofpbuf *ofpacts)
1327 struct ofpact_stack *pop;
1329 pop = ofpact_put_STACK_POP(ofpacts);
1330 stack_action_from_openflow__(nasp, pop);
1332 return nxm_stack_pop_check(pop, NULL);
1336 nxm_stack_push_check(const struct ofpact_stack *push,
1337 const struct flow *flow)
1339 return mf_check_src(&push->subfield, flow);
1343 nxm_stack_pop_check(const struct ofpact_stack *pop,
1344 const struct flow *flow)
1346 return mf_check_dst(&pop->subfield, flow);
/* Encodes 'stack' as a wire-format NXAST_STACK_PUSH action appended to
 * 'openflow'. */
void
nxm_stack_push_to_nxast(const struct ofpact_stack *stack,
                        struct ofpbuf *openflow)
{
    nxm_stack_to_nxast__(stack, ofputil_put_NXAST_STACK_PUSH(openflow));
}
/* Encodes 'stack' as a wire-format NXAST_STACK_POP action appended to
 * 'openflow'. */
void
nxm_stack_pop_to_nxast(const struct ofpact_stack *stack,
                       struct ofpbuf *openflow)
{
    nxm_stack_to_nxast__(stack, ofputil_put_NXAST_STACK_POP(openflow));
}
1363 /* nxm_execute_stack_push(), nxm_execute_stack_pop(). */
1365 nx_stack_push(struct ofpbuf *stack, union mf_subvalue *v)
1367 ofpbuf_put(stack, v, sizeof *v);
1370 static union mf_subvalue *
1371 nx_stack_pop(struct ofpbuf *stack)
1373 union mf_subvalue *v = NULL;
1376 stack->size -= sizeof *v;
1377 v = (union mf_subvalue *) ofpbuf_tail(stack);
1384 nxm_execute_stack_push(const struct ofpact_stack *push,
1385 const struct flow *flow, struct flow_wildcards *wc,
1386 struct ofpbuf *stack)
1388 union mf_subvalue mask_value;
1389 union mf_subvalue dst_value;
1391 memset(&mask_value, 0xff, sizeof mask_value);
1392 mf_write_subfield_flow(&push->subfield, &mask_value, &wc->masks);
1394 mf_read_subfield(&push->subfield, flow, &dst_value);
1395 nx_stack_push(stack, &dst_value);
1399 nxm_execute_stack_pop(const struct ofpact_stack *pop,
1400 struct flow *flow, struct flow_wildcards *wc,
1401 struct ofpbuf *stack)
1403 union mf_subvalue *src_value;
1405 src_value = nx_stack_pop(stack);
1407 /* Only pop if stack is not empty. Otherwise, give warning. */
1409 union mf_subvalue mask_value;
1411 memset(&mask_value, 0xff, sizeof mask_value);
1412 mf_write_subfield_flow(&pop->subfield, &mask_value, &wc->masks);
1413 mf_write_subfield_flow(&pop->subfield, src_value, flow);
1415 if (!VLOG_DROP_WARN(&rl)) {
1416 char *flow_str = flow_to_string(flow);
1417 VLOG_WARN_RL(&rl, "Failed to pop from an empty stack. On flow \n"