/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include <sys/types.h>
#include "flow.h"
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/ip6.h>
#include "byte-order.h"
#include "dynamic-string.h"
#include "openflow/openflow.h"
#include "unaligned.h"

COVERAGE_DEFINE(flow_extract);
COVERAGE_DEFINE(miniflow_malloc);

/* U32 indices for segmented flow classification. */
const uint8_t flow_segment_u32s[4] = {
    FLOW_SEGMENT_1_ENDS_AT / 4,
    FLOW_SEGMENT_2_ENDS_AT / 4,
    FLOW_SEGMENT_3_ENDS_AT / 4,
    FLOW_U32S
};

/* miniflow_extract() assumes the following to be true to optimize the
 * extraction process. */
BUILD_ASSERT_DECL(offsetof(struct flow, dl_type) + 2
                  == offsetof(struct flow, vlan_tci) &&
                  offsetof(struct flow, dl_type) / 4
                  == offsetof(struct flow, vlan_tci) / 4);

BUILD_ASSERT_DECL(offsetof(struct flow, nw_frag) + 3
                  == offsetof(struct flow, nw_proto) &&
                  offsetof(struct flow, nw_tos) + 2
                  == offsetof(struct flow, nw_proto) &&
                  offsetof(struct flow, nw_ttl) + 1
                  == offsetof(struct flow, nw_proto) &&
                  offsetof(struct flow, nw_frag) / 4
                  == offsetof(struct flow, nw_tos) / 4 &&
                  offsetof(struct flow, nw_ttl) / 4
                  == offsetof(struct flow, nw_tos) / 4 &&
                  offsetof(struct flow, nw_proto) / 4
                  == offsetof(struct flow, nw_tos) / 4);

/* TCP flags in the first half of a BE32, zeroes in the other half. */
BUILD_ASSERT_DECL(offsetof(struct flow, tcp_flags) + 2
                  == offsetof(struct flow, pad) &&
                  offsetof(struct flow, tcp_flags) / 4
                  == offsetof(struct flow, pad) / 4);

#if WORDS_BIGENDIAN
#define TCP_FLAGS_BE32(tcp_ctl) ((OVS_FORCE ovs_be32)TCP_FLAGS_BE16(tcp_ctl) \
                                 << 16)
#else
#define TCP_FLAGS_BE32(tcp_ctl) ((OVS_FORCE ovs_be32)TCP_FLAGS_BE16(tcp_ctl))
#endif

BUILD_ASSERT_DECL(offsetof(struct flow, tp_src) + 2
                  == offsetof(struct flow, tp_dst) &&
                  offsetof(struct flow, tp_src) / 4
                  == offsetof(struct flow, tp_dst) / 4);

/* Removes 'size' bytes from the head end of '*datap', of size '*sizep', which
 * must contain at least 'size' bytes of data.  Returns the first byte of data
 * removed. */
static inline const void *
data_pull(void **datap, size_t *sizep, size_t size)
{
    char *data = (char *)*datap;

    *datap = data + size;
    *sizep -= size;
    return data;
}

/* If '*datap' has at least 'size' bytes of data, removes that many bytes from
 * the head end of '*datap' and returns the first byte removed.  Otherwise,
 * returns a null pointer without modifying '*datap'. */
static inline const void *
data_try_pull(void **datap, size_t *sizep, size_t size)
{
    return OVS_LIKELY(*sizep >= size) ? data_pull(datap, sizep, size) : NULL;
}

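/* Illustrative sketch, not part of the original file: how the pull helpers
 * compose.  A parser keeps a cursor ('datap', 'sizep') into the packet and
 * advances it header by header; data_try_pull() is the bounds-checked
 * variant.  The name 'example_pull_eth' is hypothetical. */
static inline const struct eth_header *
example_pull_eth(void **datap, size_t *sizep)
{
    /* Returns NULL, leaving the cursor untouched, if fewer than
     * sizeof(struct eth_header) bytes remain. */
    return data_try_pull(datap, sizep, sizeof(struct eth_header));
}
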
/* Context for pushing data to a miniflow. */
struct mf_ctx {
    uint64_t map;
    uint32_t *data;
    uint32_t * const end;
};

/* miniflow_push_* macros allow filling in miniflow data values in order.
 * Assertions are needed only when the layout of the struct flow is modified.
 * 'ofs' is a compile-time constant, which allows most of the code to be
 * optimized away.  Some GCC versions gave warnings on ALWAYS_INLINE, so these
 * are defined as macros. */

#if (FLOW_WC_SEQ != 26)
#define MINIFLOW_ASSERT(X) ovs_assert(X)
#else
#define MINIFLOW_ASSERT(X)
#endif

#define miniflow_push_uint32_(MF, OFS, VALUE)                   \
{                                                               \
    MINIFLOW_ASSERT(MF.data < MF.end && (OFS) % 4 == 0          \
                    && !(MF.map & (UINT64_MAX << (OFS) / 4)));  \
    *MF.data++ = VALUE;                                         \
    MF.map |= UINT64_C(1) << (OFS) / 4;                         \
}

#define miniflow_push_be32_(MF, OFS, VALUE) \
    miniflow_push_uint32_(MF, OFS, (OVS_FORCE uint32_t)(VALUE))

#define miniflow_push_uint16_(MF, OFS, VALUE)                           \
{                                                                       \
    MINIFLOW_ASSERT(MF.data < MF.end &&                                 \
                    (((OFS) % 4 == 0 && !(MF.map & (UINT64_MAX << (OFS) / 4))) \
                     || ((OFS) % 4 == 2 && MF.map & (UINT64_C(1) << (OFS) / 4) \
                         && !(MF.map & (UINT64_MAX << ((OFS) / 4 + 1)))))); \
                                                                        \
    if ((OFS) % 4 == 0) {                                       \
        *(uint16_t *)MF.data = VALUE;                           \
        MF.map |= UINT64_C(1) << (OFS) / 4;                     \
    } else if ((OFS) % 4 == 2) {                                \
        *((uint16_t *)MF.data + 1) = VALUE;                     \
        MF.data++;                                              \
    }                                                           \
}

#define miniflow_push_be16_(MF, OFS, VALUE)             \
    miniflow_push_uint16_(MF, OFS, (OVS_FORCE uint16_t)VALUE);

/* Data at 'valuep' may be unaligned. */
#define miniflow_push_words_(MF, OFS, VALUEP, N_WORDS)          \
{                                                               \
    int ofs32 = (OFS) / 4;                                      \
                                                                \
    MINIFLOW_ASSERT(MF.data + (N_WORDS) <= MF.end && (OFS) % 4 == 0     \
                    && !(MF.map & (UINT64_MAX << ofs32)));      \
                                                                \
    memcpy(MF.data, (VALUEP), (N_WORDS) * sizeof *MF.data);     \
    MF.data += (N_WORDS);                                       \
    MF.map |= ((UINT64_MAX >> (64 - (N_WORDS))) << ofs32);      \
}

#define miniflow_push_uint32(MF, FIELD, VALUE)                          \
    miniflow_push_uint32_(MF, offsetof(struct flow, FIELD), VALUE)

#define miniflow_push_be32(MF, FIELD, VALUE)                            \
    miniflow_push_be32_(MF, offsetof(struct flow, FIELD), VALUE)

#define miniflow_push_uint32_check(MF, FIELD, VALUE)                    \
    { if (OVS_LIKELY(VALUE)) {                                          \
            miniflow_push_uint32_(MF, offsetof(struct flow, FIELD), VALUE); \
        }}

#define miniflow_push_be32_check(MF, FIELD, VALUE)                      \
    { if (OVS_LIKELY(VALUE)) {                                          \
            miniflow_push_be32_(MF, offsetof(struct flow, FIELD), VALUE); \
        }}

#define miniflow_push_uint16(MF, FIELD, VALUE)                          \
    miniflow_push_uint16_(MF, offsetof(struct flow, FIELD), VALUE)

#define miniflow_push_be16(MF, FIELD, VALUE)                            \
    miniflow_push_be16_(MF, offsetof(struct flow, FIELD), VALUE)

#define miniflow_push_words(MF, FIELD, VALUEP, N_WORDS)                 \
    miniflow_push_words_(MF, offsetof(struct flow, FIELD), VALUEP, N_WORDS)

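/* Illustrative sketch, not part of the original file: the push macros must be
 * invoked in increasing 'struct flow' offset order, since each one appends to
 * 'mf.data' and records the corresponding u32 index in 'mf.map'.  The name
 * 'example_push_ports' is hypothetical; it assumes 'dst->values' already
 * points at storage, as miniflow_initialize() arranges. */
static inline void
example_push_ports(struct miniflow *dst, ovs_be16 src, ovs_be16 dst_port)
{
    struct mf_ctx mf = { 0, dst->values, dst->values + FLOW_U32S };

    /* tp_src is at offset % 4 == 0: starts a fresh u32 and sets its map bit.
     * tp_dst is at offset % 4 == 2: completes that same u32 in place. */
    miniflow_push_be16(mf, tp_src, src);
    miniflow_push_be16(mf, tp_dst, dst_port);
    dst->map = mf.map;
}
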
/* Pulls the MPLS headers at '*datap' and returns the count of them. */
static inline int
parse_mpls(void **datap, size_t *sizep)
{
    const struct mpls_hdr *mh;
    int count = 0;

    while ((mh = data_try_pull(datap, sizep, sizeof *mh))) {
        count++;
        if (mh->mpls_lse.lo & htons(1 << MPLS_BOS_SHIFT)) {
            break;
        }
    }
    return MIN(count, FLOW_MAX_MPLS_LABELS);
}

static inline ovs_be16
parse_vlan(void **datap, size_t *sizep)
{
    const struct eth_header *eth = *datap;

    struct qtag_prefix {
        ovs_be16 eth_type;      /* ETH_TYPE_VLAN */
        ovs_be16 tci;
    };

    data_pull(datap, sizep, ETH_ADDR_LEN * 2);

    if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
        if (OVS_LIKELY(*sizep
                       >= sizeof(struct qtag_prefix) + sizeof(ovs_be16))) {
            const struct qtag_prefix *qp = data_pull(datap, sizep, sizeof *qp);
            return qp->tci | htons(VLAN_CFI);
        }
    }

    return 0;
}

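/* A worked example (added commentary, not original): for a packet tagged with
 * VID 100 and PCP 3, the 802.1Q TCI on the wire is 0x6064; parse_vlan()
 * returns it with VLAN_CFI forced on, i.e. htons(0x7064).  A zero return
 * therefore unambiguously means "no 802.1Q header", because a present tag
 * always carries the CFI marker here. */
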
static inline ovs_be16
parse_ethertype(void **datap, size_t *sizep)
{
    const struct llc_snap_header *llc;
    ovs_be16 proto;

    proto = *(ovs_be16 *) data_pull(datap, sizep, sizeof proto);
    if (OVS_LIKELY(ntohs(proto) >= ETH_TYPE_MIN)) {
        return proto;
    }

    if (OVS_UNLIKELY(*sizep < sizeof *llc)) {
        return htons(FLOW_DL_TYPE_NONE);
    }

    llc = *datap;
    if (OVS_UNLIKELY(llc->llc.llc_dsap != LLC_DSAP_SNAP
                     || llc->llc.llc_ssap != LLC_SSAP_SNAP
                     || llc->llc.llc_cntl != LLC_CNTL_SNAP
                     || memcmp(llc->snap.snap_org, SNAP_ORG_ETHERNET,
                               sizeof llc->snap.snap_org))) {
        return htons(FLOW_DL_TYPE_NONE);
    }

    data_pull(datap, sizep, sizeof *llc);

    if (OVS_LIKELY(ntohs(llc->snap.snap_type) >= ETH_TYPE_MIN)) {
        return llc->snap.snap_type;
    }

    return htons(FLOW_DL_TYPE_NONE);
}

static inline bool
parse_icmpv6(void **datap, size_t *sizep, const struct icmp6_hdr *icmp,
             const struct in6_addr **nd_target,
             uint8_t arp_buf[2][ETH_ADDR_LEN])
{
    if (icmp->icmp6_code == 0 &&
        (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
         icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {

        *nd_target = data_try_pull(datap, sizep, sizeof **nd_target);
        if (OVS_UNLIKELY(!*nd_target)) {
            return false;
        }

        while (*sizep >= 8) {
            /* The minimum size of an option is 8 bytes, which also is
             * the size of Ethernet link-layer options. */
            const struct nd_opt_hdr *nd_opt = *datap;
            int opt_len = nd_opt->nd_opt_len * 8;

            if (!opt_len || opt_len > *sizep) {
                goto invalid;
            }
            /* Store the link layer address if the appropriate option is
             * provided.  It is considered an error if the same link
             * layer option is specified twice. */
            if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LINKADDR
                && opt_len == 8) {
                if (OVS_LIKELY(eth_addr_is_zero(arp_buf[0]))) {
                    memcpy(arp_buf[0], nd_opt + 1, ETH_ADDR_LEN);
                } else {
                    goto invalid;
                }
            } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LINKADDR
                       && opt_len == 8) {
                if (OVS_LIKELY(eth_addr_is_zero(arp_buf[1]))) {
                    memcpy(arp_buf[1], nd_opt + 1, ETH_ADDR_LEN);
                } else {
                    goto invalid;
                }
            }
            if (OVS_UNLIKELY(!data_try_pull(datap, sizep, opt_len))) {
                goto invalid;
            }
        }
    }

    return true;

invalid:
    return false;
}

/* Initializes 'flow' members from 'packet' and 'md'.
 *
 * Initializes 'packet' header l2 pointer to the start of the Ethernet
 * header, and the layer offsets as follows:
 *
 *    - packet->l2_5_ofs to the start of the MPLS shim header, or UINT16_MAX
 *      when there is no MPLS shim header.
 *
 *    - packet->l3_ofs to just past the Ethernet header, or just past the
 *      vlan_header if one is present, to the first byte of the payload of the
 *      Ethernet frame.  UINT16_MAX if the frame is too short to contain an
 *      Ethernet header.
 *
 *    - packet->l4_ofs to just past the IPv4 header, if one is present and
 *      has at least the content used for the fields of interest for the flow,
 *      otherwise UINT16_MAX.
 */
void
flow_extract(struct ofpbuf *packet, const struct pkt_metadata *md,
             struct flow *flow)
{
    uint32_t buf[FLOW_U32S];
    struct miniflow mf;

    COVERAGE_INC(flow_extract);

    miniflow_initialize(&mf, buf);
    miniflow_extract(packet, md, &mf);
    miniflow_expand(&mf, flow);
}

/* Caller is responsible for initializing 'dst->values' with enough storage
 * (FLOW_U32S * 4 bytes is enough). */
void
miniflow_extract(struct ofpbuf *packet, const struct pkt_metadata *md,
                 struct miniflow *dst)
{
    void *data = ofpbuf_data(packet);
    size_t size = ofpbuf_size(packet);
    char *l2;
    struct mf_ctx mf = { 0, dst->values, dst->values + FLOW_U32S };
    ovs_be16 dl_type;
    uint8_t nw_frag, nw_tos, nw_ttl, nw_proto;
    /* Metadata. */
    if (md) {
        if (md->tunnel.ip_dst) {
            miniflow_push_words(mf, tunnel, &md->tunnel,
                                sizeof md->tunnel / 4);
        }
        miniflow_push_uint32_check(mf, skb_priority, md->skb_priority);
        miniflow_push_uint32_check(mf, pkt_mark, md->pkt_mark);
        miniflow_push_uint32_check(mf, recirc_id, md->recirc_id);
        miniflow_push_uint32(mf, in_port, odp_to_u32(md->in_port.odp_port));
    }

    /* Initialize packet's layer pointer and offsets. */
    l2 = data;
    ofpbuf_set_frame(packet, data);

    /* Must have full Ethernet header to proceed. */
    if (OVS_UNLIKELY(size < sizeof(struct eth_header))) {
        goto out;
    } else {
        ovs_be16 vlan_tci;

        /* Link layer. */
        BUILD_ASSERT(offsetof(struct flow, dl_dst) + 6
                     == offsetof(struct flow, dl_src));
        miniflow_push_words(mf, dl_dst, data, ETH_ADDR_LEN * 2 / 4);
        /* dl_type, vlan_tci. */
        vlan_tci = parse_vlan(&data, &size);
        dl_type = parse_ethertype(&data, &size);
        miniflow_push_be16(mf, dl_type, dl_type);
        miniflow_push_be16(mf, vlan_tci, vlan_tci);
    }
    /* Parse mpls. */
    if (OVS_UNLIKELY(eth_type_mpls(dl_type))) {
        int count;
        const void *mpls = data;

        packet->l2_5_ofs = (char *)data - l2;
        count = parse_mpls(&data, &size);
        miniflow_push_words(mf, mpls_lse, mpls, count);
    }

    /* Network layer. */
    packet->l3_ofs = (char *)data - l2;

    nw_frag = 0;
    if (OVS_LIKELY(dl_type == htons(ETH_TYPE_IP))) {
        const struct ip_header *nh = data;
        int ip_len;

        if (OVS_UNLIKELY(size < IP_HEADER_LEN)) {
            goto out;
        }
        ip_len = IP_IHL(nh->ip_ihl_ver) * 4;

        if (OVS_UNLIKELY(ip_len < IP_HEADER_LEN)) {
            goto out;
        }

        /* Push both source and destination address at once. */
        miniflow_push_words(mf, nw_src, &nh->ip_src, 2);

        nw_tos = nh->ip_tos;
        nw_ttl = nh->ip_ttl;
        nw_proto = nh->ip_proto;
        if (OVS_UNLIKELY(IP_IS_FRAGMENT(nh->ip_frag_off))) {
            nw_frag = FLOW_NW_FRAG_ANY;
            if (nh->ip_frag_off & htons(IP_FRAG_OFF_MASK)) {
                nw_frag |= FLOW_NW_FRAG_LATER;
            }
        }
        if (OVS_UNLIKELY(size < ip_len)) {
            goto out;
        }
        data_pull(&data, &size, ip_len);
    } else if (dl_type == htons(ETH_TYPE_IPV6)) {
        const struct ovs_16aligned_ip6_hdr *nh;
        ovs_be32 tc_flow;

        if (OVS_UNLIKELY(size < sizeof *nh)) {
            goto out;
        }
        nh = data_pull(&data, &size, sizeof *nh);

        miniflow_push_words(mf, ipv6_src, &nh->ip6_src,
                            sizeof nh->ip6_src / 4);
        miniflow_push_words(mf, ipv6_dst, &nh->ip6_dst,
                            sizeof nh->ip6_dst / 4);

        tc_flow = get_16aligned_be32(&nh->ip6_flow);
        {
            ovs_be32 label = tc_flow & htonl(IPV6_LABEL_MASK);
            miniflow_push_be32_check(mf, ipv6_label, label);
        }

        nw_tos = ntohl(tc_flow) >> 20;
        nw_ttl = nh->ip6_hlim;
        nw_proto = nh->ip6_nxt;
        while (1) {
            if (OVS_LIKELY((nw_proto != IPPROTO_HOPOPTS)
                           && (nw_proto != IPPROTO_ROUTING)
                           && (nw_proto != IPPROTO_DSTOPTS)
                           && (nw_proto != IPPROTO_AH)
                           && (nw_proto != IPPROTO_FRAGMENT))) {
                /* It's either a terminal header (e.g., TCP, UDP) or one we
                 * don't understand.  In either case, we're done with the
                 * packet, so use it to fill in 'nw_proto'. */
                break;
            }

            /* We only verify that at least 8 bytes of the next header are
             * available, but many of these headers are longer.  Ensure that
             * accesses within the extension header are within those first 8
             * bytes.  All extension headers are required to be at least 8
             * bytes. */
            if (OVS_UNLIKELY(size < 8)) {
                goto out;
            }

            if ((nw_proto == IPPROTO_HOPOPTS)
                || (nw_proto == IPPROTO_ROUTING)
                || (nw_proto == IPPROTO_DSTOPTS)) {
                /* These headers, while different, have the fields we care
                 * about in the same location and with the same
                 * interpretation. */
                const struct ip6_ext *ext_hdr = data;
                nw_proto = ext_hdr->ip6e_nxt;
                if (OVS_UNLIKELY(!data_try_pull(&data, &size,
                                                (ext_hdr->ip6e_len + 1) * 8))) {
                    goto out;
                }
            } else if (nw_proto == IPPROTO_AH) {
                /* A standard AH definition isn't available, but the fields
                 * we care about are in the same location as the generic
                 * option header--only the header length is calculated
                 * differently. */
                const struct ip6_ext *ext_hdr = data;
                nw_proto = ext_hdr->ip6e_nxt;
                if (OVS_UNLIKELY(!data_try_pull(&data, &size,
                                                (ext_hdr->ip6e_len + 2) * 4))) {
                    goto out;
                }
            } else if (nw_proto == IPPROTO_FRAGMENT) {
                const struct ovs_16aligned_ip6_frag *frag_hdr = data;

                nw_proto = frag_hdr->ip6f_nxt;
                if (!data_try_pull(&data, &size, sizeof *frag_hdr)) {
                    goto out;
                }

                /* We only process the first fragment. */
                if (frag_hdr->ip6f_offlg != htons(0)) {
                    nw_frag = FLOW_NW_FRAG_ANY;
                    if ((frag_hdr->ip6f_offlg & IP6F_OFF_MASK) != htons(0)) {
                        nw_frag |= FLOW_NW_FRAG_LATER;
                        nw_proto = IPPROTO_FRAGMENT;
                        break;
                    }
                }
            }
        }
    } else {
        if (dl_type == htons(ETH_TYPE_ARP) ||
            dl_type == htons(ETH_TYPE_RARP)) {
            uint8_t arp_buf[2][ETH_ADDR_LEN];
            const struct arp_eth_header *arp = (const struct arp_eth_header *)
                data_try_pull(&data, &size, ARP_ETH_HEADER_LEN);

            if (OVS_LIKELY(arp) && OVS_LIKELY(arp->ar_hrd == htons(1))
                && OVS_LIKELY(arp->ar_pro == htons(ETH_TYPE_IP))
                && OVS_LIKELY(arp->ar_hln == ETH_ADDR_LEN)
                && OVS_LIKELY(arp->ar_pln == 4)) {
                miniflow_push_words(mf, nw_src, &arp->ar_spa, 1);
                miniflow_push_words(mf, nw_dst, &arp->ar_tpa, 1);

                /* We only match on the lower 8 bits of the opcode. */
                if (OVS_LIKELY(ntohs(arp->ar_op) <= 0xff)) {
                    miniflow_push_be32(mf, nw_frag, htonl(ntohs(arp->ar_op)));
                }

                /* Must be adjacent. */
                BUILD_ASSERT(offsetof(struct flow, arp_sha) + 6
                             == offsetof(struct flow, arp_tha));

                memcpy(arp_buf[0], arp->ar_sha, ETH_ADDR_LEN);
                memcpy(arp_buf[1], arp->ar_tha, ETH_ADDR_LEN);
                miniflow_push_words(mf, arp_sha, arp_buf,
                                    ETH_ADDR_LEN * 2 / 4);
            }
        }
        goto out;
    }
    packet->l4_ofs = (char *)data - l2;
    miniflow_push_be32(mf, nw_frag,
                       BYTES_TO_BE32(nw_frag, nw_tos, nw_ttl, nw_proto));

    if (OVS_LIKELY(!(nw_frag & FLOW_NW_FRAG_LATER))) {
        if (OVS_LIKELY(nw_proto == IPPROTO_TCP)) {
            if (OVS_LIKELY(size >= TCP_HEADER_LEN)) {
                const struct tcp_header *tcp = data;

                miniflow_push_be32(mf, tcp_flags,
                                   TCP_FLAGS_BE32(tcp->tcp_ctl));
                miniflow_push_words(mf, tp_src, &tcp->tcp_src, 1);
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_UDP)) {
            if (OVS_LIKELY(size >= UDP_HEADER_LEN)) {
                const struct udp_header *udp = data;

                miniflow_push_words(mf, tp_src, &udp->udp_src, 1);
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_SCTP)) {
            if (OVS_LIKELY(size >= SCTP_HEADER_LEN)) {
                const struct sctp_header *sctp = data;

                miniflow_push_words(mf, tp_src, &sctp->sctp_src, 1);
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_ICMP)) {
            if (OVS_LIKELY(size >= ICMP_HEADER_LEN)) {
                const struct icmp_header *icmp = data;

                miniflow_push_be16(mf, tp_src, htons(icmp->icmp_type));
                miniflow_push_be16(mf, tp_dst, htons(icmp->icmp_code));
            }
        } else if (OVS_LIKELY(nw_proto == IPPROTO_ICMPV6)) {
            if (OVS_LIKELY(size >= sizeof(struct icmp6_hdr))) {
                const struct in6_addr *nd_target = NULL;
                uint8_t arp_buf[2][ETH_ADDR_LEN];
                const struct icmp6_hdr *icmp = data_pull(&data, &size,
                                                         sizeof *icmp);
                memset(arp_buf, 0, sizeof arp_buf);
                if (OVS_LIKELY(parse_icmpv6(&data, &size, icmp, &nd_target,
                                            arp_buf))) {
                    if (nd_target) {
                        miniflow_push_words(mf, nd_target, nd_target,
                                            sizeof *nd_target / 4);
                    }
                    miniflow_push_words(mf, arp_sha, arp_buf,
                                        ETH_ADDR_LEN * 2 / 4);
                    miniflow_push_be16(mf, tp_src, htons(icmp->icmp6_type));
                    miniflow_push_be16(mf, tp_dst, htons(icmp->icmp6_code));
                }
            }
        }
    }
    if (md) {
        miniflow_push_uint32_check(mf, dp_hash, md->dp_hash);
    }
 out:
    dst->map = mf.map;
}

/* For every bit of a field that is wildcarded in 'wildcards', sets the
 * corresponding bit in 'flow' to zero. */
void
flow_zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards)
{
    uint32_t *flow_u32 = (uint32_t *) flow;
    const uint32_t *wc_u32 = (const uint32_t *) &wildcards->masks;
    size_t i;

    for (i = 0; i < FLOW_U32S; i++) {
        flow_u32[i] &= wc_u32[i];
    }
}

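/* Illustrative sketch, not part of the original file: zeroing wildcarded bits
 * canonicalizes a flow, so two flows that a given mask cannot distinguish
 * compare equal afterwards.  'example_canonical_equal' is a hypothetical
 * name; the pair-wise result matches flow_equal_except() on the originals. */
static inline bool
example_canonical_equal(struct flow *a, struct flow *b,
                        const struct flow_wildcards *wc)
{
    /* Both flows are modified in place: every bit outside the mask is
     * forced to zero before the byte-wise comparison. */
    flow_zero_wildcards(a, wc);
    flow_zero_wildcards(b, wc);
    return flow_equal(a, b);
}
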
void
flow_unwildcard_tp_ports(const struct flow *flow, struct flow_wildcards *wc)
{
    if (flow->nw_proto != IPPROTO_ICMP) {
        memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
        memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
    } else {
        wc->masks.tp_src = htons(0xff);
        wc->masks.tp_dst = htons(0xff);
    }
}

/* Initializes 'fmd' with the metadata found in 'flow'. */
void
flow_get_metadata(const struct flow *flow, struct flow_metadata *fmd)
{
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 26);

    fmd->dp_hash = flow->dp_hash;
    fmd->recirc_id = flow->recirc_id;
    fmd->tun_id = flow->tunnel.tun_id;
    fmd->tun_src = flow->tunnel.ip_src;
    fmd->tun_dst = flow->tunnel.ip_dst;
    fmd->metadata = flow->metadata;
    memcpy(fmd->regs, flow->regs, sizeof fmd->regs);
    fmd->pkt_mark = flow->pkt_mark;
    fmd->in_port = flow->in_port.ofp_port;
}

char *
flow_to_string(const struct flow *flow)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    flow_format(&ds, flow);
    return ds_cstr(&ds);
}

const char *
flow_tun_flag_to_string(uint32_t flags)
{
    switch (flags) {
    case FLOW_TNL_F_DONT_FRAGMENT:
        return "df";
    case FLOW_TNL_F_CSUM:
        return "csum";
    case FLOW_TNL_F_KEY:
        return "key";
    default:
        return NULL;
    }
}

void
format_flags(struct ds *ds, const char *(*bit_to_string)(uint32_t),
             uint32_t flags, char del)
{
    uint32_t bad = 0;

    if (!flags) {
        return;
    }

    while (flags) {
        uint32_t bit = rightmost_1bit(flags);
        const char *s;

        s = bit_to_string(bit);
        if (s) {
            ds_put_format(ds, "%s%c", s, del);
        } else {
            bad |= bit;
        }

        flags &= ~bit;
    }

    if (bad) {
        ds_put_format(ds, "0x%"PRIx32"%c", bad, del);
    }
    ds_chomp(ds, del);
}

void
format_flags_masked(struct ds *ds, const char *name,
                    const char *(*bit_to_string)(uint32_t), uint32_t flags,
                    uint32_t mask)
{
    if (name) {
        ds_put_format(ds, "%s=", name);
    }
    while (mask) {
        uint32_t bit = rightmost_1bit(mask);
        const char *s = bit_to_string(bit);

        ds_put_format(ds, "%s%s", (flags & bit) ? "+" : "-",
                      s ? s : "[Unknown]");
        mask &= ~bit;
    }
}

void
flow_format(struct ds *ds, const struct flow *flow)
{
    struct match match;

    match_wc_init(&match, flow);
    match_format(&match, ds, OFP_DEFAULT_PRIORITY);
}

void
flow_print(FILE *stream, const struct flow *flow)
{
    char *s = flow_to_string(flow);
    fputs(s, stream);
    free(s);
}

/* flow_wildcards functions. */

/* Initializes 'wc' as a set of wildcards that matches every packet. */
void
flow_wildcards_init_catchall(struct flow_wildcards *wc)
{
    memset(&wc->masks, 0, sizeof wc->masks);
}

/* Clear the metadata and register wildcard masks.  They are not packet
 * header fields. */
void
flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
{
    memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
    memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
}

/* Returns true if 'wc' matches every packet, false if 'wc' fixes any bits or
 * fields. */
bool
flow_wildcards_is_catchall(const struct flow_wildcards *wc)
{
    const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;
    size_t i;

    for (i = 0; i < FLOW_U32S; i++) {
        if (wc_u32[i]) {
            return false;
        }
    }
    return true;
}

/* Sets 'dst' as the bitwise AND of wildcards in 'src1' and 'src2'.
 * That is, a bit or a field is wildcarded in 'dst' if it is wildcarded
 * in 'src1' or 'src2' or both. */
void
flow_wildcards_and(struct flow_wildcards *dst,
                   const struct flow_wildcards *src1,
                   const struct flow_wildcards *src2)
{
    uint32_t *dst_u32 = (uint32_t *) &dst->masks;
    const uint32_t *src1_u32 = (const uint32_t *) &src1->masks;
    const uint32_t *src2_u32 = (const uint32_t *) &src2->masks;
    size_t i;

    for (i = 0; i < FLOW_U32S; i++) {
        dst_u32[i] = src1_u32[i] & src2_u32[i];
    }
}

/* Sets 'dst' as the bitwise OR of wildcards in 'src1' and 'src2'.  That
 * is, a bit or a field is wildcarded in 'dst' only if it is wildcarded
 * in both 'src1' and 'src2'. */
void
flow_wildcards_or(struct flow_wildcards *dst,
                  const struct flow_wildcards *src1,
                  const struct flow_wildcards *src2)
{
    uint32_t *dst_u32 = (uint32_t *) &dst->masks;
    const uint32_t *src1_u32 = (const uint32_t *) &src1->masks;
    const uint32_t *src2_u32 = (const uint32_t *) &src2->masks;
    size_t i;

    for (i = 0; i < FLOW_U32S; i++) {
        dst_u32[i] = src1_u32[i] | src2_u32[i];
    }
}

/* Perform a bitwise OR of miniflow 'src' flow data with the equivalent
 * fields in 'dst', storing the result in 'dst'. */
static void
flow_union_with_miniflow(struct flow *dst, const struct miniflow *src)
{
    uint32_t *dst_u32 = (uint32_t *) dst;
    const uint32_t *p = src->values;
    uint64_t map;

    for (map = src->map; map; map = zero_rightmost_1bit(map)) {
        dst_u32[raw_ctz(map)] |= *p++;
    }
}

/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask. */
void
flow_wildcards_fold_minimask(struct flow_wildcards *wc,
                             const struct minimask *mask)
{
    flow_union_with_miniflow(&wc->masks, &mask->masks);
}

static uint64_t
miniflow_get_map_in_range(const struct miniflow *miniflow,
                          uint8_t start, uint8_t end, unsigned int *offset)
{
    uint64_t map = miniflow->map;
    *offset = 0;

    if (start > 0) {
        uint64_t msk = (UINT64_C(1) << start) - 1; /* 'start' LSBs set */
        *offset = count_1bits(map & msk);
        map &= ~msk;
    }
    if (end < FLOW_U32S) {
        uint64_t msk = (UINT64_C(1) << end) - 1; /* 'end' LSBs set */
        map &= msk;
    }
    return map;
}

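/* A worked example (added commentary, not original): with map = 0x36
 * (u32 indices 1, 2, 4, 5 present), start = 2 and end = 5 give msk = 0x03 at
 * the low end, so *offset = count_1bits(0x36 & 0x03) = 1 stored value to
 * skip, and the returned map is 0x36 & ~0x03 & 0x1f = 0x14, i.e. only
 * indices 2 and 4 remain in range. */
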
/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask
 * in range [start, end). */
void
flow_wildcards_fold_minimask_range(struct flow_wildcards *wc,
                                   const struct minimask *mask,
                                   uint8_t start, uint8_t end)
{
    uint32_t *dst_u32 = (uint32_t *)&wc->masks;
    unsigned int offset;
    uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
                                             &offset);
    const uint32_t *p = mask->masks.values + offset;

    for (; map; map = zero_rightmost_1bit(map)) {
        dst_u32[raw_ctz(map)] |= *p++;
    }
}

/* Returns a hash of the wildcards in 'wc'. */
uint32_t
flow_wildcards_hash(const struct flow_wildcards *wc, uint32_t basis)
{
    return flow_hash(&wc->masks, basis);
}

/* Returns true if 'a' and 'b' represent the same wildcards, false if they are
 * different. */
bool
flow_wildcards_equal(const struct flow_wildcards *a,
                     const struct flow_wildcards *b)
{
    return flow_equal(&a->masks, &b->masks);
}

/* Returns true if at least one bit or field is wildcarded in 'a' but not in
 * 'b', false otherwise. */
bool
flow_wildcards_has_extra(const struct flow_wildcards *a,
                         const struct flow_wildcards *b)
{
    const uint32_t *a_u32 = (const uint32_t *) &a->masks;
    const uint32_t *b_u32 = (const uint32_t *) &b->masks;
    size_t i;

    for (i = 0; i < FLOW_U32S; i++) {
        if ((a_u32[i] & b_u32[i]) != b_u32[i]) {
            return true;
        }
    }
    return false;
}

/* Returns true if 'a' and 'b' are equal, except that 0-bits (wildcarded bits)
 * in 'wc' do not need to be equal in 'a' and 'b'. */
bool
flow_equal_except(const struct flow *a, const struct flow *b,
                  const struct flow_wildcards *wc)
{
    const uint32_t *a_u32 = (const uint32_t *) a;
    const uint32_t *b_u32 = (const uint32_t *) b;
    const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;
    size_t i;

    for (i = 0; i < FLOW_U32S; i++) {
        if ((a_u32[i] ^ b_u32[i]) & wc_u32[i]) {
            return false;
        }
    }
    return true;
}

/* Sets the wildcard mask for register 'idx' in 'wc' to 'mask'.
 * (A 0-bit indicates a wildcard bit.) */
void
flow_wildcards_set_reg_mask(struct flow_wildcards *wc, int idx, uint32_t mask)
{
    wc->masks.regs[idx] = mask;
}

/* Calculates the 5-tuple hash from the given flow. */
uint32_t
miniflow_hash_5tuple(const struct miniflow *flow, uint32_t basis)
{
    uint32_t hash;

    if (!flow) {
        return 0;
    }

    hash = mhash_add(basis,
                     miniflow_get_u32(flow, offsetof(struct flow, nw_src)));
    hash = mhash_add(hash,
                     miniflow_get_u32(flow, offsetof(struct flow, nw_dst)));
    hash = mhash_add(hash,
                     miniflow_get_u32(flow, offsetof(struct flow, tp_src)));
    hash = mhash_add(hash,
                     miniflow_get_u8(flow, offsetof(struct flow, nw_proto)));

    return mhash_finish(hash, 13);
}

BUILD_ASSERT_DECL(offsetof(struct flow, tp_src) + 2
                  == offsetof(struct flow, tp_dst) &&
                  offsetof(struct flow, tp_src) / 4
                  == offsetof(struct flow, tp_dst) / 4);

/* Calculates the 5-tuple hash from the given flow. */
uint32_t
flow_hash_5tuple(const struct flow *flow, uint32_t basis)
{
    const uint32_t *flow_u32 = (const uint32_t *)flow;
    uint32_t hash;

    if (!flow) {
        return 0;
    }

    hash = mhash_add(basis, (OVS_FORCE uint32_t) flow->nw_src);
    hash = mhash_add(hash, (OVS_FORCE uint32_t) flow->nw_dst);
    hash = mhash_add(hash, flow_u32[offsetof(struct flow, tp_src) / 4]);
    hash = mhash_add(hash, flow->nw_proto);

    return mhash_finish(hash, 13);
}

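/* Illustrative sketch, not part of the original file: the BUILD_ASSERT_DECL
 * above is what lets flow_hash_5tuple() read both L4 ports with one u32
 * load.  'example_hash_ports_separately' is a hypothetical equivalent,
 * assuming a little-endian host so that the combined word is tp_src in the
 * low half and tp_dst in the high half. */
static inline uint32_t
example_hash_ports_separately(const struct flow *flow, uint32_t hash)
{
    /* Mixes in the same bytes as
     * flow_u32[offsetof(struct flow, tp_src) / 4] on a little-endian host;
     * the single-load form simply avoids the shift and OR. */
    return mhash_add(hash, (OVS_FORCE uint32_t) flow->tp_src
                     | ((OVS_FORCE uint32_t) flow->tp_dst << 16));
}
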
/* Hashes 'flow' based on its L2 through L4 protocol information. */
uint32_t
flow_hash_symmetric_l4(const struct flow *flow, uint32_t basis)
{
    struct {
        union {
            ovs_be32 ipv4_addr;
            struct in6_addr ipv6_addr;
        };
        ovs_be16 eth_type;
        ovs_be16 vlan_tci;
        ovs_be16 tp_port;
        uint8_t eth_addr[ETH_ADDR_LEN];
        uint8_t ip_proto;
    } fields;

    int i;

    memset(&fields, 0, sizeof fields);
    for (i = 0; i < ETH_ADDR_LEN; i++) {
        fields.eth_addr[i] = flow->dl_src[i] ^ flow->dl_dst[i];
    }
    fields.vlan_tci = flow->vlan_tci & htons(VLAN_VID_MASK);
    fields.eth_type = flow->dl_type;

    /* UDP source and destination port are not taken into account because they
     * will not necessarily be symmetric in a bidirectional flow. */
    if (fields.eth_type == htons(ETH_TYPE_IP)) {
        fields.ipv4_addr = flow->nw_src ^ flow->nw_dst;
        fields.ip_proto = flow->nw_proto;
        if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
            fields.tp_port = flow->tp_src ^ flow->tp_dst;
        }
    } else if (fields.eth_type == htons(ETH_TYPE_IPV6)) {
        const uint8_t *a = &flow->ipv6_src.s6_addr[0];
        const uint8_t *b = &flow->ipv6_dst.s6_addr[0];
        uint8_t *ipv6_addr = &fields.ipv6_addr.s6_addr[0];

        for (i = 0; i < 16; i++) {
            ipv6_addr[i] = a[i] ^ b[i];
        }
        fields.ip_proto = flow->nw_proto;
        if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
            fields.tp_port = flow->tp_src ^ flow->tp_dst;
        }
    }
    return jhash_bytes(&fields, sizeof fields, basis);
}

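/* A worked property (added commentary, not original): the address and port
 * fields above are XORs of the two directions (ipv4_addr = nw_src ^ nw_dst,
 * tp_port = tp_src ^ tp_dst, eth_addr[i] = dl_src[i] ^ dl_dst[i]), and the
 * remaining fields (eth_type, vlan_tci, ip_proto) are direction-independent,
 * so a TCP flow 10.0.0.1:80 -> 10.0.0.2:5000 and its reply hash to the same
 * value. */
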
/* Initialize a flow with random fields that matter for nx_hash_fields. */
void
flow_random_hash_fields(struct flow *flow)
{
    uint16_t rnd = random_uint16();

    /* Initialize to all zeros. */
    memset(flow, 0, sizeof *flow);

    eth_addr_random(flow->dl_src);
    eth_addr_random(flow->dl_dst);

    flow->vlan_tci = (OVS_FORCE ovs_be16) (random_uint16() & VLAN_VID_MASK);

    /* Make most of the random flows IPv4, some IPv6, and rest random. */
    flow->dl_type = rnd < 0x8000 ? htons(ETH_TYPE_IP) :
        rnd < 0xc000 ? htons(ETH_TYPE_IPV6) : (OVS_FORCE ovs_be16)rnd;

    if (dl_type_is_ip_any(flow->dl_type)) {
        if (flow->dl_type == htons(ETH_TYPE_IP)) {
            flow->nw_src = (OVS_FORCE ovs_be32)random_uint32();
            flow->nw_dst = (OVS_FORCE ovs_be32)random_uint32();
        } else {
            random_bytes(&flow->ipv6_src, sizeof flow->ipv6_src);
            random_bytes(&flow->ipv6_dst, sizeof flow->ipv6_dst);
        }
        /* Make most of IP flows TCP, some UDP or SCTP, and rest random. */
        rnd = random_uint16();
        flow->nw_proto = rnd < 0x8000 ? IPPROTO_TCP :
            rnd < 0xc000 ? IPPROTO_UDP :
            rnd < 0xd000 ? IPPROTO_SCTP : (uint8_t)rnd;
        if (flow->nw_proto == IPPROTO_TCP ||
            flow->nw_proto == IPPROTO_UDP ||
            flow->nw_proto == IPPROTO_SCTP) {
            flow->tp_src = (OVS_FORCE ovs_be16)random_uint16();
            flow->tp_dst = (OVS_FORCE ovs_be16)random_uint16();
        }
    }
}

/* Masks the fields in 'wc' that are used by the flow hash 'fields'. */
void
flow_mask_hash_fields(const struct flow *flow, struct flow_wildcards *wc,
                      enum nx_hash_fields fields)
{
    switch (fields) {
    case NX_HASH_FIELDS_ETH_SRC:
        memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
        break;

    case NX_HASH_FIELDS_SYMMETRIC_L4:
        memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
        memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
        if (flow->dl_type == htons(ETH_TYPE_IP)) {
            memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
            memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
        } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            memset(&wc->masks.ipv6_src, 0xff, sizeof wc->masks.ipv6_src);
            memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
        }
        if (is_ip_any(flow)) {
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
            flow_unwildcard_tp_ports(flow, wc);
        }
        wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
        break;

    default:
        OVS_NOT_REACHED();
    }
}

/* Hashes the portions of 'flow' designated by 'fields'. */
uint32_t
flow_hash_fields(const struct flow *flow, enum nx_hash_fields fields,
                 uint16_t basis)
{
    switch (fields) {

    case NX_HASH_FIELDS_ETH_SRC:
        return jhash_bytes(flow->dl_src, sizeof flow->dl_src, basis);

    case NX_HASH_FIELDS_SYMMETRIC_L4:
        return flow_hash_symmetric_l4(flow, basis);
    }

    OVS_NOT_REACHED();
}

/* Returns a string representation of 'fields'. */
const char *
flow_hash_fields_to_str(enum nx_hash_fields fields)
{
    switch (fields) {
    case NX_HASH_FIELDS_ETH_SRC: return "eth_src";
    case NX_HASH_FIELDS_SYMMETRIC_L4: return "symmetric_l4";
    default: return "<unknown>";
    }
}

/* Returns true if the value of 'fields' is supported.  Otherwise false. */
bool
flow_hash_fields_valid(enum nx_hash_fields fields)
{
    return fields == NX_HASH_FIELDS_ETH_SRC
        || fields == NX_HASH_FIELDS_SYMMETRIC_L4;
}

/* Returns a hash value for the bits of 'flow' that are active based on
 * 'wc', given 'basis'. */
uint32_t
flow_hash_in_wildcards(const struct flow *flow,
                       const struct flow_wildcards *wc, uint32_t basis)
{
    const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;
    const uint32_t *flow_u32 = (const uint32_t *) flow;
    uint32_t hash;
    size_t i;

    hash = basis;
    for (i = 0; i < FLOW_U32S; i++) {
        hash = mhash_add(hash, flow_u32[i] & wc_u32[i]);
    }
    return mhash_finish(hash, 4 * FLOW_U32S);
}

/* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
 * OpenFlow 1.0 "dl_vlan" value:
 *
 *      - If it is in the range 0...4095, 'flow->vlan_tci' is set to match
 *        that VLAN.  Any existing PCP match is unchanged (it becomes 0 if
 *        'flow' previously matched packets without a VLAN header).
 *
 *      - If it is OFP10_VLAN_NONE, 'flow->vlan_tci' is set to match a packet
 *        without a VLAN tag.
 *
 *      - Other values of 'vid' should not be used. */
void
flow_set_dl_vlan(struct flow *flow, ovs_be16 vid)
{
    if (vid == htons(OFP10_VLAN_NONE)) {
        flow->vlan_tci = htons(0);
    } else {
        vid &= htons(VLAN_VID_MASK);
        flow->vlan_tci &= ~htons(VLAN_VID_MASK);
        flow->vlan_tci |= htons(VLAN_CFI) | vid;
    }
}

/* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
 * OpenFlow 1.2 "vlan_vid" value, that is, the low 13 bits of 'vlan_tci' (VID
 * plus CFI). */
void
flow_set_vlan_vid(struct flow *flow, ovs_be16 vid)
{
    ovs_be16 mask = htons(VLAN_VID_MASK | VLAN_CFI);
    flow->vlan_tci &= ~mask;
    flow->vlan_tci |= vid & mask;
}

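/* A worked example (added commentary, not original): the two setters differ
 * in how "no VLAN" is encoded.  flow_set_dl_vlan(flow, htons(100)) yields
 * vlan_tci = CFI | 100 = htons(0x1064), while the OF1.2-style
 * flow_set_vlan_vid(flow, htons(0x1064)) writes those same low 13 bits (CFI
 * plus VID) verbatim.  Passing htons(OFP10_VLAN_NONE) to the former clears
 * the TCI entirely. */
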
/* Sets the VLAN PCP that 'flow' matches to 'pcp', which should be in the
 * range 0...7.
 *
 * This function has no effect on the VLAN ID that 'flow' matches.
 *
 * After calling this function, 'flow' will not match packets without a VLAN
 * header. */
void
flow_set_vlan_pcp(struct flow *flow, uint8_t pcp)
{
    pcp &= 0x07;
    flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
    flow->vlan_tci |= htons((pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
}

/* Returns the number of MPLS LSEs present in 'flow'
 *
 * Returns 0 if the 'dl_type' of 'flow' is not an MPLS ethernet type.
 * Otherwise traverses 'flow''s MPLS label stack stopping at the
 * first entry that has the BoS bit set.  If no such entry exists then
 * the maximum number of LSEs that can be stored in 'flow' is returned.
 */
int
flow_count_mpls_labels(const struct flow *flow, struct flow_wildcards *wc)
{
    if (wc) {
        wc->masks.dl_type = OVS_BE16_MAX;
    }
    if (eth_type_mpls(flow->dl_type)) {
        int i;
        int len = FLOW_MAX_MPLS_LABELS;

        for (i = 0; i < len; i++) {
            if (wc) {
                wc->masks.mpls_lse[i] |= htonl(MPLS_BOS_MASK);
            }
            if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
                return i + 1;
            }
        }

        return len;
    } else {
        return 0;
    }
}

/* Returns the number of consecutive MPLS LSEs, starting at the
 * innermost LSE, that are common in 'a' and 'b'.
 *
 * 'an' must be flow_count_mpls_labels(a).
 * 'bn' must be flow_count_mpls_labels(b).
 */
int
flow_count_common_mpls_labels(const struct flow *a, int an,
                              const struct flow *b, int bn,
                              struct flow_wildcards *wc)
{
    int min_n = MIN(an, bn);
    if (min_n == 0) {
        return 0;
    } else {
        int common_n = 0;
        int a_last = an - 1;
        int b_last = bn - 1;
        int i;

        for (i = 0; i < min_n; i++) {
            if (wc) {
                wc->masks.mpls_lse[a_last - i] = OVS_BE32_MAX;
                wc->masks.mpls_lse[b_last - i] = OVS_BE32_MAX;
            }
            if (a->mpls_lse[a_last - i] != b->mpls_lse[b_last - i]) {
                break;
            } else {
                common_n++;
            }
        }

        return common_n;
    }
}

/* Adds a new outermost MPLS label to 'flow' and changes 'flow''s Ethernet type
 * to 'mpls_eth_type', which must be an MPLS Ethertype.
 *
 * If the new label is the first MPLS label in 'flow', it is generated as:
 *
 *     - label: 2, if 'flow' is IPv6, otherwise 0.
 *
 *     - TTL: IPv4 or IPv6 TTL, if present and nonzero, otherwise 64.
 *
 *     - TC: IPv4 or IPv6 TOS, if present, otherwise 0.
 *
 *     - BoS: 1.
 *
 * If the new label is the second or later MPLS label in 'flow', it is
 * generated as:
 *
 *     - label: Copied from outer label.
 *
 *     - TTL: Copied from outer label.
 *
 *     - TC: Copied from outer label.
 *
 *     - BoS: 0.
 *
 * 'n' must be flow_count_mpls_labels(flow).  'n' must be less than
 * FLOW_MAX_MPLS_LABELS (because otherwise flow->mpls_lse[] would overflow).
 */
void
flow_push_mpls(struct flow *flow, int n, ovs_be16 mpls_eth_type,
               struct flow_wildcards *wc)
{
    ovs_assert(eth_type_mpls(mpls_eth_type));
    ovs_assert(n < FLOW_MAX_MPLS_LABELS);

    memset(wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);

    if (n) {
        int i;

        for (i = n; i >= 1; i--) {
            flow->mpls_lse[i] = flow->mpls_lse[i - 1];
        }
        flow->mpls_lse[0] = (flow->mpls_lse[1]
                             & htonl(~MPLS_BOS_MASK));
    } else {
        int label = 0;          /* IPv4 Explicit Null. */
        int tc = 0;
        int ttl = 64;

        if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            label = 2;
        }

        if (is_ip_any(flow)) {
            tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
            wc->masks.nw_tos |= IP_DSCP_MASK;

            if (flow->nw_ttl) {
                ttl = flow->nw_ttl;
            }
            wc->masks.nw_ttl = 0xff;
        }

        flow->mpls_lse[0] = set_mpls_lse_values(ttl, tc, 1, htonl(label));

        /* Clear all L3 and L4 fields. */
        BUILD_ASSERT(FLOW_WC_SEQ == 26);
        memset((char *) flow + FLOW_SEGMENT_2_ENDS_AT, 0,
               sizeof(struct flow) - FLOW_SEGMENT_2_ENDS_AT);
    }
    flow->dl_type = mpls_eth_type;
}

/* Tries to remove the outermost MPLS label from 'flow'.  Returns true if
 * successful, false otherwise.  On success, sets 'flow''s Ethernet type to
 * 'eth_type'.
 *
 * 'n' must be flow_count_mpls_labels(flow). */
bool
flow_pop_mpls(struct flow *flow, int n, ovs_be16 eth_type,
              struct flow_wildcards *wc)
{
    int i;

    if (n == 0) {
        /* Nothing to pop. */
        return false;
    } else if (n == FLOW_MAX_MPLS_LABELS
               && !(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
        /* Can't pop because we don't know what to fill in mpls_lse[n - 1]. */
        return false;
    }

    memset(wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
    for (i = 1; i < n; i++) {
        flow->mpls_lse[i - 1] = flow->mpls_lse[i];
    }
    flow->mpls_lse[n - 1] = 0;
    flow->dl_type = eth_type;
    return true;
}

/* Sets the MPLS Label that 'flow' matches to 'label', which is interpreted
 * as an OpenFlow 1.1 "mpls_label" value. */
void
flow_set_mpls_label(struct flow *flow, int idx, ovs_be32 label)
{
    set_mpls_lse_label(&flow->mpls_lse[idx], label);
}

/* Sets the MPLS TTL that 'flow' matches to 'ttl', which should be in the
 * range 0...255. */
void
flow_set_mpls_ttl(struct flow *flow, int idx, uint8_t ttl)
{
    set_mpls_lse_ttl(&flow->mpls_lse[idx], ttl);
}

/* Sets the MPLS TC that 'flow' matches to 'tc', which should be in the
 * range 0...7. */
void
flow_set_mpls_tc(struct flow *flow, int idx, uint8_t tc)
{
    set_mpls_lse_tc(&flow->mpls_lse[idx], tc);
}

/* Sets the MPLS BOS bit that 'flow' matches to 'bos', which should be 0 or 1. */
void
flow_set_mpls_bos(struct flow *flow, int idx, uint8_t bos)
{
    set_mpls_lse_bos(&flow->mpls_lse[idx], bos);
}

/* Sets the entire MPLS LSE. */
void
flow_set_mpls_lse(struct flow *flow, int idx, ovs_be32 lse)
{
    flow->mpls_lse[idx] = lse;
}

static int
flow_compose_l4(struct ofpbuf *b, const struct flow *flow)
{
    int l4_len = 0;

    if (!(flow->nw_frag & FLOW_NW_FRAG_ANY)
        || !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
        if (flow->nw_proto == IPPROTO_TCP) {
            struct tcp_header *tcp;

            l4_len = sizeof *tcp;
            tcp = ofpbuf_put_zeros(b, l4_len);
            tcp->tcp_src = flow->tp_src;
            tcp->tcp_dst = flow->tp_dst;
            tcp->tcp_ctl = TCP_CTL(ntohs(flow->tcp_flags), 5);
        } else if (flow->nw_proto == IPPROTO_UDP) {
            struct udp_header *udp;

            l4_len = sizeof *udp;
            udp = ofpbuf_put_zeros(b, l4_len);
            udp->udp_src = flow->tp_src;
            udp->udp_dst = flow->tp_dst;
        } else if (flow->nw_proto == IPPROTO_SCTP) {
            struct sctp_header *sctp;

            l4_len = sizeof *sctp;
            sctp = ofpbuf_put_zeros(b, l4_len);
            sctp->sctp_src = flow->tp_src;
            sctp->sctp_dst = flow->tp_dst;
        } else if (flow->nw_proto == IPPROTO_ICMP) {
            struct icmp_header *icmp;

            l4_len = sizeof *icmp;
            icmp = ofpbuf_put_zeros(b, l4_len);
            icmp->icmp_type = ntohs(flow->tp_src);
            icmp->icmp_code = ntohs(flow->tp_dst);
            icmp->icmp_csum = csum(icmp, ICMP_HEADER_LEN);
        } else if (flow->nw_proto == IPPROTO_ICMPV6) {
            struct icmp6_hdr *icmp;

            l4_len = sizeof *icmp;
            icmp = ofpbuf_put_zeros(b, l4_len);
            icmp->icmp6_type = ntohs(flow->tp_src);
            icmp->icmp6_code = ntohs(flow->tp_dst);

            if (icmp->icmp6_code == 0 &&
                (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
                 icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {
                struct in6_addr *nd_target;
                struct nd_opt_hdr *nd_opt;

                l4_len += sizeof *nd_target;
                nd_target = ofpbuf_put_zeros(b, sizeof *nd_target);
                *nd_target = flow->nd_target;

                if (!eth_addr_is_zero(flow->arp_sha)) {
                    l4_len += 8;
                    nd_opt = ofpbuf_put_zeros(b, 8);
                    nd_opt->nd_opt_len = 1;
                    nd_opt->nd_opt_type = ND_OPT_SOURCE_LINKADDR;
                    memcpy(nd_opt + 1, flow->arp_sha, ETH_ADDR_LEN);
                }
                if (!eth_addr_is_zero(flow->arp_tha)) {
                    l4_len += 8;
                    nd_opt = ofpbuf_put_zeros(b, 8);
                    nd_opt->nd_opt_len = 1;
                    nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR;
                    memcpy(nd_opt + 1, flow->arp_tha, ETH_ADDR_LEN);
                }
            }
            icmp->icmp6_cksum = (OVS_FORCE uint16_t)
                csum(icmp, (char *)ofpbuf_tail(b) - (char *)icmp);
        }
    }
    return l4_len;
}

/* Puts into 'b' a packet that flow_extract() would parse as having the given
 * 'flow'.
 *
 * (This is useful only for testing, obviously, and the packet isn't really
 * valid.  It hasn't got some checksums filled in, for one, and lots of fields
 * are just zeroed.) */
void
flow_compose(struct ofpbuf *b, const struct flow *flow)
{
    int l4_len;

    /* eth_compose() sets l3 pointer and makes sure it is 32-bit aligned. */
    eth_compose(b, flow->dl_dst, flow->dl_src, ntohs(flow->dl_type), 0);
    if (flow->dl_type == htons(FLOW_DL_TYPE_NONE)) {
        struct eth_header *eth = ofpbuf_l2(b);
        eth->eth_type = htons(ofpbuf_size(b));
        return;
    }

    if (flow->vlan_tci & htons(VLAN_CFI)) {
        eth_push_vlan(b, htons(ETH_TYPE_VLAN), flow->vlan_tci);
    }

    if (flow->dl_type == htons(ETH_TYPE_IP)) {
        struct ip_header *ip;

        ip = ofpbuf_put_zeros(b, sizeof *ip);
        ip->ip_ihl_ver = IP_IHL_VER(5, 4);
        ip->ip_tos = flow->nw_tos;
        ip->ip_ttl = flow->nw_ttl;
        ip->ip_proto = flow->nw_proto;
        put_16aligned_be32(&ip->ip_src, flow->nw_src);
        put_16aligned_be32(&ip->ip_dst, flow->nw_dst);

        if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
            ip->ip_frag_off |= htons(IP_MORE_FRAGMENTS);
            if (flow->nw_frag & FLOW_NW_FRAG_LATER) {
                ip->ip_frag_off |= htons(100);
            }
        }

        ofpbuf_set_l4(b, ofpbuf_tail(b));

        l4_len = flow_compose_l4(b, flow);

        ip->ip_tot_len = htons(b->l4_ofs - b->l3_ofs + l4_len);
        ip->ip_csum = csum(ip, sizeof *ip);
    } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
        struct ovs_16aligned_ip6_hdr *nh;

        nh = ofpbuf_put_zeros(b, sizeof *nh);
        put_16aligned_be32(&nh->ip6_flow, htonl(6 << 28) |
                           htonl(flow->nw_tos << 20) | flow->ipv6_label);
        nh->ip6_hlim = flow->nw_ttl;
        nh->ip6_nxt = flow->nw_proto;

        memcpy(&nh->ip6_src, &flow->ipv6_src, sizeof(nh->ip6_src));
        memcpy(&nh->ip6_dst, &flow->ipv6_dst, sizeof(nh->ip6_dst));

        ofpbuf_set_l4(b, ofpbuf_tail(b));

        l4_len = flow_compose_l4(b, flow);

        nh->ip6_plen = htons(l4_len);
    } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
               flow->dl_type == htons(ETH_TYPE_RARP)) {
        struct arp_eth_header *arp;

        arp = ofpbuf_put_zeros(b, sizeof *arp);
        ofpbuf_set_l3(b, arp);
        arp->ar_hrd = htons(1);
        arp->ar_pro = htons(ETH_TYPE_IP);
        arp->ar_hln = ETH_ADDR_LEN;
        arp->ar_pln = 4;
        arp->ar_op = htons(flow->nw_proto);

        if (flow->nw_proto == ARP_OP_REQUEST ||
            flow->nw_proto == ARP_OP_REPLY) {
            put_16aligned_be32(&arp->ar_spa, flow->nw_src);
            put_16aligned_be32(&arp->ar_tpa, flow->nw_dst);
            memcpy(arp->ar_sha, flow->arp_sha, ETH_ADDR_LEN);
            memcpy(arp->ar_tha, flow->arp_tha, ETH_ADDR_LEN);
        }
    }

    if (eth_type_mpls(flow->dl_type)) {
        int n;

        b->l2_5_ofs = b->l3_ofs;
        for (n = 1; n < FLOW_MAX_MPLS_LABELS; n++) {
            if (flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK)) {
                break;
            }
        }
        while (n > 0) {
            push_mpls(b, flow->dl_type, flow->mpls_lse[--n]);
        }
    }
}

/* Compressed flow. */

static int
miniflow_n_values(const struct miniflow *flow)
{
    return count_1bits(flow->map);
}

static uint32_t *
miniflow_alloc_values(struct miniflow *flow, int n)
{
    if (n <= MINI_N_INLINE) {
        return flow->inline_values;
    } else {
        COVERAGE_INC(miniflow_malloc);
        return xmalloc(n * sizeof *flow->values);
    }
}

/* Completes an initialization of 'dst' as a miniflow copy of 'src' begun by
 * the caller.  The caller must have already initialized 'dst->map' properly
 * to indicate the significant uint32_t elements of 'src'.  'n' must be the
 * number of 1-bits in 'dst->map'.
 *
 * Normally the significant elements are the ones that are non-zero.  However,
 * when a miniflow is initialized from a (mini)mask, the values can be zeroes,
 * so that the flow and mask always have the same maps.
 *
 * This function initializes 'dst->values' (either inline if possible or with
 * malloc() otherwise) and copies the uint32_t elements of 'src' indicated by
 * 'dst->map' into it. */
static void
miniflow_init__(struct miniflow *dst, const struct flow *src, int n)
{
    const uint32_t *src_u32 = (const uint32_t *) src;
    unsigned int ofs = 0;
    uint64_t map;

    dst->values = miniflow_alloc_values(dst, n);
    for (map = dst->map; map; map = zero_rightmost_1bit(map)) {
        dst->values[ofs++] = src_u32[raw_ctz(map)];
    }
}

/* Initializes 'dst' as a copy of 'src'.  The caller must eventually free 'dst'
 * with miniflow_destroy(). */
void
miniflow_init(struct miniflow *dst, const struct flow *src)
{
    const uint32_t *src_u32 = (const uint32_t *) src;
    unsigned int i;
    int n;

    /* Initialize dst->map, counting the number of nonzero elements. */
    n = 0;
    dst->map = 0;

    for (i = 0; i < FLOW_U32S; i++) {
        if (src_u32[i]) {
            dst->map |= UINT64_C(1) << i;
            n++;
        }
    }

    miniflow_init__(dst, src, n);
}

/* Initializes 'dst' as a copy of 'src', using 'mask->map' as 'dst''s map.  The
 * caller must eventually free 'dst' with miniflow_destroy(). */
void
miniflow_init_with_minimask(struct miniflow *dst, const struct flow *src,
                            const struct minimask *mask)
{
    dst->map = mask->masks.map;
    miniflow_init__(dst, src, miniflow_n_values(dst));
}

/* Initializes 'dst' as a copy of 'src'.  The caller must eventually free 'dst'
 * with miniflow_destroy(). */
void
miniflow_clone(struct miniflow *dst, const struct miniflow *src)
{
    int n = miniflow_n_values(src);
    dst->map = src->map;
    dst->values = miniflow_alloc_values(dst, n);
    memcpy(dst->values, src->values, n * sizeof *dst->values);
}

/* Initializes 'dst' with the data in 'src', destroying 'src'.
 * The caller must eventually free 'dst' with miniflow_destroy(). */
void
miniflow_move(struct miniflow *dst, struct miniflow *src)
{
    if (src->values == src->inline_values) {
        dst->values = dst->inline_values;
        memcpy(dst->values, src->values,
               miniflow_n_values(src) * sizeof *dst->values);
    } else {
        dst->values = src->values;
    }
    dst->map = src->map;
}

/* Frees any memory owned by 'flow'.  Does not free the storage in which 'flow'
 * itself resides; the caller is responsible for that. */
void
miniflow_destroy(struct miniflow *flow)
{
    if (flow->values != flow->inline_values) {
        free(flow->values);
    }
}

/* Initializes 'dst' as a copy of 'src'. */
void
miniflow_expand(const struct miniflow *src, struct flow *dst)
{
    memset(dst, 0, sizeof *dst);
    flow_union_with_miniflow(dst, src);
}

/* Returns the uint32_t that would be at byte offset '4 * u32_ofs' if 'flow'
 * were expanded into a "struct flow". */
static uint32_t
miniflow_get(const struct miniflow *flow, unsigned int u32_ofs)
{
    return (flow->map & UINT64_C(1) << u32_ofs)
        ? *(flow->values +
            count_1bits(flow->map & ((UINT64_C(1) << u32_ofs) - 1)))
        : 0;
}

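/* A worked example (added commentary, not original): suppose map = 0x29
 * (u32 indices 0, 3, 5 stored) and u32_ofs = 5.  The bit test passes, and
 * count_1bits(0x29 & 0x1f) = count_1bits(0x09) = 2, so the value for index 5
 * is values[2], the third stored element.  An absent index yields 0. */
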
/* Returns true if 'a' and 'b' are the same flow, false otherwise. */
bool
miniflow_equal(const struct miniflow *a, const struct miniflow *b)
{
    const uint32_t *ap = a->values;
    const uint32_t *bp = b->values;
    const uint64_t a_map = a->map;
    const uint64_t b_map = b->map;
    uint64_t map;

    if (a_map == b_map) {
        for (map = a_map; map; map = zero_rightmost_1bit(map)) {
            if (*ap++ != *bp++) {
                return false;
            }
        }
    } else {
        for (map = a_map | b_map; map; map = zero_rightmost_1bit(map)) {
            uint64_t bit = rightmost_1bit(map);
            uint64_t a_value = a_map & bit ? *ap++ : 0;
            uint64_t b_value = b_map & bit ? *bp++ : 0;

            if (a_value != b_value) {
                return false;
            }
        }
    }

    return true;
}

/* Returns true if 'a' and 'b' are equal at the places where there are 1-bits
 * in 'mask', false if they differ. */
bool
miniflow_equal_in_minimask(const struct miniflow *a, const struct miniflow *b,
                           const struct minimask *mask)
{
    const uint32_t *p;
    uint64_t map;

    p = mask->masks.values;

    for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
        int ofs = raw_ctz(map);

        if ((miniflow_get(a, ofs) ^ miniflow_get(b, ofs)) & *p) {
            return false;
        }
        p++;
    }

    return true;
}

/* Returns true if 'a' and 'b' are equal at the places where there are 1-bits
 * in 'mask', false if they differ. */
bool
miniflow_equal_flow_in_minimask(const struct miniflow *a, const struct flow *b,
                                const struct minimask *mask)
{
    const uint32_t *b_u32 = (const uint32_t *) b;
    const uint32_t *p;
    uint64_t map;

    p = mask->masks.values;

    for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
        int ofs = raw_ctz(map);

        if ((miniflow_get(a, ofs) ^ b_u32[ofs]) & *p) {
            return false;
        }
        p++;
    }

    return true;
}

/* Returns a hash value for 'flow', given 'basis'. */
uint32_t
miniflow_hash(const struct miniflow *flow, uint32_t basis)
{
    const uint32_t *p = flow->values;
    uint32_t hash = basis;
    uint64_t hash_map = 0;
    uint64_t map;

    for (map = flow->map; map; map = zero_rightmost_1bit(map)) {
        if (*p) {
            hash = mhash_add(hash, *p);
            hash_map |= rightmost_1bit(map);
        }
        p++;
    }
    hash = mhash_add(hash, hash_map);
    hash = mhash_add(hash, hash_map >> 32);

    return mhash_finish(hash, p - flow->values);
}

/* Returns a hash value for the bits of 'flow' where there are 1-bits in
 * 'mask', given 'basis'.
 *
 * The hash values returned by this function are the same as those returned by
 * flow_hash_in_minimask(), only the form of the arguments differ. */
uint32_t
miniflow_hash_in_minimask(const struct miniflow *flow,
                          const struct minimask *mask, uint32_t basis)
{
    const uint32_t *p = mask->masks.values;
    uint32_t hash = basis;
    uint32_t flow_u32;

    MINIFLOW_FOR_EACH_IN_MAP(flow_u32, flow, mask->masks.map) {
        hash = mhash_add(hash, flow_u32 & *p++);
    }

    return mhash_finish(hash, (p - mask->masks.values) * 4);
}

1853 /* Returns a hash value for the bits of 'flow' where there are 1-bits in
1854 * 'mask', given 'basis'.
1856 * The hash values returned by this function are the same as those returned by
1857 * miniflow_hash_in_minimask(), only the form of the arguments differ. */
1859 flow_hash_in_minimask(const struct flow *flow, const struct minimask *mask,
1862 const uint32_t *flow_u32 = (const uint32_t *)flow;
1863 const uint32_t *p = mask->masks.values;
1868 for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
1869 hash = mhash_add(hash, flow_u32[raw_ctz(map)] & *p++);
1872 return mhash_finish(hash, (p - mask->masks.values) * 4);
/* Returns a hash value for the bits of range [start, end) in 'flow',
 * where there are 1-bits in 'mask', given 'hash'.
 *
 * The hash values returned by this function are the same as those returned by
 * minimatch_hash_range(), only the form of the arguments differ. */
uint32_t
flow_hash_in_minimask_range(const struct flow *flow,
                            const struct minimask *mask,
                            uint8_t start, uint8_t end, uint32_t *basis)
{
    const uint32_t *flow_u32 = (const uint32_t *)flow;
    unsigned int offset;
    uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
                                             &offset);
    const uint32_t *p = mask->masks.values + offset;
    uint32_t hash = *basis;

    for (; map; map = zero_rightmost_1bit(map)) {
        hash = mhash_add(hash, flow_u32[raw_ctz(map)] & *p++);
    }

    *basis = hash; /* Allow continuation from the unfinished value. */
    return mhash_finish(hash, (p - mask->masks.values) * 4);
}

/* Initializes 'mask' as a copy of 'wc'.  The caller must eventually free
 * 'mask' with minimask_destroy(). */
void
minimask_init(struct minimask *mask, const struct flow_wildcards *wc)
{
    miniflow_init(&mask->masks, &wc->masks);
}

/* Initializes 'dst' as a copy of 'src'.  The caller must eventually free 'dst'
 * with minimask_destroy(). */
void
minimask_clone(struct minimask *dst, const struct minimask *src)
{
    miniflow_clone(&dst->masks, &src->masks);
}

/* Initializes 'dst' with the data in 'src', destroying 'src'.
 * The caller must eventually free 'dst' with minimask_destroy(). */
void
minimask_move(struct minimask *dst, struct minimask *src)
{
    miniflow_move(&dst->masks, &src->masks);
}

/* Initializes 'dst_' as the bit-wise "and" of 'a_' and 'b_'.
 *
 * The caller must provide room for FLOW_U32S "uint32_t"s in 'storage', for use
 * by 'dst_'.  The caller must *not* free 'dst_' with minimask_destroy(). */
void
minimask_combine(struct minimask *dst_,
                 const struct minimask *a_, const struct minimask *b_,
                 uint32_t storage[FLOW_U32S])
{
    struct miniflow *dst = &dst_->masks;
    const struct miniflow *a = &a_->masks;
    const struct miniflow *b = &b_->masks;
    uint64_t map;
    int n = 0;

    dst->values = storage;

    dst->map = 0;
    for (map = a->map & b->map; map; map = zero_rightmost_1bit(map)) {
        int ofs = raw_ctz(map);
        uint32_t mask = miniflow_get(a, ofs) & miniflow_get(b, ofs);

        if (mask) {
            dst->map |= rightmost_1bit(map);
            dst->values[n++] = mask;
        }
    }
}

/* Frees any memory owned by 'mask'.  Does not free the storage in which 'mask'
 * itself resides; the caller is responsible for that. */
void
minimask_destroy(struct minimask *mask)
{
    miniflow_destroy(&mask->masks);
}

/* Initializes 'wc' as a copy of 'mask'. */
void
minimask_expand(const struct minimask *mask, struct flow_wildcards *wc)
{
    miniflow_expand(&mask->masks, &wc->masks);
}

/* Returns the uint32_t that would be at byte offset '4 * u32_ofs' if 'mask'
 * were expanded into a "struct flow_wildcards". */
uint32_t
minimask_get(const struct minimask *mask, unsigned int u32_ofs)
{
    return miniflow_get(&mask->masks, u32_ofs);
}

/* Returns true if 'a' and 'b' are the same flow mask, false otherwise. */
bool
minimask_equal(const struct minimask *a, const struct minimask *b)
{
    return miniflow_equal(&a->masks, &b->masks);
}

/* Returns a hash value for 'mask', given 'basis'. */
uint32_t
minimask_hash(const struct minimask *mask, uint32_t basis)
{
    return miniflow_hash(&mask->masks, basis);
}

/* Returns true if at least one bit is wildcarded in 'a_' but not in 'b_',
 * false otherwise. */
bool
minimask_has_extra(const struct minimask *a_, const struct minimask *b_)
{
    const struct miniflow *a = &a_->masks;
    const struct miniflow *b = &b_->masks;
    uint64_t map;

    for (map = a->map | b->map; map; map = zero_rightmost_1bit(map)) {
        int ofs = raw_ctz(map);
        uint32_t a_u32 = miniflow_get(a, ofs);
        uint32_t b_u32 = miniflow_get(b, ofs);

        if ((a_u32 & b_u32) != b_u32) {
            return true;
        }
    }

    return false;
}

/* Returns true if 'mask' matches every packet, false if 'mask' fixes any bits
 * or fields. */
bool
minimask_is_catchall(const struct minimask *mask_)
{
    const struct miniflow *mask = &mask_->masks;
    const uint32_t *p = mask->values;
    uint64_t map;

    for (map = mask->map; map; map = zero_rightmost_1bit(map)) {
        if (*p++) {
            return false;
        }
    }
    return true;
}