 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <netinet/ip6.h>
#include "byte-order.h"
#include "dynamic-string.h"
#include "openflow/openflow.h"
#include "unaligned.h"

COVERAGE_DEFINE(flow_extract);
COVERAGE_DEFINE(miniflow_malloc);
/* U32 indices for segmented flow classification. */
const uint8_t flow_segment_u32s[4] = {
    FLOW_SEGMENT_1_ENDS_AT / 4,
    FLOW_SEGMENT_2_ENDS_AT / 4,
    FLOW_SEGMENT_3_ENDS_AT / 4,
static struct arp_eth_header *
pull_arp(struct ofpbuf *packet)
    return ofpbuf_try_pull(packet, ARP_ETH_HEADER_LEN);

static struct ip_header *
pull_ip(struct ofpbuf *packet)
    if (ofpbuf_size(packet) >= IP_HEADER_LEN) {
        struct ip_header *ip = ofpbuf_data(packet);
        int ip_len = IP_IHL(ip->ip_ihl_ver) * 4;
        if (ip_len >= IP_HEADER_LEN && ofpbuf_size(packet) >= ip_len) {
            return ofpbuf_pull(packet, ip_len);

static struct icmp_header *
pull_icmp(struct ofpbuf *packet)
    return ofpbuf_try_pull(packet, ICMP_HEADER_LEN);

static struct icmp6_hdr *
pull_icmpv6(struct ofpbuf *packet)
    return ofpbuf_try_pull(packet, sizeof(struct icmp6_hdr));
parse_mpls(struct ofpbuf *b, struct flow *flow)
    while ((mh = ofpbuf_try_pull(b, sizeof *mh))) {
        if (idx < FLOW_MAX_MPLS_LABELS) {
            flow->mpls_lse[idx++] = mh->mpls_lse;
        if (mh->mpls_lse & htonl(MPLS_BOS_MASK)) {

parse_vlan(struct ofpbuf *b, struct flow *flow)
        ovs_be16 eth_type;      /* ETH_TYPE_VLAN */

    if (ofpbuf_size(b) >= sizeof(struct qtag_prefix) + sizeof(ovs_be16)) {
        struct qtag_prefix *qp = ofpbuf_pull(b, sizeof *qp);
        flow->vlan_tci = qp->tci | htons(VLAN_CFI);
parse_ethertype(struct ofpbuf *b)
    struct llc_snap_header *llc;

    proto = *(ovs_be16 *) ofpbuf_pull(b, sizeof proto);
    if (ntohs(proto) >= ETH_TYPE_MIN) {

    if (ofpbuf_size(b) < sizeof *llc) {
        return htons(FLOW_DL_TYPE_NONE);

    llc = ofpbuf_data(b);
    if (llc->llc.llc_dsap != LLC_DSAP_SNAP
        || llc->llc.llc_ssap != LLC_SSAP_SNAP
        || llc->llc.llc_cntl != LLC_CNTL_SNAP
        || memcmp(llc->snap.snap_org, SNAP_ORG_ETHERNET,
                  sizeof llc->snap.snap_org)) {
        return htons(FLOW_DL_TYPE_NONE);

    ofpbuf_pull(b, sizeof *llc);

    if (ntohs(llc->snap.snap_type) >= ETH_TYPE_MIN) {
        return llc->snap.snap_type;

    return htons(FLOW_DL_TYPE_NONE);
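
/* For illustration: a frame whose 16-bit type/length field is, say, 0x0800
 * (>= ETH_TYPE_MIN) is an Ethernet II frame, and that value is returned
 * directly as the Ethertype.  A smaller value is an 802.3 length, so the
 * LLC/SNAP header that follows is inspected instead; if it does not carry an
 * encapsulated Ethertype, the frame is reported as FLOW_DL_TYPE_NONE. */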
parse_ipv6(struct ofpbuf *packet, struct flow *flow)
    const struct ovs_16aligned_ip6_hdr *nh;

    nh = ofpbuf_try_pull(packet, sizeof *nh);

    nexthdr = nh->ip6_nxt;

    memcpy(&flow->ipv6_src, &nh->ip6_src, sizeof flow->ipv6_src);
    memcpy(&flow->ipv6_dst, &nh->ip6_dst, sizeof flow->ipv6_dst);

    tc_flow = get_16aligned_be32(&nh->ip6_flow);
    flow->nw_tos = ntohl(tc_flow) >> 20;
    flow->ipv6_label = tc_flow & htonl(IPV6_LABEL_MASK);
    flow->nw_ttl = nh->ip6_hlim;
    flow->nw_proto = IPPROTO_NONE;

        if ((nexthdr != IPPROTO_HOPOPTS)
                && (nexthdr != IPPROTO_ROUTING)
                && (nexthdr != IPPROTO_DSTOPTS)
                && (nexthdr != IPPROTO_AH)
                && (nexthdr != IPPROTO_FRAGMENT)) {
            /* It's either a terminal header (e.g., TCP, UDP) or one we
             * don't understand.  In either case, we're done with the
             * packet, so use it to fill in 'nw_proto'. */

        /* We only verify that at least 8 bytes of the next header are
         * available, but many of these headers are longer.  Ensure that
         * accesses within the extension header are within those first 8
         * bytes.  All extension headers are required to be at least 8
         * bytes long. */
        if (ofpbuf_size(packet) < 8) {

        if ((nexthdr == IPPROTO_HOPOPTS)
            || (nexthdr == IPPROTO_ROUTING)
            || (nexthdr == IPPROTO_DSTOPTS)) {
            /* These headers, while different, have the fields we care about
             * in the same location and with the same interpretation. */
            const struct ip6_ext *ext_hdr = ofpbuf_data(packet);
            nexthdr = ext_hdr->ip6e_nxt;
            if (!ofpbuf_try_pull(packet, (ext_hdr->ip6e_len + 1) * 8)) {
        } else if (nexthdr == IPPROTO_AH) {
            /* A standard AH definition isn't available, but the fields
             * we care about are in the same location as the generic
             * option header--only the header length is calculated
             * differently. */
            const struct ip6_ext *ext_hdr = ofpbuf_data(packet);
            nexthdr = ext_hdr->ip6e_nxt;
            if (!ofpbuf_try_pull(packet, (ext_hdr->ip6e_len + 2) * 4)) {
        } else if (nexthdr == IPPROTO_FRAGMENT) {
            const struct ovs_16aligned_ip6_frag *frag_hdr = ofpbuf_data(packet);

            nexthdr = frag_hdr->ip6f_nxt;
            if (!ofpbuf_try_pull(packet, sizeof *frag_hdr)) {

            /* We only process the first fragment. */
            if (frag_hdr->ip6f_offlg != htons(0)) {
                flow->nw_frag = FLOW_NW_FRAG_ANY;
                if ((frag_hdr->ip6f_offlg & IP6F_OFF_MASK) != htons(0)) {
                    flow->nw_frag |= FLOW_NW_FRAG_LATER;
                    nexthdr = IPPROTO_FRAGMENT;

    flow->nw_proto = nexthdr;
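
/* For illustration: a packet whose headers run IPv6 / Hop-by-Hop Options /
 * Fragment / TCP ends this walk with nw_proto == IPPROTO_TCP when the
 * fragment offset is zero (a first fragment, nw_frag == FLOW_NW_FRAG_ANY),
 * but with nw_proto == IPPROTO_FRAGMENT and FLOW_NW_FRAG_LATER also set for
 * any later fragment, since the transport header is not present there. */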
parse_tcp(struct ofpbuf *b, struct flow *flow)
    if (ofpbuf_size(b) >= TCP_HEADER_LEN) {
        const struct tcp_header *tcp = ofpbuf_data(b);

        flow->tp_src = tcp->tcp_src;
        flow->tp_dst = tcp->tcp_dst;
        flow->tcp_flags = tcp->tcp_ctl & htons(0x0fff);

parse_udp(struct ofpbuf *b, struct flow *flow)
    if (ofpbuf_size(b) >= UDP_HEADER_LEN) {
        const struct udp_header *udp = ofpbuf_data(b);

        flow->tp_src = udp->udp_src;
        flow->tp_dst = udp->udp_dst;

parse_sctp(struct ofpbuf *b, struct flow *flow)
    if (ofpbuf_size(b) >= SCTP_HEADER_LEN) {
        const struct sctp_header *sctp = ofpbuf_data(b);

        flow->tp_src = sctp->sctp_src;
        flow->tp_dst = sctp->sctp_dst;
parse_icmpv6(struct ofpbuf *b, struct flow *flow)
    const struct icmp6_hdr *icmp = pull_icmpv6(b);

    /* The ICMPv6 type and code fields use the 16-bit transport port
     * fields, so we need to store them in 16-bit network byte order. */
    flow->tp_src = htons(icmp->icmp6_type);
    flow->tp_dst = htons(icmp->icmp6_code);

    if (icmp->icmp6_code == 0 &&
        (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
         icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {
        const struct in6_addr *nd_target;

        nd_target = ofpbuf_try_pull(b, sizeof *nd_target);
        flow->nd_target = *nd_target;

        while (ofpbuf_size(b) >= 8) {
            /* The minimum size of an option is 8 bytes, which also is
             * the size of Ethernet link-layer options. */
            const struct nd_opt_hdr *nd_opt = ofpbuf_data(b);
            int opt_len = nd_opt->nd_opt_len * 8;

            if (!opt_len || opt_len > ofpbuf_size(b)) {

            /* Store the link layer address if the appropriate option is
             * provided.  It is considered an error if the same link
             * layer option is specified twice. */
            if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LINKADDR
                if (eth_addr_is_zero(flow->arp_sha)) {
                    memcpy(flow->arp_sha, nd_opt + 1, ETH_ADDR_LEN);
            } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LINKADDR
                if (eth_addr_is_zero(flow->arp_tha)) {
                    memcpy(flow->arp_tha, nd_opt + 1, ETH_ADDR_LEN);

            if (!ofpbuf_try_pull(b, opt_len)) {

    memset(&flow->nd_target, 0, sizeof(flow->nd_target));
    memset(flow->arp_sha, 0, sizeof(flow->arp_sha));
    memset(flow->arp_tha, 0, sizeof(flow->arp_tha));
/* Initializes 'flow' members from 'packet' and 'md'.
 *
 * Initializes 'packet' header l2 pointer to the start of the Ethernet
 * header, and the layer offsets as follows:
 *
 *    - packet->l2_5_ofs to the start of the MPLS shim header, or UINT16_MAX
 *      when there is no MPLS shim header.
 *
 *    - packet->l3_ofs to just past the Ethernet header, or just past the
 *      vlan_header if one is present, to the first byte of the payload of the
 *      Ethernet frame.  UINT16_MAX if the frame is too short to contain an
 *      Ethernet header.
 *
 *    - packet->l4_ofs to just past the IPv4 header, if one is present and
 *      has at least the content used for the fields of interest for the flow,
 *      otherwise UINT16_MAX. */
flow_extract(struct ofpbuf *packet, const struct pkt_metadata *md,
    struct ofpbuf b = *packet;
    struct eth_header *eth;

    COVERAGE_INC(flow_extract);

    memset(flow, 0, sizeof *flow);

    flow->tunnel = md->tunnel;
    flow->in_port = md->in_port;
    flow->skb_priority = md->skb_priority;
    flow->pkt_mark = md->pkt_mark;

    ofpbuf_set_frame(packet, ofpbuf_data(packet));

    if (ofpbuf_size(&b) < sizeof *eth) {

    eth = ofpbuf_data(&b);
    memcpy(flow->dl_src, eth->eth_src, ETH_ADDR_LEN);
    memcpy(flow->dl_dst, eth->eth_dst, ETH_ADDR_LEN);

    /* dl_type, vlan_tci. */
    ofpbuf_pull(&b, ETH_ADDR_LEN * 2);
    if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
        parse_vlan(&b, flow);
    flow->dl_type = parse_ethertype(&b);

    /* Parse mpls, copy l3 ttl. */
    if (eth_type_mpls(flow->dl_type)) {
        ofpbuf_set_l2_5(packet, ofpbuf_data(&b));
        parse_mpls(&b, flow);

    ofpbuf_set_l3(packet, ofpbuf_data(&b));
    if (flow->dl_type == htons(ETH_TYPE_IP)) {
        const struct ip_header *nh = pull_ip(&b);
            ofpbuf_set_l4(packet, ofpbuf_data(&b));
            flow->nw_src = get_16aligned_be32(&nh->ip_src);
            flow->nw_dst = get_16aligned_be32(&nh->ip_dst);
            flow->nw_proto = nh->ip_proto;
            flow->nw_tos = nh->ip_tos;
            if (IP_IS_FRAGMENT(nh->ip_frag_off)) {
                flow->nw_frag = FLOW_NW_FRAG_ANY;
                if (nh->ip_frag_off & htons(IP_FRAG_OFF_MASK)) {
                    flow->nw_frag |= FLOW_NW_FRAG_LATER;
            flow->nw_ttl = nh->ip_ttl;
            if (!(nh->ip_frag_off & htons(IP_FRAG_OFF_MASK))) {
                if (flow->nw_proto == IPPROTO_TCP) {
                } else if (flow->nw_proto == IPPROTO_UDP) {
                } else if (flow->nw_proto == IPPROTO_SCTP) {
                    parse_sctp(&b, flow);
                } else if (flow->nw_proto == IPPROTO_ICMP) {
                    const struct icmp_header *icmp = pull_icmp(&b);
                    flow->tp_src = htons(icmp->icmp_type);
                    flow->tp_dst = htons(icmp->icmp_code);
    } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
        if (parse_ipv6(&b, flow)) {
        ofpbuf_set_l4(packet, ofpbuf_data(&b));
        if (flow->nw_proto == IPPROTO_TCP) {
        } else if (flow->nw_proto == IPPROTO_UDP) {
        } else if (flow->nw_proto == IPPROTO_SCTP) {
            parse_sctp(&b, flow);
        } else if (flow->nw_proto == IPPROTO_ICMPV6) {
            parse_icmpv6(&b, flow);
    } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
               flow->dl_type == htons(ETH_TYPE_RARP)) {
        const struct arp_eth_header *arp = pull_arp(&b);
        if (arp && arp->ar_hrd == htons(1)
            && arp->ar_pro == htons(ETH_TYPE_IP)
            && arp->ar_hln == ETH_ADDR_LEN
            && arp->ar_pln == 4) {
            /* We only match on the lower 8 bits of the opcode. */
            if (ntohs(arp->ar_op) <= 0xff) {
                flow->nw_proto = ntohs(arp->ar_op);
            flow->nw_src = get_16aligned_be32(&arp->ar_spa);
            flow->nw_dst = get_16aligned_be32(&arp->ar_tpa);
            memcpy(flow->arp_sha, arp->ar_sha, ETH_ADDR_LEN);
            memcpy(flow->arp_tha, arp->ar_tha, ETH_ADDR_LEN);
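
/* Illustrative usage sketch of flow_extract(): a caller typically extracts
 * the flow from a received frame and then dispatches on the parsed fields.
 * 'packet' is assumed to be an ofpbuf holding an Ethernet frame and 'md' its
 * ingress metadata; 'process_tcp' is a hypothetical caller-side function:
 *
 *     struct flow flow;
 *
 *     flow_extract(packet, md, &flow);
 *     if (flow.dl_type == htons(ETH_TYPE_IP)
 *         && flow.nw_proto == IPPROTO_TCP) {
 *         process_tcp(flow.tp_src, flow.tp_dst);
 *     }
 */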
/* For every bit of a field that is wildcarded in 'wildcards', sets the
 * corresponding bit in 'flow' to zero. */
flow_zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards)
    uint32_t *flow_u32 = (uint32_t *) flow;
    const uint32_t *wc_u32 = (const uint32_t *) &wildcards->masks;

    for (i = 0; i < FLOW_U32S; i++) {
        flow_u32[i] &= wc_u32[i];

flow_unwildcard_tp_ports(const struct flow *flow, struct flow_wildcards *wc)
    if (flow->nw_proto != IPPROTO_ICMP) {
        memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
        memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
        wc->masks.tp_src = htons(0xff);
        wc->masks.tp_dst = htons(0xff);
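
/* For ICMP the "ports" above hold the 8-bit type and code, stored as
 * htons(type) and htons(code) by the parsers in this file, so htons(0xff)
 * already covers every bit those values can set; a full 16-bit mask would
 * not match anything more. */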
/* Initializes 'fmd' with the metadata found in 'flow'. */
flow_get_metadata(const struct flow *flow, struct flow_metadata *fmd)
    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 25);

    fmd->dp_hash = flow->dp_hash;
    fmd->recirc_id = flow->recirc_id;
    fmd->tun_id = flow->tunnel.tun_id;
    fmd->tun_src = flow->tunnel.ip_src;
    fmd->tun_dst = flow->tunnel.ip_dst;
    fmd->metadata = flow->metadata;
    memcpy(fmd->regs, flow->regs, sizeof fmd->regs);
    fmd->pkt_mark = flow->pkt_mark;
    fmd->in_port = flow->in_port.ofp_port;

flow_to_string(const struct flow *flow)
    struct ds ds = DS_EMPTY_INITIALIZER;
    flow_format(&ds, flow);

flow_tun_flag_to_string(uint32_t flags)
    case FLOW_TNL_F_DONT_FRAGMENT:
    case FLOW_TNL_F_CSUM:

format_flags(struct ds *ds, const char *(*bit_to_string)(uint32_t),
             uint32_t flags, char del)
        uint32_t bit = rightmost_1bit(flags);

        s = bit_to_string(bit);
            ds_put_format(ds, "%s%c", s, del);
        ds_put_format(ds, "0x%"PRIx32"%c", bad, del);

format_flags_masked(struct ds *ds, const char *name,
                    const char *(*bit_to_string)(uint32_t), uint32_t flags,
    ds_put_format(ds, "%s=", name);
        uint32_t bit = rightmost_1bit(mask);
        const char *s = bit_to_string(bit);

        ds_put_format(ds, "%s%s", (flags & bit) ? "+" : "-",
                      s ? s : "[Unknown]");

flow_format(struct ds *ds, const struct flow *flow)
    match_wc_init(&match, flow);
    match_format(&match, ds, OFP_DEFAULT_PRIORITY);

flow_print(FILE *stream, const struct flow *flow)
    char *s = flow_to_string(flow);

/* flow_wildcards functions. */

/* Initializes 'wc' as a set of wildcards that matches every packet. */
flow_wildcards_init_catchall(struct flow_wildcards *wc)
    memset(&wc->masks, 0, sizeof wc->masks);
/* Clear the metadata and register wildcard masks.  They are not packet
 * header fields. */
flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
    memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
    memset(&wc->masks.regs, 0, sizeof wc->masks.regs);

/* Returns true if 'wc' matches every packet, false if 'wc' fixes any bits or
 * fields. */
flow_wildcards_is_catchall(const struct flow_wildcards *wc)
    const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;

    for (i = 0; i < FLOW_U32S; i++) {

/* Sets 'dst' as the bitwise AND of wildcards in 'src1' and 'src2'.
 * That is, a bit or a field is wildcarded in 'dst' if it is wildcarded
 * in 'src1' or 'src2' or both. */
flow_wildcards_and(struct flow_wildcards *dst,
                   const struct flow_wildcards *src1,
                   const struct flow_wildcards *src2)
    uint32_t *dst_u32 = (uint32_t *) &dst->masks;
    const uint32_t *src1_u32 = (const uint32_t *) &src1->masks;
    const uint32_t *src2_u32 = (const uint32_t *) &src2->masks;

    for (i = 0; i < FLOW_U32S; i++) {
        dst_u32[i] = src1_u32[i] & src2_u32[i];

/* Sets 'dst' as the bitwise OR of wildcards in 'src1' and 'src2'.  That
 * is, a bit or a field is wildcarded in 'dst' only if it is wildcarded
 * in both 'src1' and 'src2'. */
flow_wildcards_or(struct flow_wildcards *dst,
                  const struct flow_wildcards *src1,
                  const struct flow_wildcards *src2)
    uint32_t *dst_u32 = (uint32_t *) &dst->masks;
    const uint32_t *src1_u32 = (const uint32_t *) &src1->masks;
    const uint32_t *src2_u32 = (const uint32_t *) &src2->masks;

    for (i = 0; i < FLOW_U32S; i++) {
        dst_u32[i] = src1_u32[i] | src2_u32[i];
/* Perform a bitwise OR of miniflow 'src' flow data with the equivalent
 * fields in 'dst', storing the result in 'dst'. */
flow_union_with_miniflow(struct flow *dst, const struct miniflow *src)
    uint32_t *dst_u32 = (uint32_t *) dst;
    const uint32_t *p = src->values;

    for (map = src->map; map; map = zero_rightmost_1bit(map)) {
        dst_u32[raw_ctz(map)] |= *p++;

/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask. */
flow_wildcards_fold_minimask(struct flow_wildcards *wc,
                             const struct minimask *mask)
    flow_union_with_miniflow(&wc->masks, &mask->masks);

miniflow_get_map_in_range(const struct miniflow *miniflow,
                          uint8_t start, uint8_t end, unsigned int *offset)
    uint64_t map = miniflow->map;

        uint64_t msk = (UINT64_C(1) << start) - 1; /* 'start' LSBs set */
        *offset = count_1bits(map & msk);

    if (end < FLOW_U32S) {
        uint64_t msk = (UINT64_C(1) << end) - 1; /* 'end' LSBs set */
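
/* Worked example, for illustration only: with a map whose bits 0, 2, 5 and 9
 * are set, start == 4 and end == 8, the values for u32 indices 0 and 2 lie
 * before the range, so '*offset' becomes 2, and the returned map retains only
 * bit 5 (bit 9 is masked off because it is >= 'end'). */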
/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask
 * in range [start, end). */
flow_wildcards_fold_minimask_range(struct flow_wildcards *wc,
                                   const struct minimask *mask,
                                   uint8_t start, uint8_t end)
    uint32_t *dst_u32 = (uint32_t *)&wc->masks;
    uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
    const uint32_t *p = mask->masks.values + offset;

    for (; map; map = zero_rightmost_1bit(map)) {
        dst_u32[raw_ctz(map)] |= *p++;

/* Returns a hash of the wildcards in 'wc'. */
flow_wildcards_hash(const struct flow_wildcards *wc, uint32_t basis)
    return flow_hash(&wc->masks, basis);

/* Returns true if 'a' and 'b' represent the same wildcards, false if they are
 * different. */
flow_wildcards_equal(const struct flow_wildcards *a,
                     const struct flow_wildcards *b)
    return flow_equal(&a->masks, &b->masks);
/* Returns true if at least one bit or field is wildcarded in 'a' but not in
 * 'b', false otherwise. */
flow_wildcards_has_extra(const struct flow_wildcards *a,
                         const struct flow_wildcards *b)
    const uint32_t *a_u32 = (const uint32_t *) &a->masks;
    const uint32_t *b_u32 = (const uint32_t *) &b->masks;

    for (i = 0; i < FLOW_U32S; i++) {
        if ((a_u32[i] & b_u32[i]) != b_u32[i]) {

/* Returns true if 'a' and 'b' are equal, except that 0-bits (wildcarded bits)
 * in 'wc' do not need to be equal in 'a' and 'b'. */
flow_equal_except(const struct flow *a, const struct flow *b,
                  const struct flow_wildcards *wc)
    const uint32_t *a_u32 = (const uint32_t *) a;
    const uint32_t *b_u32 = (const uint32_t *) b;
    const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;

    for (i = 0; i < FLOW_U32S; i++) {
        if ((a_u32[i] ^ b_u32[i]) & wc_u32[i]) {

/* Sets the wildcard mask for register 'idx' in 'wc' to 'mask'.
 * (A 0-bit indicates a wildcard bit.) */
flow_wildcards_set_reg_mask(struct flow_wildcards *wc, int idx, uint32_t mask)
    wc->masks.regs[idx] = mask;

/* Calculates the 5-tuple hash from the given flow. */
flow_hash_5tuple(const struct flow *flow, uint32_t basis)
    hash = mhash_add(basis, (OVS_FORCE uint32_t) flow->nw_src);
    hash = mhash_add(hash, (OVS_FORCE uint32_t) flow->nw_dst);
    hash = mhash_add(hash, ((OVS_FORCE uint32_t) flow->tp_src << 16)
                     | (OVS_FORCE uint32_t) flow->tp_dst);
    hash = mhash_add(hash, flow->nw_proto);

    return mhash_finish(hash, 13);
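
/* Illustrative use of flow_hash_5tuple(), e.g. to pick a bucket for simple
 * per-connection distribution.  'N_BUCKETS' is a hypothetical caller-side
 * constant, not something defined here:
 *
 *     uint32_t hash = flow_hash_5tuple(flow, 0);
 *     unsigned int bucket = hash % N_BUCKETS;
 */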
/* Hashes 'flow' based on its L2 through L4 protocol information. */
flow_hash_symmetric_l4(const struct flow *flow, uint32_t basis)
            struct in6_addr ipv6_addr;
        uint8_t eth_addr[ETH_ADDR_LEN];

    memset(&fields, 0, sizeof fields);
    for (i = 0; i < ETH_ADDR_LEN; i++) {
        fields.eth_addr[i] = flow->dl_src[i] ^ flow->dl_dst[i];
    fields.vlan_tci = flow->vlan_tci & htons(VLAN_VID_MASK);
    fields.eth_type = flow->dl_type;

    /* UDP source and destination port are not taken into account because they
     * will not necessarily be symmetric in a bidirectional flow. */
    if (fields.eth_type == htons(ETH_TYPE_IP)) {
        fields.ipv4_addr = flow->nw_src ^ flow->nw_dst;
        fields.ip_proto = flow->nw_proto;
        if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
            fields.tp_port = flow->tp_src ^ flow->tp_dst;
    } else if (fields.eth_type == htons(ETH_TYPE_IPV6)) {
        const uint8_t *a = &flow->ipv6_src.s6_addr[0];
        const uint8_t *b = &flow->ipv6_dst.s6_addr[0];
        uint8_t *ipv6_addr = &fields.ipv6_addr.s6_addr[0];

        for (i = 0; i < 16; i++) {
            ipv6_addr[i] = a[i] ^ b[i];
        fields.ip_proto = flow->nw_proto;
        if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
            fields.tp_port = flow->tp_src ^ flow->tp_dst;

    return jhash_bytes(&fields, sizeof fields, basis);
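
/* Because the addresses and ports above are folded together with XOR
 * (e.g. fields.ipv4_addr = nw_src ^ nw_dst), a packet and its reply, with
 * source and destination swapped, produce identical 'fields' and therefore
 * the same hash, which is what makes this hash usable where both directions
 * of a connection must be treated alike. */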
/* Initialize a flow with random fields that matter for nx_hash_fields. */
flow_random_hash_fields(struct flow *flow)
    uint16_t rnd = random_uint16();

    /* Initialize to all zeros. */
    memset(flow, 0, sizeof *flow);

    eth_addr_random(flow->dl_src);
    eth_addr_random(flow->dl_dst);

    flow->vlan_tci = (OVS_FORCE ovs_be16) (random_uint16() & VLAN_VID_MASK);

    /* Make most of the random flows IPv4, some IPv6, and rest random. */
    flow->dl_type = rnd < 0x8000 ? htons(ETH_TYPE_IP) :
        rnd < 0xc000 ? htons(ETH_TYPE_IPV6) : (OVS_FORCE ovs_be16)rnd;

    if (dl_type_is_ip_any(flow->dl_type)) {
        if (flow->dl_type == htons(ETH_TYPE_IP)) {
            flow->nw_src = (OVS_FORCE ovs_be32)random_uint32();
            flow->nw_dst = (OVS_FORCE ovs_be32)random_uint32();
            random_bytes(&flow->ipv6_src, sizeof flow->ipv6_src);
            random_bytes(&flow->ipv6_dst, sizeof flow->ipv6_dst);
        /* Make most of IP flows TCP, some UDP or SCTP, and rest random. */
        rnd = random_uint16();
        flow->nw_proto = rnd < 0x8000 ? IPPROTO_TCP :
            rnd < 0xc000 ? IPPROTO_UDP :
            rnd < 0xd000 ? IPPROTO_SCTP : (uint8_t)rnd;
        if (flow->nw_proto == IPPROTO_TCP ||
            flow->nw_proto == IPPROTO_UDP ||
            flow->nw_proto == IPPROTO_SCTP) {
            flow->tp_src = (OVS_FORCE ovs_be16)random_uint16();
            flow->tp_dst = (OVS_FORCE ovs_be16)random_uint16();

/* Masks the fields in 'wc' that are used by the flow hash 'fields'. */
flow_mask_hash_fields(const struct flow *flow, struct flow_wildcards *wc,
                      enum nx_hash_fields fields)
    case NX_HASH_FIELDS_ETH_SRC:
        memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);

    case NX_HASH_FIELDS_SYMMETRIC_L4:
        memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
        memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
        if (flow->dl_type == htons(ETH_TYPE_IP)) {
            memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
            memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
        } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
            memset(&wc->masks.ipv6_src, 0xff, sizeof wc->masks.ipv6_src);
            memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
        if (is_ip_any(flow)) {
            memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
            flow_unwildcard_tp_ports(flow, wc);
        wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
/* Hashes the portions of 'flow' designated by 'fields'. */
flow_hash_fields(const struct flow *flow, enum nx_hash_fields fields,
    case NX_HASH_FIELDS_ETH_SRC:
        return jhash_bytes(flow->dl_src, sizeof flow->dl_src, basis);

    case NX_HASH_FIELDS_SYMMETRIC_L4:
        return flow_hash_symmetric_l4(flow, basis);

/* Returns a string representation of 'fields'. */
flow_hash_fields_to_str(enum nx_hash_fields fields)
    case NX_HASH_FIELDS_ETH_SRC: return "eth_src";
    case NX_HASH_FIELDS_SYMMETRIC_L4: return "symmetric_l4";
    default: return "<unknown>";

/* Returns true if the value of 'fields' is supported.  Otherwise false. */
flow_hash_fields_valid(enum nx_hash_fields fields)
    return fields == NX_HASH_FIELDS_ETH_SRC
        || fields == NX_HASH_FIELDS_SYMMETRIC_L4;

/* Returns a hash value for the bits of 'flow' that are active based on
 * 'wc', given 'basis'. */
flow_hash_in_wildcards(const struct flow *flow,
                       const struct flow_wildcards *wc, uint32_t basis)
    const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;
    const uint32_t *flow_u32 = (const uint32_t *) flow;

    for (i = 0; i < FLOW_U32S; i++) {
        hash = mhash_add(hash, flow_u32[i] & wc_u32[i]);
    return mhash_finish(hash, 4 * FLOW_U32S);

/* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
 * OpenFlow 1.0 "dl_vlan" value:
 *
 *    - If it is in the range 0...4095, 'flow->vlan_tci' is set to match
 *      that VLAN.  Any existing PCP match is unchanged (it becomes 0 if
 *      'flow' previously matched packets without a VLAN header).
 *
 *    - If it is OFP_VLAN_NONE, 'flow->vlan_tci' is set to match a packet
 *      without a VLAN tag.
 *
 *    - Other values of 'vid' should not be used. */
flow_set_dl_vlan(struct flow *flow, ovs_be16 vid)
    if (vid == htons(OFP10_VLAN_NONE)) {
        flow->vlan_tci = htons(0);
        vid &= htons(VLAN_VID_MASK);
        flow->vlan_tci &= ~htons(VLAN_VID_MASK);
        flow->vlan_tci |= htons(VLAN_CFI) | vid;

/* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
 * OpenFlow 1.2 "vlan_vid" value, that is, the low 13 bits of 'vlan_tci' (VID
 * plus CFI). */
flow_set_vlan_vid(struct flow *flow, ovs_be16 vid)
    ovs_be16 mask = htons(VLAN_VID_MASK | VLAN_CFI);
    flow->vlan_tci &= ~mask;
    flow->vlan_tci |= vid & mask;
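
/* For illustration, the difference between the two VID setters above:
 * flow_set_dl_vlan(flow, htons(9)) always yields a match on a tagged packet,
 * setting vlan_tci to CFI | 9 while preserving any PCP bits, whereas
 * flow_set_vlan_vid(flow, htons(9)) copies the caller's CFI bit as well, so
 * the caller decides whether the presence of a tag is required. */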
/* Sets the VLAN PCP that 'flow' matches to 'pcp', which should be in the
 * range 0...7.
 *
 * This function has no effect on the VLAN ID that 'flow' matches.
 *
 * After calling this function, 'flow' will not match packets without a VLAN
 * header. */
flow_set_vlan_pcp(struct flow *flow, uint8_t pcp)
    flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
    flow->vlan_tci |= htons((pcp << VLAN_PCP_SHIFT) | VLAN_CFI);

/* Returns the number of MPLS LSEs present in 'flow'.
 *
 * Returns 0 if the 'dl_type' of 'flow' is not an MPLS ethernet type.
 * Otherwise traverses 'flow''s MPLS label stack stopping at the
 * first entry that has the BoS bit set.  If no such entry exists then
 * the maximum number of LSEs that can be stored in 'flow' is returned. */
flow_count_mpls_labels(const struct flow *flow, struct flow_wildcards *wc)
    wc->masks.dl_type = OVS_BE16_MAX;

    if (eth_type_mpls(flow->dl_type)) {
        int len = FLOW_MAX_MPLS_LABELS;

        for (i = 0; i < len; i++) {
            wc->masks.mpls_lse[i] |= htonl(MPLS_BOS_MASK);
            if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
/* Returns the number of consecutive MPLS LSEs, starting at the
 * innermost LSE, that are common in 'a' and 'b'.
 *
 * 'an' must be flow_count_mpls_labels(a).
 * 'bn' must be flow_count_mpls_labels(b). */
flow_count_common_mpls_labels(const struct flow *a, int an,
                              const struct flow *b, int bn,
                              struct flow_wildcards *wc)
    int min_n = MIN(an, bn);
        int a_last = an - 1;
        int b_last = bn - 1;

        for (i = 0; i < min_n; i++) {
            wc->masks.mpls_lse[a_last - i] = OVS_BE32_MAX;
            wc->masks.mpls_lse[b_last - i] = OVS_BE32_MAX;
            if (a->mpls_lse[a_last - i] != b->mpls_lse[b_last - i]) {
/* Adds a new outermost MPLS label to 'flow' and changes 'flow''s Ethernet type
 * to 'mpls_eth_type', which must be an MPLS Ethertype.
 *
 * If the new label is the first MPLS label in 'flow', it is generated as:
 *
 *     - label: 2, if 'flow' is IPv6, otherwise 0.
 *
 *     - TTL: IPv4 or IPv6 TTL, if present and nonzero, otherwise 64.
 *
 *     - TC: IPv4 or IPv6 TOS, if present, otherwise 0.
 *
 * If the new label is the second or later MPLS label in 'flow', it is
 * generated as:
 *
 *     - label: Copied from outer label.
 *
 *     - TTL: Copied from outer label.
 *
 *     - TC: Copied from outer label.
 *
 * 'n' must be flow_count_mpls_labels(flow).  'n' must be less than
 * FLOW_MAX_MPLS_LABELS (because otherwise flow->mpls_lse[] would overflow). */
flow_push_mpls(struct flow *flow, int n, ovs_be16 mpls_eth_type,
               struct flow_wildcards *wc)
    ovs_assert(eth_type_mpls(mpls_eth_type));
    ovs_assert(n < FLOW_MAX_MPLS_LABELS);

    memset(wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);

        for (i = n; i >= 1; i--) {
            flow->mpls_lse[i] = flow->mpls_lse[i - 1];
        flow->mpls_lse[0] = (flow->mpls_lse[1]
                             & htonl(~MPLS_BOS_MASK));
        int label = 0;          /* IPv4 Explicit Null. */

        if (flow->dl_type == htons(ETH_TYPE_IPV6)) {

        if (is_ip_any(flow)) {
            tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
            wc->masks.nw_tos |= IP_DSCP_MASK;

            wc->masks.nw_ttl = 0xff;

        flow->mpls_lse[0] = set_mpls_lse_values(ttl, tc, 1, htonl(label));

        /* Clear all L3 and L4 fields. */
        BUILD_ASSERT(FLOW_WC_SEQ == 25);
        memset((char *) flow + FLOW_SEGMENT_2_ENDS_AT, 0,
               sizeof(struct flow) - FLOW_SEGMENT_2_ENDS_AT);
    flow->dl_type = mpls_eth_type;
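
/* Worked example for flow_push_mpls(), illustrative only: pushing the first
 * label onto an IPv4 flow with nw_ttl == 17 yields an LSE with label 0
 * (IPv4 Explicit Null), the BoS bit set, TTL 17, and TC derived from the
 * DSCP bits of nw_tos.  Pushing a further label copies label, TC and TTL
 * from the previously outermost LSE and clears BoS in the new outermost
 * entry, leaving the inner labels unchanged. */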
/* Tries to remove the outermost MPLS label from 'flow'.  Returns true if
 * successful, false otherwise.  On success, sets 'flow''s Ethernet type to
 * 'eth_type'.
 *
 * 'n' must be flow_count_mpls_labels(flow). */
flow_pop_mpls(struct flow *flow, int n, ovs_be16 eth_type,
              struct flow_wildcards *wc)
        /* Nothing to pop. */
    } else if (n == FLOW_MAX_MPLS_LABELS
               && !(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
        /* Can't pop because we don't know what to fill in mpls_lse[n - 1]. */

    memset(wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
    for (i = 1; i < n; i++) {
        flow->mpls_lse[i - 1] = flow->mpls_lse[i];
    flow->mpls_lse[n - 1] = 0;
    flow->dl_type = eth_type;

/* Sets the MPLS Label that 'flow' matches to 'label', which is interpreted
 * as an OpenFlow 1.1 "mpls_label" value. */
flow_set_mpls_label(struct flow *flow, int idx, ovs_be32 label)
    set_mpls_lse_label(&flow->mpls_lse[idx], label);

/* Sets the MPLS TTL that 'flow' matches to 'ttl', which should be in the
 * range 0...255. */
flow_set_mpls_ttl(struct flow *flow, int idx, uint8_t ttl)
    set_mpls_lse_ttl(&flow->mpls_lse[idx], ttl);

/* Sets the MPLS TC that 'flow' matches to 'tc', which should be in the
 * range 0...7. */
flow_set_mpls_tc(struct flow *flow, int idx, uint8_t tc)
    set_mpls_lse_tc(&flow->mpls_lse[idx], tc);

/* Sets the MPLS BoS bit that 'flow' matches to 'bos', which should be 0 or 1. */
flow_set_mpls_bos(struct flow *flow, int idx, uint8_t bos)
    set_mpls_lse_bos(&flow->mpls_lse[idx], bos);

/* Sets the entire MPLS LSE. */
flow_set_mpls_lse(struct flow *flow, int idx, ovs_be32 lse)
    flow->mpls_lse[idx] = lse;
flow_compose_l4(struct ofpbuf *b, const struct flow *flow)
    if (!(flow->nw_frag & FLOW_NW_FRAG_ANY)
        || !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
        if (flow->nw_proto == IPPROTO_TCP) {
            struct tcp_header *tcp;

            l4_len = sizeof *tcp;
            tcp = ofpbuf_put_zeros(b, l4_len);
            tcp->tcp_src = flow->tp_src;
            tcp->tcp_dst = flow->tp_dst;
            tcp->tcp_ctl = TCP_CTL(ntohs(flow->tcp_flags), 5);
        } else if (flow->nw_proto == IPPROTO_UDP) {
            struct udp_header *udp;

            l4_len = sizeof *udp;
            udp = ofpbuf_put_zeros(b, l4_len);
            udp->udp_src = flow->tp_src;
            udp->udp_dst = flow->tp_dst;
        } else if (flow->nw_proto == IPPROTO_SCTP) {
            struct sctp_header *sctp;

            l4_len = sizeof *sctp;
            sctp = ofpbuf_put_zeros(b, l4_len);
            sctp->sctp_src = flow->tp_src;
            sctp->sctp_dst = flow->tp_dst;
        } else if (flow->nw_proto == IPPROTO_ICMP) {
            struct icmp_header *icmp;

            l4_len = sizeof *icmp;
            icmp = ofpbuf_put_zeros(b, l4_len);
            icmp->icmp_type = ntohs(flow->tp_src);
            icmp->icmp_code = ntohs(flow->tp_dst);
            icmp->icmp_csum = csum(icmp, ICMP_HEADER_LEN);
        } else if (flow->nw_proto == IPPROTO_ICMPV6) {
            struct icmp6_hdr *icmp;

            l4_len = sizeof *icmp;
            icmp = ofpbuf_put_zeros(b, l4_len);
            icmp->icmp6_type = ntohs(flow->tp_src);
            icmp->icmp6_code = ntohs(flow->tp_dst);

            if (icmp->icmp6_code == 0 &&
                (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
                 icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {
                struct in6_addr *nd_target;
                struct nd_opt_hdr *nd_opt;

                l4_len += sizeof *nd_target;
                nd_target = ofpbuf_put_zeros(b, sizeof *nd_target);
                *nd_target = flow->nd_target;

                if (!eth_addr_is_zero(flow->arp_sha)) {
                    nd_opt = ofpbuf_put_zeros(b, 8);
                    nd_opt->nd_opt_len = 1;
                    nd_opt->nd_opt_type = ND_OPT_SOURCE_LINKADDR;
                    memcpy(nd_opt + 1, flow->arp_sha, ETH_ADDR_LEN);
                if (!eth_addr_is_zero(flow->arp_tha)) {
                    nd_opt = ofpbuf_put_zeros(b, 8);
                    nd_opt->nd_opt_len = 1;
                    nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR;
                    memcpy(nd_opt + 1, flow->arp_tha, ETH_ADDR_LEN);
            icmp->icmp6_cksum = (OVS_FORCE uint16_t)
                csum(icmp, (char *)ofpbuf_tail(b) - (char *)icmp);
/* Puts into 'b' a packet that flow_extract() would parse as having the given
 * 'flow'.
 *
 * (This is useful only for testing, obviously, and the packet isn't really
 * valid.  It hasn't got some checksums filled in, for one, and lots of fields
 * are just zeroed.) */
flow_compose(struct ofpbuf *b, const struct flow *flow)
    /* eth_compose() sets l3 pointer and makes sure it is 32-bit aligned. */
    eth_compose(b, flow->dl_dst, flow->dl_src, ntohs(flow->dl_type), 0);
    if (flow->dl_type == htons(FLOW_DL_TYPE_NONE)) {
        struct eth_header *eth = ofpbuf_l2(b);
        eth->eth_type = htons(ofpbuf_size(b));

    if (flow->vlan_tci & htons(VLAN_CFI)) {
        eth_push_vlan(b, htons(ETH_TYPE_VLAN), flow->vlan_tci);

    if (flow->dl_type == htons(ETH_TYPE_IP)) {
        struct ip_header *ip;

        ip = ofpbuf_put_zeros(b, sizeof *ip);
        ip->ip_ihl_ver = IP_IHL_VER(5, 4);
        ip->ip_tos = flow->nw_tos;
        ip->ip_ttl = flow->nw_ttl;
        ip->ip_proto = flow->nw_proto;
        put_16aligned_be32(&ip->ip_src, flow->nw_src);
        put_16aligned_be32(&ip->ip_dst, flow->nw_dst);

        if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
            ip->ip_frag_off |= htons(IP_MORE_FRAGMENTS);
            if (flow->nw_frag & FLOW_NW_FRAG_LATER) {
                ip->ip_frag_off |= htons(100);

        ofpbuf_set_l4(b, ofpbuf_tail(b));

        l4_len = flow_compose_l4(b, flow);

        ip->ip_tot_len = htons(b->l4_ofs - b->l3_ofs + l4_len);
        ip->ip_csum = csum(ip, sizeof *ip);
    } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
        struct ovs_16aligned_ip6_hdr *nh;

        nh = ofpbuf_put_zeros(b, sizeof *nh);
        put_16aligned_be32(&nh->ip6_flow, htonl(6 << 28) |
                           htonl(flow->nw_tos << 20) | flow->ipv6_label);
        nh->ip6_hlim = flow->nw_ttl;
        nh->ip6_nxt = flow->nw_proto;

        memcpy(&nh->ip6_src, &flow->ipv6_src, sizeof(nh->ip6_src));
        memcpy(&nh->ip6_dst, &flow->ipv6_dst, sizeof(nh->ip6_dst));

        ofpbuf_set_l4(b, ofpbuf_tail(b));

        l4_len = flow_compose_l4(b, flow);

        nh->ip6_plen = htons(l4_len);
    } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
               flow->dl_type == htons(ETH_TYPE_RARP)) {
        struct arp_eth_header *arp;

        arp = ofpbuf_put_zeros(b, sizeof *arp);
        ofpbuf_set_l3(b, arp);
        arp->ar_hrd = htons(1);
        arp->ar_pro = htons(ETH_TYPE_IP);
        arp->ar_hln = ETH_ADDR_LEN;
        arp->ar_op = htons(flow->nw_proto);

        if (flow->nw_proto == ARP_OP_REQUEST ||
            flow->nw_proto == ARP_OP_REPLY) {
            put_16aligned_be32(&arp->ar_spa, flow->nw_src);
            put_16aligned_be32(&arp->ar_tpa, flow->nw_dst);
            memcpy(arp->ar_sha, flow->arp_sha, ETH_ADDR_LEN);
            memcpy(arp->ar_tha, flow->arp_tha, ETH_ADDR_LEN);

    if (eth_type_mpls(flow->dl_type)) {
        b->l2_5_ofs = b->l3_ofs;
        for (n = 1; n < FLOW_MAX_MPLS_LABELS; n++) {
            if (flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK)) {
            push_mpls(b, flow->dl_type, flow->mpls_lse[--n]);
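
/* Illustrative round-trip, along the lines of what tests can do: compose a
 * packet from a flow and re-extract it, expecting the interesting fields to
 * match.  'md' stands for whatever packet metadata the caller supplies:
 *
 *     struct ofpbuf b;
 *     struct flow reextracted;
 *
 *     ofpbuf_init(&b, 0);
 *     flow_compose(&b, flow);
 *     flow_extract(&b, md, &reextracted);
 *     ofpbuf_uninit(&b);
 */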
/* Compressed flow. */

miniflow_n_values(const struct miniflow *flow)
    return count_1bits(flow->map);

miniflow_alloc_values(struct miniflow *flow, int n)
    if (n <= MINI_N_INLINE) {
        return flow->inline_values;
        COVERAGE_INC(miniflow_malloc);
        return xmalloc(n * sizeof *flow->values);

/* Completes an initialization of 'dst' as a miniflow copy of 'src' begun by
 * the caller.  The caller must have already initialized 'dst->map' properly
 * to indicate the significant uint32_t elements of 'src'.  'n' must be the
 * number of 1-bits in 'dst->map'.
 *
 * Normally the significant elements are the ones that are non-zero.  However,
 * when a miniflow is initialized from a (mini)mask, the values can be zeroes,
 * so that the flow and mask always have the same maps.
 *
 * This function initializes 'dst->values' (either inline if possible or with
 * malloc() otherwise) and copies the uint32_t elements of 'src' indicated by
 * 'dst->map' into it. */
miniflow_init__(struct miniflow *dst, const struct flow *src, int n)
    const uint32_t *src_u32 = (const uint32_t *) src;

    dst->values = miniflow_alloc_values(dst, n);
    for (map = dst->map; map; map = zero_rightmost_1bit(map)) {
        dst->values[ofs++] = src_u32[raw_ctz(map)];
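
/* Worked example, for illustration only: if just the u32 words at indices 1
 * and 4 of 'src' are significant, the caller sets dst->map to
 * (UINT64_C(1) << 1) | (UINT64_C(1) << 4) and passes n == 2, so exactly two
 * values are stored; a later miniflow_get(dst, 4) then finds its value at
 * values[count_1bits(map & 0xf)], i.e. values[1]. */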
/* Initializes 'dst' as a copy of 'src'.  The caller must eventually free 'dst'
 * with miniflow_destroy(). */
miniflow_init(struct miniflow *dst, const struct flow *src)
    const uint32_t *src_u32 = (const uint32_t *) src;

    /* Initialize dst->map, counting the number of nonzero elements. */
    for (i = 0; i < FLOW_U32S; i++) {
            dst->map |= UINT64_C(1) << i;

    miniflow_init__(dst, src, n);

/* Initializes 'dst' as a copy of 'src', using 'mask->map' as 'dst''s map.  The
 * caller must eventually free 'dst' with miniflow_destroy(). */
miniflow_init_with_minimask(struct miniflow *dst, const struct flow *src,
                            const struct minimask *mask)
    dst->map = mask->masks.map;
    miniflow_init__(dst, src, miniflow_n_values(dst));

/* Initializes 'dst' as a copy of 'src'.  The caller must eventually free 'dst'
 * with miniflow_destroy(). */
miniflow_clone(struct miniflow *dst, const struct miniflow *src)
    int n = miniflow_n_values(src);
    dst->map = src->map;
    dst->values = miniflow_alloc_values(dst, n);
    memcpy(dst->values, src->values, n * sizeof *dst->values);

/* Initializes 'dst' with the data in 'src', destroying 'src'.
 * The caller must eventually free 'dst' with miniflow_destroy(). */
miniflow_move(struct miniflow *dst, struct miniflow *src)
    if (src->values == src->inline_values) {
        dst->values = dst->inline_values;
        memcpy(dst->values, src->values,
               miniflow_n_values(src) * sizeof *dst->values);
        dst->values = src->values;
    dst->map = src->map;

/* Frees any memory owned by 'flow'.  Does not free the storage in which 'flow'
 * itself resides; the caller is responsible for that. */
miniflow_destroy(struct miniflow *flow)
    if (flow->values != flow->inline_values) {

/* Initializes 'dst' as a copy of 'src'. */
miniflow_expand(const struct miniflow *src, struct flow *dst)
    memset(dst, 0, sizeof *dst);
    flow_union_with_miniflow(dst, src);

static const uint32_t *
miniflow_get__(const struct miniflow *flow, unsigned int u32_ofs)
    if (!(flow->map & (UINT64_C(1) << u32_ofs))) {
        static const uint32_t zero = 0;
    return flow->values +
           count_1bits(flow->map & ((UINT64_C(1) << u32_ofs) - 1));

/* Returns the uint32_t that would be at byte offset '4 * u32_ofs' if 'flow'
 * were expanded into a "struct flow". */
miniflow_get(const struct miniflow *flow, unsigned int u32_ofs)
    return *miniflow_get__(flow, u32_ofs);

/* Returns the ovs_be16 that would be at byte offset 'u8_ofs' if 'flow' were
 * expanded into a "struct flow". */
miniflow_get_be16(const struct miniflow *flow, unsigned int u8_ofs)
    const uint32_t *u32p = miniflow_get__(flow, u8_ofs / 4);
    const ovs_be16 *be16p = (const ovs_be16 *) u32p;
    return be16p[u8_ofs % 4 != 0];
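
/* For illustration, with a hypothetical byte offset u8_ofs == 22:
 * miniflow_get__() returns the u32 word for index 22 / 4 == 5, and because
 * 22 % 4 != 0 the second ovs_be16 within that word is selected.  This only
 * works for members that are 16-bit aligned within "struct flow". */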
/* Returns the VID within the vlan_tci member of the "struct flow" represented
 * by 'flow'. */
miniflow_get_vid(const struct miniflow *flow)
    ovs_be16 tci = miniflow_get_be16(flow, offsetof(struct flow, vlan_tci));
    return vlan_tci_to_vid(tci);

/* Returns true if 'a' and 'b' are the same flow, false otherwise. */
miniflow_equal(const struct miniflow *a, const struct miniflow *b)
    const uint32_t *ap = a->values;
    const uint32_t *bp = b->values;
    const uint64_t a_map = a->map;
    const uint64_t b_map = b->map;

    if (a_map == b_map) {
        for (map = a_map; map; map = zero_rightmost_1bit(map)) {
            if (*ap++ != *bp++) {
        for (map = a_map | b_map; map; map = zero_rightmost_1bit(map)) {
            uint64_t bit = rightmost_1bit(map);
            uint64_t a_value = a_map & bit ? *ap++ : 0;
            uint64_t b_value = b_map & bit ? *bp++ : 0;

            if (a_value != b_value) {

/* Returns true if 'a' and 'b' are equal at the places where there are 1-bits
 * in 'mask', false if they differ. */
miniflow_equal_in_minimask(const struct miniflow *a, const struct miniflow *b,
                           const struct minimask *mask)
    p = mask->masks.values;

    for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
        int ofs = raw_ctz(map);

        if ((miniflow_get(a, ofs) ^ miniflow_get(b, ofs)) & *p) {

/* Returns true if 'a' and 'b' are equal at the places where there are 1-bits
 * in 'mask', false if they differ. */
miniflow_equal_flow_in_minimask(const struct miniflow *a, const struct flow *b,
                                const struct minimask *mask)
    const uint32_t *b_u32 = (const uint32_t *) b;

    p = mask->masks.values;

    for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
        int ofs = raw_ctz(map);

        if ((miniflow_get(a, ofs) ^ b_u32[ofs]) & *p) {

/* Returns a hash value for 'flow', given 'basis'. */
miniflow_hash(const struct miniflow *flow, uint32_t basis)
    const uint32_t *p = flow->values;
    uint32_t hash = basis;
    uint64_t hash_map = 0;

    for (map = flow->map; map; map = zero_rightmost_1bit(map)) {
            hash = mhash_add(hash, *p);
            hash_map |= rightmost_1bit(map);

    hash = mhash_add(hash, hash_map);
    hash = mhash_add(hash, hash_map >> 32);

    return mhash_finish(hash, p - flow->values);
/* Returns a hash value for the bits of 'flow' where there are 1-bits in
 * 'mask', given 'basis'.
 *
 * The hash values returned by this function are the same as those returned by
 * flow_hash_in_minimask(), only the form of the arguments differ. */
miniflow_hash_in_minimask(const struct miniflow *flow,
                          const struct minimask *mask, uint32_t basis)
    const uint32_t *p = mask->masks.values;

    for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
        hash = mhash_add(hash, miniflow_get(flow, raw_ctz(map)) & *p++);

    return mhash_finish(hash, (p - mask->masks.values) * 4);

/* Returns a hash value for the bits of 'flow' where there are 1-bits in
 * 'mask', given 'basis'.
 *
 * The hash values returned by this function are the same as those returned by
 * miniflow_hash_in_minimask(), only the form of the arguments differ. */
flow_hash_in_minimask(const struct flow *flow, const struct minimask *mask,
    const uint32_t *flow_u32 = (const uint32_t *)flow;
    const uint32_t *p = mask->masks.values;

    for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
        hash = mhash_add(hash, flow_u32[raw_ctz(map)] & *p++);

    return mhash_finish(hash, (p - mask->masks.values) * 4);

/* Returns a hash value for the bits of range [start, end) in 'flow',
 * where there are 1-bits in 'mask', given 'hash'.
 *
 * The hash values returned by this function are the same as those returned by
 * minimatch_hash_range(), only the form of the arguments differ. */
flow_hash_in_minimask_range(const struct flow *flow,
                            const struct minimask *mask,
                            uint8_t start, uint8_t end, uint32_t *basis)
    const uint32_t *flow_u32 = (const uint32_t *)flow;
    unsigned int offset;
    uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
    const uint32_t *p = mask->masks.values + offset;
    uint32_t hash = *basis;

    for (; map; map = zero_rightmost_1bit(map)) {
        hash = mhash_add(hash, flow_u32[raw_ctz(map)] & *p++);

    *basis = hash; /* Allow continuation from the unfinished value. */
    return mhash_finish(hash, (p - mask->masks.values) * 4);
/* Initializes 'dst' as a copy of 'src'.  The caller must eventually free 'dst'
 * with minimask_destroy(). */
minimask_init(struct minimask *mask, const struct flow_wildcards *wc)
    miniflow_init(&mask->masks, &wc->masks);

/* Initializes 'dst' as a copy of 'src'.  The caller must eventually free 'dst'
 * with minimask_destroy(). */
minimask_clone(struct minimask *dst, const struct minimask *src)
    miniflow_clone(&dst->masks, &src->masks);

/* Initializes 'dst' with the data in 'src', destroying 'src'.
 * The caller must eventually free 'dst' with minimask_destroy(). */
minimask_move(struct minimask *dst, struct minimask *src)
    miniflow_move(&dst->masks, &src->masks);

/* Initializes 'dst_' as the bit-wise "and" of 'a_' and 'b_'.
 *
 * The caller must provide room for FLOW_U32S "uint32_t"s in 'storage', for use
 * by 'dst_'.  The caller must *not* free 'dst_' with minimask_destroy(). */
minimask_combine(struct minimask *dst_,
                 const struct minimask *a_, const struct minimask *b_,
                 uint32_t storage[FLOW_U32S])
    struct miniflow *dst = &dst_->masks;
    const struct miniflow *a = &a_->masks;
    const struct miniflow *b = &b_->masks;

    dst->values = storage;

    for (map = a->map & b->map; map; map = zero_rightmost_1bit(map)) {
        int ofs = raw_ctz(map);
        uint32_t mask = miniflow_get(a, ofs) & miniflow_get(b, ofs);

            dst->map |= rightmost_1bit(map);
            dst->values[n++] = mask;

/* Frees any memory owned by 'mask'.  Does not free the storage in which 'mask'
 * itself resides; the caller is responsible for that. */
minimask_destroy(struct minimask *mask)
    miniflow_destroy(&mask->masks);

/* Initializes 'dst' as a copy of 'src'. */
minimask_expand(const struct minimask *mask, struct flow_wildcards *wc)
    miniflow_expand(&mask->masks, &wc->masks);

/* Returns the uint32_t that would be at byte offset '4 * u32_ofs' if 'mask'
 * were expanded into a "struct flow_wildcards". */
minimask_get(const struct minimask *mask, unsigned int u32_ofs)
    return miniflow_get(&mask->masks, u32_ofs);

/* Returns the VID mask within the vlan_tci member of the "struct
 * flow_wildcards" represented by 'mask'. */
minimask_get_vid_mask(const struct minimask *mask)
    return miniflow_get_vid(&mask->masks);

/* Returns true if 'a' and 'b' are the same flow mask, false otherwise. */
minimask_equal(const struct minimask *a, const struct minimask *b)
    return miniflow_equal(&a->masks, &b->masks);

/* Returns a hash value for 'mask', given 'basis'. */
minimask_hash(const struct minimask *mask, uint32_t basis)
    return miniflow_hash(&mask->masks, basis);

/* Returns true if at least one bit is wildcarded in 'a_' but not in 'b_',
 * false otherwise. */
minimask_has_extra(const struct minimask *a_, const struct minimask *b_)
    const struct miniflow *a = &a_->masks;
    const struct miniflow *b = &b_->masks;

    for (map = a->map | b->map; map; map = zero_rightmost_1bit(map)) {
        int ofs = raw_ctz(map);
        uint32_t a_u32 = miniflow_get(a, ofs);
        uint32_t b_u32 = miniflow_get(b, ofs);

        if ((a_u32 & b_u32) != b_u32) {

/* Returns true if 'mask' matches every packet, false if 'mask' fixes any bits
 * or fields. */
minimask_is_catchall(const struct minimask *mask_)
    const struct miniflow *mask = &mask_->masks;
    const uint32_t *p = mask->values;

    for (map = mask->map; map; map = zero_rightmost_1bit(map)) {