2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include <sys/types.h>
22 #include <netinet/in.h>
23 #include <netinet/icmp6.h>
24 #include <netinet/ip6.h>
28 #include "byte-order.h"
31 #include "dynamic-string.h"
36 #include "openflow/openflow.h"
39 #include "unaligned.h"
41 COVERAGE_DEFINE(flow_extract);
42 COVERAGE_DEFINE(miniflow_malloc);
44 /* U32 indices for segmented flow classification. */
45 const uint8_t flow_segment_u32s[4] = {
46 FLOW_SEGMENT_1_ENDS_AT / 4,
47 FLOW_SEGMENT_2_ENDS_AT / 4,
48 FLOW_SEGMENT_3_ENDS_AT / 4,
52 static struct arp_eth_header *
53 pull_arp(struct ofpbuf *packet)
55 return ofpbuf_try_pull(packet, ARP_ETH_HEADER_LEN);
58 static struct ip_header *
59 pull_ip(struct ofpbuf *packet)
61 if (packet->size >= IP_HEADER_LEN) {
62 struct ip_header *ip = packet->data;
63 int ip_len = IP_IHL(ip->ip_ihl_ver) * 4;
64 if (ip_len >= IP_HEADER_LEN && packet->size >= ip_len) {
65 return ofpbuf_pull(packet, ip_len);
71 static struct tcp_header *
72 pull_tcp(struct ofpbuf *packet)
74 if (packet->size >= TCP_HEADER_LEN) {
75 struct tcp_header *tcp = packet->data;
76 int tcp_len = TCP_OFFSET(tcp->tcp_ctl) * 4;
77 if (tcp_len >= TCP_HEADER_LEN && packet->size >= tcp_len) {
78 return ofpbuf_pull(packet, tcp_len);
84 static struct udp_header *
85 pull_udp(struct ofpbuf *packet)
87 return ofpbuf_try_pull(packet, UDP_HEADER_LEN);
90 static struct sctp_header *
91 pull_sctp(struct ofpbuf *packet)
93 return ofpbuf_try_pull(packet, SCTP_HEADER_LEN);
96 static struct icmp_header *
97 pull_icmp(struct ofpbuf *packet)
99 return ofpbuf_try_pull(packet, ICMP_HEADER_LEN);
102 static struct icmp6_hdr *
103 pull_icmpv6(struct ofpbuf *packet)
105 return ofpbuf_try_pull(packet, sizeof(struct icmp6_hdr));
109 parse_mpls(struct ofpbuf *b, struct flow *flow)
114 while ((mh = ofpbuf_try_pull(b, sizeof *mh))) {
115 if (idx < FLOW_MAX_MPLS_LABELS) {
116 flow->mpls_lse[idx++] = mh->mpls_lse;
118 if (mh->mpls_lse & htonl(MPLS_BOS_MASK)) {
125 parse_vlan(struct ofpbuf *b, struct flow *flow)
128 ovs_be16 eth_type; /* ETH_TYPE_VLAN */
132 if (b->size >= sizeof(struct qtag_prefix) + sizeof(ovs_be16)) {
133 struct qtag_prefix *qp = ofpbuf_pull(b, sizeof *qp);
134 flow->vlan_tci = qp->tci | htons(VLAN_CFI);
139 parse_ethertype(struct ofpbuf *b)
141 struct llc_snap_header *llc;
144 proto = *(ovs_be16 *) ofpbuf_pull(b, sizeof proto);
145 if (ntohs(proto) >= ETH_TYPE_MIN) {
149 if (b->size < sizeof *llc) {
150 return htons(FLOW_DL_TYPE_NONE);
154 if (llc->llc.llc_dsap != LLC_DSAP_SNAP
155 || llc->llc.llc_ssap != LLC_SSAP_SNAP
156 || llc->llc.llc_cntl != LLC_CNTL_SNAP
157 || memcmp(llc->snap.snap_org, SNAP_ORG_ETHERNET,
158 sizeof llc->snap.snap_org)) {
159 return htons(FLOW_DL_TYPE_NONE);
162 ofpbuf_pull(b, sizeof *llc);
164 if (ntohs(llc->snap.snap_type) >= ETH_TYPE_MIN) {
165 return llc->snap.snap_type;
168 return htons(FLOW_DL_TYPE_NONE);
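/* Illustrative note on parse_ethertype() above (not from the original
 * sources): a 16-bit value of 0x0800 just past the Ethernet addresses is at
 * least ETH_TYPE_MIN, so it is returned directly as the Ethertype (Ethernet
 * II framing), whereas a value below ETH_TYPE_MIN is an 802.3 frame length,
 * in which case the Ethertype, if any, must come from the LLC/SNAP header
 * examined above. */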
172 parse_ipv6(struct ofpbuf *packet, struct flow *flow)
174 const struct ovs_16aligned_ip6_hdr *nh;
178 nh = ofpbuf_try_pull(packet, sizeof *nh);
183 nexthdr = nh->ip6_nxt;
185 memcpy(&flow->ipv6_src, &nh->ip6_src, sizeof flow->ipv6_src);
186 memcpy(&flow->ipv6_dst, &nh->ip6_dst, sizeof flow->ipv6_dst);
188 tc_flow = get_16aligned_be32(&nh->ip6_flow);
189 flow->nw_tos = ntohl(tc_flow) >> 20;
190 flow->ipv6_label = tc_flow & htonl(IPV6_LABEL_MASK);
191 flow->nw_ttl = nh->ip6_hlim;
192 flow->nw_proto = IPPROTO_NONE;
195 if ((nexthdr != IPPROTO_HOPOPTS)
196 && (nexthdr != IPPROTO_ROUTING)
197 && (nexthdr != IPPROTO_DSTOPTS)
198 && (nexthdr != IPPROTO_AH)
199 && (nexthdr != IPPROTO_FRAGMENT)) {
200 /* It's either a terminal header (e.g., TCP, UDP) or one we
201 * don't understand. In either case, we're done with the
202 * packet, so use it to fill in 'nw_proto'. */
206 /* We only verify that at least 8 bytes of the next header are
207 * available, but many of these headers are longer. Ensure that
208 * accesses within the extension header are within those first 8
209 * bytes. All extension headers are required to be at least 8 bytes. */
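/* For illustration: a generic extension header with ip6e_len == 1 spans
 * (1 + 1) * 8 == 16 bytes, while AH measures its length in 4-byte units, so
 * an AH header with ip6e_len == 4 spans (4 + 2) * 4 == 24 bytes (see the
 * pulls below). */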
211 if (packet->size < 8) {
215 if ((nexthdr == IPPROTO_HOPOPTS)
216 || (nexthdr == IPPROTO_ROUTING)
217 || (nexthdr == IPPROTO_DSTOPTS)) {
218 /* These headers, while different, have the fields we care about
219 * in the same location and with the same interpretation. */
220 const struct ip6_ext *ext_hdr = packet->data;
221 nexthdr = ext_hdr->ip6e_nxt;
222 if (!ofpbuf_try_pull(packet, (ext_hdr->ip6e_len + 1) * 8)) {
225 } else if (nexthdr == IPPROTO_AH) {
226 /* A standard AH definition isn't available, but the fields
227 * we care about are in the same location as the generic
228 * option header--only the header length is calculated differently. */
230 const struct ip6_ext *ext_hdr = packet->data;
231 nexthdr = ext_hdr->ip6e_nxt;
232 if (!ofpbuf_try_pull(packet, (ext_hdr->ip6e_len + 2) * 4)) {
235 } else if (nexthdr == IPPROTO_FRAGMENT) {
236 const struct ovs_16aligned_ip6_frag *frag_hdr = packet->data;
238 nexthdr = frag_hdr->ip6f_nxt;
239 if (!ofpbuf_try_pull(packet, sizeof *frag_hdr)) {
243 /* We only process the first fragment. */
244 if (frag_hdr->ip6f_offlg != htons(0)) {
245 flow->nw_frag = FLOW_NW_FRAG_ANY;
246 if ((frag_hdr->ip6f_offlg & IP6F_OFF_MASK) != htons(0)) {
247 flow->nw_frag |= FLOW_NW_FRAG_LATER;
248 nexthdr = IPPROTO_FRAGMENT;
255 flow->nw_proto = nexthdr;
260 parse_tcp(struct ofpbuf *packet, struct ofpbuf *b, struct flow *flow)
262 const struct tcp_header *tcp = pull_tcp(b);
264 flow->tp_src = tcp->tcp_src;
265 flow->tp_dst = tcp->tcp_dst;
266 flow->tcp_flags = tcp->tcp_ctl & htons(0x0fff);
267 packet->l7 = b->data;
272 parse_udp(struct ofpbuf *packet, struct ofpbuf *b, struct flow *flow)
274 const struct udp_header *udp = pull_udp(b);
276 flow->tp_src = udp->udp_src;
277 flow->tp_dst = udp->udp_dst;
278 packet->l7 = b->data;
283 parse_sctp(struct ofpbuf *packet, struct ofpbuf *b, struct flow *flow)
285 const struct sctp_header *sctp = pull_sctp(b);
287 flow->tp_src = sctp->sctp_src;
288 flow->tp_dst = sctp->sctp_dst;
289 packet->l7 = b->data;
294 parse_icmpv6(struct ofpbuf *b, struct flow *flow)
296 const struct icmp6_hdr *icmp = pull_icmpv6(b);
302 /* The ICMPv6 type and code fields use the 16-bit transport port
303 * fields, so we need to store them in 16-bit network byte order. */
304 flow->tp_src = htons(icmp->icmp6_type);
305 flow->tp_dst = htons(icmp->icmp6_code);
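/* For example, a Neighbor Solicitation (ICMPv6 type 135, code 0) ends up
 * with tp_src == htons(135) and tp_dst == htons(0). */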
307 if (icmp->icmp6_code == 0 &&
308 (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
309 icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {
310 const struct in6_addr *nd_target;
312 nd_target = ofpbuf_try_pull(b, sizeof *nd_target);
316 flow->nd_target = *nd_target;
318 while (b->size >= 8) {
319 /* The minimum size of an option is 8 bytes, which also is
320 * the size of Ethernet link-layer options. */
321 const struct nd_opt_hdr *nd_opt = b->data;
322 int opt_len = nd_opt->nd_opt_len * 8;
324 if (!opt_len || opt_len > b->size) {
328 /* Store the link layer address if the appropriate option is
329 * provided. It is considered an error if the same link
330 * layer option is specified twice. */
331 if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LINKADDR
333 if (eth_addr_is_zero(flow->arp_sha)) {
334 memcpy(flow->arp_sha, nd_opt + 1, ETH_ADDR_LEN);
338 } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LINKADDR
340 if (eth_addr_is_zero(flow->arp_tha)) {
341 memcpy(flow->arp_tha, nd_opt + 1, ETH_ADDR_LEN);
347 if (!ofpbuf_try_pull(b, opt_len)) {
356 memset(&flow->nd_target, 0, sizeof(flow->nd_target));
357 memset(flow->arp_sha, 0, sizeof(flow->arp_sha));
358 memset(flow->arp_tha, 0, sizeof(flow->arp_tha));
364 /* Initializes 'flow' members from 'packet', 'skb_priority', 'pkt_mark', 'tnl', and 'in_port'.
367 * Initializes 'packet' header pointers as follows:
369 * - packet->l2 to the start of the Ethernet header.
371 * - packet->l2_5 to the start of the MPLS shim header.
373 * - packet->l3 to just past the Ethernet header, or just past the
374 * vlan_header if one is present, to the first byte of the payload of the Ethernet frame.
377 * - packet->l4 to just past the IPv4 header, if one is present and has a
378 * correct length, and otherwise NULL.
380 * - packet->l7 to just past the TCP/UDP/SCTP/ICMP header, if one is
381 * present and has a correct length, and otherwise NULL.
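/*
 * A hypothetical caller (for illustration only; the port value is invented)
 * might extract a flow from a received frame like this:
 *
 *     struct flow flow;
 *     union flow_in_port in_port;
 *
 *     in_port.ofp_port = OFPP_LOCAL;
 *     flow_extract(packet, 0, 0, NULL, &in_port, &flow);
 */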
384 flow_extract(struct ofpbuf *packet, uint32_t skb_priority, uint32_t pkt_mark,
385 const struct flow_tnl *tnl, const union flow_in_port *in_port,
388 struct ofpbuf b = *packet;
389 struct eth_header *eth;
391 COVERAGE_INC(flow_extract);
393 memset(flow, 0, sizeof *flow);
396 ovs_assert(tnl != &flow->tunnel);
400 flow->in_port = *in_port;
402 flow->skb_priority = skb_priority;
403 flow->pkt_mark = pkt_mark;
411 if (b.size < sizeof *eth) {
417 memcpy(flow->dl_src, eth->eth_src, ETH_ADDR_LEN);
418 memcpy(flow->dl_dst, eth->eth_dst, ETH_ADDR_LEN);
420 /* dl_type, vlan_tci. */
421 ofpbuf_pull(&b, ETH_ADDR_LEN * 2);
422 if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
423 parse_vlan(&b, flow);
425 flow->dl_type = parse_ethertype(&b);
427 /* Parse MPLS, copy L3 TTL. */
428 if (eth_type_mpls(flow->dl_type)) {
429 packet->l2_5 = b.data;
430 parse_mpls(&b, flow);
435 if (flow->dl_type == htons(ETH_TYPE_IP)) {
436 const struct ip_header *nh = pull_ip(&b);
440 flow->nw_src = get_16aligned_be32(&nh->ip_src);
441 flow->nw_dst = get_16aligned_be32(&nh->ip_dst);
442 flow->nw_proto = nh->ip_proto;
444 flow->nw_tos = nh->ip_tos;
445 if (IP_IS_FRAGMENT(nh->ip_frag_off)) {
446 flow->nw_frag = FLOW_NW_FRAG_ANY;
447 if (nh->ip_frag_off & htons(IP_FRAG_OFF_MASK)) {
448 flow->nw_frag |= FLOW_NW_FRAG_LATER;
451 flow->nw_ttl = nh->ip_ttl;
453 if (!(nh->ip_frag_off & htons(IP_FRAG_OFF_MASK))) {
454 if (flow->nw_proto == IPPROTO_TCP) {
455 parse_tcp(packet, &b, flow);
456 } else if (flow->nw_proto == IPPROTO_UDP) {
457 parse_udp(packet, &b, flow);
458 } else if (flow->nw_proto == IPPROTO_SCTP) {
459 parse_sctp(packet, &b, flow);
460 } else if (flow->nw_proto == IPPROTO_ICMP) {
461 const struct icmp_header *icmp = pull_icmp(&b);
463 flow->tp_src = htons(icmp->icmp_type);
464 flow->tp_dst = htons(icmp->icmp_code);
470 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
471 if (parse_ipv6(&b, flow)) {
476 if (flow->nw_proto == IPPROTO_TCP) {
477 parse_tcp(packet, &b, flow);
478 } else if (flow->nw_proto == IPPROTO_UDP) {
479 parse_udp(packet, &b, flow);
480 } else if (flow->nw_proto == IPPROTO_SCTP) {
481 parse_sctp(packet, &b, flow);
482 } else if (flow->nw_proto == IPPROTO_ICMPV6) {
483 if (parse_icmpv6(&b, flow)) {
487 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
488 flow->dl_type == htons(ETH_TYPE_RARP)) {
489 const struct arp_eth_header *arp = pull_arp(&b);
490 if (arp && arp->ar_hrd == htons(1)
491 && arp->ar_pro == htons(ETH_TYPE_IP)
492 && arp->ar_hln == ETH_ADDR_LEN
493 && arp->ar_pln == 4) {
494 /* We only match on the lower 8 bits of the opcode. */
495 if (ntohs(arp->ar_op) <= 0xff) {
496 flow->nw_proto = ntohs(arp->ar_op);
499 flow->nw_src = get_16aligned_be32(&arp->ar_spa);
500 flow->nw_dst = get_16aligned_be32(&arp->ar_tpa);
501 memcpy(flow->arp_sha, arp->ar_sha, ETH_ADDR_LEN);
502 memcpy(flow->arp_tha, arp->ar_tha, ETH_ADDR_LEN);
507 /* For every bit of a field that is wildcarded in 'wildcards', sets the
508 * corresponding bit in 'flow' to zero. */
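/* For example, a field whose mask is all zeros (fully wildcarded) is cleared
 * in 'flow', while a field whose mask is all ones (exact match) is left
 * unchanged. */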
510 flow_zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards)
512 uint32_t *flow_u32 = (uint32_t *) flow;
513 const uint32_t *wc_u32 = (const uint32_t *) &wildcards->masks;
516 for (i = 0; i < FLOW_U32S; i++) {
517 flow_u32[i] &= wc_u32[i];
522 flow_unwildcard_tp_ports(const struct flow *flow, struct flow_wildcards *wc)
524 if (flow->nw_proto != IPPROTO_ICMP) {
525 memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
526 memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
528 wc->masks.tp_src = htons(0xff);
529 wc->masks.tp_dst = htons(0xff);
533 /* Initializes 'fmd' with the metadata found in 'flow'. */
535 flow_get_metadata(const struct flow *flow, struct flow_metadata *fmd)
537 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 24);
539 fmd->tun_id = flow->tunnel.tun_id;
540 fmd->tun_src = flow->tunnel.ip_src;
541 fmd->tun_dst = flow->tunnel.ip_dst;
542 fmd->metadata = flow->metadata;
543 memcpy(fmd->regs, flow->regs, sizeof fmd->regs);
544 fmd->pkt_mark = flow->pkt_mark;
545 fmd->in_port = flow->in_port.ofp_port;
549 flow_to_string(const struct flow *flow)
551 struct ds ds = DS_EMPTY_INITIALIZER;
552 flow_format(&ds, flow);
557 flow_tun_flag_to_string(uint32_t flags)
560 case FLOW_TNL_F_DONT_FRAGMENT:
562 case FLOW_TNL_F_CSUM:
572 format_flags(struct ds *ds, const char *(*bit_to_string)(uint32_t),
573 uint32_t flags, char del)
581 uint32_t bit = rightmost_1bit(flags);
584 s = bit_to_string(bit);
586 ds_put_format(ds, "%s%c", s, del);
595 ds_put_format(ds, "0x%"PRIx32"%c", bad, del);
601 format_flags_masked(struct ds *ds, const char *name,
602 const char *(*bit_to_string)(uint32_t), uint32_t flags,
606 ds_put_format(ds, "%s=", name);
609 uint32_t bit = rightmost_1bit(mask);
610 const char *s = bit_to_string(bit);
612 ds_put_format(ds, "%s%s", (flags & bit) ? "+" : "-",
613 s ? s : "[Unknown]");
619 flow_format(struct ds *ds, const struct flow *flow)
623 match_wc_init(&match, flow);
624 match_format(&match, ds, OFP_DEFAULT_PRIORITY);
628 flow_print(FILE *stream, const struct flow *flow)
630 char *s = flow_to_string(flow);
635 /* flow_wildcards functions. */
637 /* Initializes 'wc' as a set of wildcards that matches every packet. */
639 flow_wildcards_init_catchall(struct flow_wildcards *wc)
641 memset(&wc->masks, 0, sizeof wc->masks);
644 /* Clear the metadata and register wildcard masks. They are not packet header fields. */
647 flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
649 memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
650 memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
653 /* Returns true if 'wc' matches every packet, false if 'wc' fixes any bits or fields. */
656 flow_wildcards_is_catchall(const struct flow_wildcards *wc)
658 const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;
661 for (i = 0; i < FLOW_U32S; i++) {
669 /* Sets 'dst' as the bitwise AND of wildcards in 'src1' and 'src2'.
670 * That is, a bit or a field is wildcarded in 'dst' if it is wildcarded
671 * in 'src1' or 'src2' or both. */
673 flow_wildcards_and(struct flow_wildcards *dst,
674 const struct flow_wildcards *src1,
675 const struct flow_wildcards *src2)
677 uint32_t *dst_u32 = (uint32_t *) &dst->masks;
678 const uint32_t *src1_u32 = (const uint32_t *) &src1->masks;
679 const uint32_t *src2_u32 = (const uint32_t *) &src2->masks;
682 for (i = 0; i < FLOW_U32S; i++) {
683 dst_u32[i] = src1_u32[i] & src2_u32[i];
687 /* Sets 'dst' as the bitwise OR of wildcards in 'src1' and 'src2'. That
688 * is, a bit or a field is wildcarded in 'dst' only if it is wildcarded in
689 * both 'src1' and 'src2'. */
691 flow_wildcards_or(struct flow_wildcards *dst,
692 const struct flow_wildcards *src1,
693 const struct flow_wildcards *src2)
695 uint32_t *dst_u32 = (uint32_t *) &dst->masks;
696 const uint32_t *src1_u32 = (const uint32_t *) &src1->masks;
697 const uint32_t *src2_u32 = (const uint32_t *) &src2->masks;
700 for (i = 0; i < FLOW_U32S; i++) {
701 dst_u32[i] = src1_u32[i] | src2_u32[i];
705 /* Perform a bitwise OR of miniflow 'src' flow data with the equivalent
706 * fields in 'dst', storing the result in 'dst'. */
708 flow_union_with_miniflow(struct flow *dst, const struct miniflow *src)
710 uint32_t *dst_u32 = (uint32_t *) dst;
711 const uint32_t *p = src->values;
714 for (map = src->map; map; map = zero_rightmost_1bit(map)) {
715 dst_u32[raw_ctz(map)] |= *p++;
719 /* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask. */
721 flow_wildcards_fold_minimask(struct flow_wildcards *wc,
722 const struct minimask *mask)
724 flow_union_with_miniflow(&wc->masks, &mask->masks);
728 miniflow_get_map_in_range(const struct miniflow *miniflow,
729 uint8_t start, uint8_t end, unsigned int *offset)
731 uint64_t map = miniflow->map;
735 uint64_t msk = (UINT64_C(1) << start) - 1; /* 'start' LSBs set */
736 *offset = count_1bits(map & msk);
739 if (end < FLOW_U32S) {
740 uint64_t msk = (UINT64_C(1) << end) - 1; /* 'end' LSBs set */
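/* For illustration of miniflow_get_map_in_range(): with bits 3, 7, and 12 set
 * in 'miniflow->map', a range of [4, 10) yields a map with only bit 7 set,
 * and *offset becomes 1 because one set bit (bit 3) lies below 'start'. */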
746 /* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask
747 * in range [start, end). */
749 flow_wildcards_fold_minimask_range(struct flow_wildcards *wc,
750 const struct minimask *mask,
751 uint8_t start, uint8_t end)
753 uint32_t *dst_u32 = (uint32_t *)&wc->masks;
755 uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
757 const uint32_t *p = mask->masks.values + offset;
759 for (; map; map = zero_rightmost_1bit(map)) {
760 dst_u32[raw_ctz(map)] |= *p++;
764 /* Returns a hash of the wildcards in 'wc'. */
766 flow_wildcards_hash(const struct flow_wildcards *wc, uint32_t basis)
768 return flow_hash(&wc->masks, basis);
771 /* Returns true if 'a' and 'b' represent the same wildcards, false if they are different. */
774 flow_wildcards_equal(const struct flow_wildcards *a,
775 const struct flow_wildcards *b)
777 return flow_equal(&a->masks, &b->masks);
780 /* Returns true if at least one bit or field is wildcarded in 'a' but not in
781 * 'b', false otherwise. */
783 flow_wildcards_has_extra(const struct flow_wildcards *a,
784 const struct flow_wildcards *b)
786 const uint32_t *a_u32 = (const uint32_t *) &a->masks;
787 const uint32_t *b_u32 = (const uint32_t *) &b->masks;
790 for (i = 0; i < FLOW_U32S; i++) {
791 if ((a_u32[i] & b_u32[i]) != b_u32[i]) {
798 /* Returns true if 'a' and 'b' are equal, except that 0-bits (wildcarded bits)
799 * in 'wc' do not need to be equal in 'a' and 'b'. */
801 flow_equal_except(const struct flow *a, const struct flow *b,
802 const struct flow_wildcards *wc)
804 const uint32_t *a_u32 = (const uint32_t *) a;
805 const uint32_t *b_u32 = (const uint32_t *) b;
806 const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;
809 for (i = 0; i < FLOW_U32S; i++) {
810 if ((a_u32[i] ^ b_u32[i]) & wc_u32[i]) {
817 /* Sets the wildcard mask for register 'idx' in 'wc' to 'mask'.
818 * (A 0-bit indicates a wildcard bit.) */
820 flow_wildcards_set_reg_mask(struct flow_wildcards *wc, int idx, uint32_t mask)
822 wc->masks.regs[idx] = mask;
825 /* Hashes 'flow' based on its L2 through L4 protocol information. */
827 flow_hash_symmetric_l4(const struct flow *flow, uint32_t basis)
832 struct in6_addr ipv6_addr;
837 uint8_t eth_addr[ETH_ADDR_LEN];
843 memset(&fields, 0, sizeof fields);
844 for (i = 0; i < ETH_ADDR_LEN; i++) {
845 fields.eth_addr[i] = flow->dl_src[i] ^ flow->dl_dst[i];
847 fields.vlan_tci = flow->vlan_tci & htons(VLAN_VID_MASK);
848 fields.eth_type = flow->dl_type;
850 /* UDP source and destination port are not taken into account because they
851 * will not necessarily be symmetric in a bidirectional flow. */
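/* For example, swapping nw_src with nw_dst, or tp_src with tp_dst, leaves the
 * XORed members of 'fields' below unchanged, so both directions of a TCP or
 * SCTP connection hash to the same value. */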
852 if (fields.eth_type == htons(ETH_TYPE_IP)) {
853 fields.ipv4_addr = flow->nw_src ^ flow->nw_dst;
854 fields.ip_proto = flow->nw_proto;
855 if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
856 fields.tp_port = flow->tp_src ^ flow->tp_dst;
858 } else if (fields.eth_type == htons(ETH_TYPE_IPV6)) {
859 const uint8_t *a = &flow->ipv6_src.s6_addr[0];
860 const uint8_t *b = &flow->ipv6_dst.s6_addr[0];
861 uint8_t *ipv6_addr = &fields.ipv6_addr.s6_addr[0];
863 for (i = 0; i < 16; i++) {
864 ipv6_addr[i] = a[i] ^ b[i];
866 fields.ip_proto = flow->nw_proto;
867 if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
868 fields.tp_port = flow->tp_src ^ flow->tp_dst;
871 return jhash_bytes(&fields, sizeof fields, basis);
874 /* Initialize a flow with random fields that matter for nx_hash_fields. */
876 flow_random_hash_fields(struct flow *flow)
878 uint16_t rnd = random_uint16();
880 /* Initialize to all zeros. */
881 memset(flow, 0, sizeof *flow);
883 eth_addr_random(flow->dl_src);
884 eth_addr_random(flow->dl_dst);
886 flow->vlan_tci = (OVS_FORCE ovs_be16) (random_uint16() & VLAN_VID_MASK);
888 /* Make most of the random flows IPv4, some IPv6, and the rest random. */
889 flow->dl_type = rnd < 0x8000 ? htons(ETH_TYPE_IP) :
890 rnd < 0xc000 ? htons(ETH_TYPE_IPV6) : (OVS_FORCE ovs_be16)rnd;
892 if (dl_type_is_ip_any(flow->dl_type)) {
893 if (flow->dl_type == htons(ETH_TYPE_IP)) {
894 flow->nw_src = (OVS_FORCE ovs_be32)random_uint32();
895 flow->nw_dst = (OVS_FORCE ovs_be32)random_uint32();
897 random_bytes(&flow->ipv6_src, sizeof flow->ipv6_src);
898 random_bytes(&flow->ipv6_dst, sizeof flow->ipv6_dst);
900 /* Make most of the IP flows TCP, some UDP or SCTP, and the rest random. */
901 rnd = random_uint16();
902 flow->nw_proto = rnd < 0x8000 ? IPPROTO_TCP :
903 rnd < 0xc000 ? IPPROTO_UDP :
904 rnd < 0xd000 ? IPPROTO_SCTP : (uint8_t)rnd;
905 if (flow->nw_proto == IPPROTO_TCP ||
906 flow->nw_proto == IPPROTO_UDP ||
907 flow->nw_proto == IPPROTO_SCTP) {
908 flow->tp_src = (OVS_FORCE ovs_be16)random_uint16();
909 flow->tp_dst = (OVS_FORCE ovs_be16)random_uint16();
914 /* Masks the fields in 'wc' that are used by the flow hash 'fields'. */
916 flow_mask_hash_fields(const struct flow *flow, struct flow_wildcards *wc,
917 enum nx_hash_fields fields)
920 case NX_HASH_FIELDS_ETH_SRC:
921 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
924 case NX_HASH_FIELDS_SYMMETRIC_L4:
925 memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
926 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
927 if (flow->dl_type == htons(ETH_TYPE_IP)) {
928 memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
929 memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
930 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
931 memset(&wc->masks.ipv6_src, 0xff, sizeof wc->masks.ipv6_src);
932 memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
934 if (is_ip_any(flow)) {
935 memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
936 flow_unwildcard_tp_ports(flow, wc);
938 wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
946 /* Hashes the portions of 'flow' designated by 'fields'. */
948 flow_hash_fields(const struct flow *flow, enum nx_hash_fields fields,
953 case NX_HASH_FIELDS_ETH_SRC:
954 return jhash_bytes(flow->dl_src, sizeof flow->dl_src, basis);
956 case NX_HASH_FIELDS_SYMMETRIC_L4:
957 return flow_hash_symmetric_l4(flow, basis);
963 /* Returns a string representation of 'fields'. */
965 flow_hash_fields_to_str(enum nx_hash_fields fields)
968 case NX_HASH_FIELDS_ETH_SRC: return "eth_src";
969 case NX_HASH_FIELDS_SYMMETRIC_L4: return "symmetric_l4";
970 default: return "<unknown>";
974 /* Returns true if the value of 'fields' is supported. Otherwise false. */
976 flow_hash_fields_valid(enum nx_hash_fields fields)
978 return fields == NX_HASH_FIELDS_ETH_SRC
979 || fields == NX_HASH_FIELDS_SYMMETRIC_L4;
982 /* Returns a hash value for the bits of 'flow' that are active based on
983 * 'wc', given 'basis'. */
985 flow_hash_in_wildcards(const struct flow *flow,
986 const struct flow_wildcards *wc, uint32_t basis)
988 const uint32_t *wc_u32 = (const uint32_t *) &wc->masks;
989 const uint32_t *flow_u32 = (const uint32_t *) flow;
994 for (i = 0; i < FLOW_U32S; i++) {
995 hash = mhash_add(hash, flow_u32[i] & wc_u32[i]);
997 return mhash_finish(hash, 4 * FLOW_U32S);
1000 /* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
1001 * OpenFlow 1.0 "dl_vlan" value:
1003 * - If it is in the range 0...4095, 'flow->vlan_tci' is set to match
1004 * that VLAN. Any existing PCP match is unchanged (it becomes 0 if
1005 * 'flow' previously matched packets without a VLAN header).
1007 * - If it is OFP10_VLAN_NONE, 'flow->vlan_tci' is set to match a packet
1008 * without a VLAN tag.
1010 * - Other values of 'vid' should not be used. */
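/* For illustration (assuming no PCP was previously matched):
 * flow_set_dl_vlan(&flow, htons(10)) yields vlan_tci == htons(VLAN_CFI | 10),
 * while flow_set_dl_vlan(&flow, htons(OFP10_VLAN_NONE)) yields
 * vlan_tci == htons(0). */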
1012 flow_set_dl_vlan(struct flow *flow, ovs_be16 vid)
1014 if (vid == htons(OFP10_VLAN_NONE)) {
1015 flow->vlan_tci = htons(0);
1017 vid &= htons(VLAN_VID_MASK);
1018 flow->vlan_tci &= ~htons(VLAN_VID_MASK);
1019 flow->vlan_tci |= htons(VLAN_CFI) | vid;
1023 /* Sets the VLAN VID that 'flow' matches to 'vid', which is interpreted as an
1024 * OpenFlow 1.2 "vlan_vid" value, that is, the low 13 bits of 'vlan_tci' (VID and CFI). */
1027 flow_set_vlan_vid(struct flow *flow, ovs_be16 vid)
1029 ovs_be16 mask = htons(VLAN_VID_MASK | VLAN_CFI);
1030 flow->vlan_tci &= ~mask;
1031 flow->vlan_tci |= vid & mask;
1034 /* Sets the VLAN PCP that 'flow' matches to 'pcp', which should be in the range 0 through 7.
1037 * This function has no effect on the VLAN ID that 'flow' matches.
1039 * After calling this function, 'flow' will not match packets without a VLAN header. */
1042 flow_set_vlan_pcp(struct flow *flow, uint8_t pcp)
1045 flow->vlan_tci &= ~htons(VLAN_PCP_MASK);
1046 flow->vlan_tci |= htons((pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
1049 /* Returns the number of MPLS LSEs present in 'flow'.
1051 * Returns 0 if the 'dl_type' of 'flow' is not an MPLS Ethernet type.
1052 * Otherwise traverses 'flow''s MPLS label stack stopping at the
1053 * first entry that has the BoS bit set. If no such entry exists then
1054 * the maximum number of LSEs that can be stored in 'flow' is returned.
1057 flow_count_mpls_labels(const struct flow *flow, struct flow_wildcards *wc)
1060 wc->masks.dl_type = OVS_BE16_MAX;
1062 if (eth_type_mpls(flow->dl_type)) {
1064 int len = FLOW_MAX_MPLS_LABELS;
1066 for (i = 0; i < len; i++) {
1068 wc->masks.mpls_lse[i] |= htonl(MPLS_BOS_MASK);
1070 if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
1081 /* Returns the number of consecutive MPLS LSEs, starting at the
1082 * innermost LSE, that are common in 'a' and 'b'.
1084 * 'an' must be flow_count_mpls_labels(a).
1085 * 'bn' must be flow_count_mpls_labels(b).
1088 flow_count_common_mpls_labels(const struct flow *a, int an,
1089 const struct flow *b, int bn,
1090 struct flow_wildcards *wc)
1092 int min_n = MIN(an, bn);
1097 int a_last = an - 1;
1098 int b_last = bn - 1;
1101 for (i = 0; i < min_n; i++) {
1103 wc->masks.mpls_lse[a_last - i] = OVS_BE32_MAX;
1104 wc->masks.mpls_lse[b_last - i] = OVS_BE32_MAX;
1106 if (a->mpls_lse[a_last - i] != b->mpls_lse[b_last - i]) {
1117 /* Adds a new outermost MPLS label to 'flow' and changes 'flow''s Ethernet type
1118 * to 'mpls_eth_type', which must be an MPLS Ethertype.
1120 * If the new label is the first MPLS label in 'flow', it is generated as:
1122 * - label: 2, if 'flow' is IPv6, otherwise 0.
1124 * - TTL: IPv4 or IPv6 TTL, if present and nonzero, otherwise 64.
1126 * - TC: IPv4 or IPv6 TOS, if present, otherwise 0.
1130 * If the new label is the second or later MPLS label in 'flow', it is generated as:
1135 * - TTL: Copied from outer label.
1137 * - TC: Copied from outer label.
1141 * 'n' must be flow_count_mpls_labels(flow). 'n' must be less than
1142 * FLOW_MAX_MPLS_LABELS (because otherwise flow->mpls_lse[] would overflow).
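/* For example, pushing the first label onto an IPv4 flow with nw_ttl == 17
 * and all DSCP bits zero produces mpls_lse[0] with label 0, TC 0, TTL 17, and
 * BoS 1. */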
1145 flow_push_mpls(struct flow *flow, int n, ovs_be16 mpls_eth_type,
1146 struct flow_wildcards *wc)
1148 ovs_assert(eth_type_mpls(mpls_eth_type));
1149 ovs_assert(n < FLOW_MAX_MPLS_LABELS);
1151 memset(wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
1155 for (i = n; i >= 1; i--) {
1156 flow->mpls_lse[i] = flow->mpls_lse[i - 1];
1158 flow->mpls_lse[0] = (flow->mpls_lse[1]
1159 & htonl(~MPLS_BOS_MASK)); /* Copy the outer LSE, clearing its BoS bit. */
1161 int label = 0; /* IPv4 Explicit Null. */
1165 if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
1169 if (is_ip_any(flow)) {
1170 tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
1171 wc->masks.nw_tos |= IP_DSCP_MASK;
1176 wc->masks.nw_ttl = 0xff;
1179 flow->mpls_lse[0] = set_mpls_lse_values(ttl, tc, 1, htonl(label));
1181 /* Clear all L3 and L4 fields. */
1182 BUILD_ASSERT(FLOW_WC_SEQ == 24);
1183 memset((char *) flow + FLOW_SEGMENT_2_ENDS_AT, 0,
1184 sizeof(struct flow) - FLOW_SEGMENT_2_ENDS_AT);
1186 flow->dl_type = mpls_eth_type;
1189 /* Tries to remove the outermost MPLS label from 'flow'. Returns true if
1190 * successful, false otherwise. On success, sets 'flow''s Ethernet type to 'eth_type'.
1193 * 'n' must be flow_count_mpls_labels(flow). */
1195 flow_pop_mpls(struct flow *flow, int n, ovs_be16 eth_type,
1196 struct flow_wildcards *wc)
1201 /* Nothing to pop. */
1203 } else if (n == FLOW_MAX_MPLS_LABELS
1204 && !(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
1205 /* Can't pop because we don't know what to fill in mpls_lse[n - 1]. */
1209 memset(wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
1210 for (i = 1; i < n; i++) {
1211 flow->mpls_lse[i - 1] = flow->mpls_lse[i];
1213 flow->mpls_lse[n - 1] = 0;
1214 flow->dl_type = eth_type;
1218 /* Sets the MPLS Label that 'flow' matches to 'label', which is interpreted
1219 * as an OpenFlow 1.1 "mpls_label" value. */
1221 flow_set_mpls_label(struct flow *flow, int idx, ovs_be32 label)
1223 set_mpls_lse_label(&flow->mpls_lse[idx], label);
1226 /* Sets the MPLS TTL that 'flow' matches to 'ttl', which should be in the range 0 to 255. */
1229 flow_set_mpls_ttl(struct flow *flow, int idx, uint8_t ttl)
1231 set_mpls_lse_ttl(&flow->mpls_lse[idx], ttl);
1234 /* Sets the MPLS TC that 'flow' matches to 'tc', which should be in the range 0 to 7. */
1237 flow_set_mpls_tc(struct flow *flow, int idx, uint8_t tc)
1239 set_mpls_lse_tc(&flow->mpls_lse[idx], tc);
1242 /* Sets the MPLS BoS bit that 'flow' matches to 'bos', which should be 0 or 1. */
1244 flow_set_mpls_bos(struct flow *flow, int idx, uint8_t bos)
1246 set_mpls_lse_bos(&flow->mpls_lse[idx], bos);
1249 /* Sets the entire MPLS LSE. */
1251 flow_set_mpls_lse(struct flow *flow, int idx, ovs_be32 lse)
1253 flow->mpls_lse[idx] = lse;
1257 flow_compose_l4(struct ofpbuf *b, const struct flow *flow)
1259 if (!(flow->nw_frag & FLOW_NW_FRAG_ANY)
1260 || !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
1261 if (flow->nw_proto == IPPROTO_TCP) {
1262 struct tcp_header *tcp;
1264 tcp = ofpbuf_put_zeros(b, sizeof *tcp);
1265 tcp->tcp_src = flow->tp_src;
1266 tcp->tcp_dst = flow->tp_dst;
1267 tcp->tcp_ctl = TCP_CTL(ntohs(flow->tcp_flags), 5);
1268 b->l7 = ofpbuf_tail(b);
1269 } else if (flow->nw_proto == IPPROTO_UDP) {
1270 struct udp_header *udp;
1272 udp = ofpbuf_put_zeros(b, sizeof *udp);
1273 udp->udp_src = flow->tp_src;
1274 udp->udp_dst = flow->tp_dst;
1275 b->l7 = ofpbuf_tail(b);
1276 } else if (flow->nw_proto == IPPROTO_SCTP) {
1277 struct sctp_header *sctp;
1279 sctp = ofpbuf_put_zeros(b, sizeof *sctp);
1280 sctp->sctp_src = flow->tp_src;
1281 sctp->sctp_dst = flow->tp_dst;
1282 b->l7 = ofpbuf_tail(b);
1283 } else if (flow->nw_proto == IPPROTO_ICMP) {
1284 struct icmp_header *icmp;
1286 icmp = ofpbuf_put_zeros(b, sizeof *icmp);
1287 icmp->icmp_type = ntohs(flow->tp_src);
1288 icmp->icmp_code = ntohs(flow->tp_dst);
1289 icmp->icmp_csum = csum(icmp, ICMP_HEADER_LEN);
1290 b->l7 = ofpbuf_tail(b);
1291 } else if (flow->nw_proto == IPPROTO_ICMPV6) {
1292 struct icmp6_hdr *icmp;
1294 icmp = ofpbuf_put_zeros(b, sizeof *icmp);
1295 icmp->icmp6_type = ntohs(flow->tp_src);
1296 icmp->icmp6_code = ntohs(flow->tp_dst);
1298 if (icmp->icmp6_code == 0 &&
1299 (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
1300 icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {
1301 struct in6_addr *nd_target;
1302 struct nd_opt_hdr *nd_opt;
1304 nd_target = ofpbuf_put_zeros(b, sizeof *nd_target);
1305 *nd_target = flow->nd_target;
1307 if (!eth_addr_is_zero(flow->arp_sha)) {
1308 nd_opt = ofpbuf_put_zeros(b, 8);
1309 nd_opt->nd_opt_len = 1;
1310 nd_opt->nd_opt_type = ND_OPT_SOURCE_LINKADDR;
1311 memcpy(nd_opt + 1, flow->arp_sha, ETH_ADDR_LEN);
1313 if (!eth_addr_is_zero(flow->arp_tha)) {
1314 nd_opt = ofpbuf_put_zeros(b, 8);
1315 nd_opt->nd_opt_len = 1;
1316 nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR;
1317 memcpy(nd_opt + 1, flow->arp_tha, ETH_ADDR_LEN);
1320 icmp->icmp6_cksum = (OVS_FORCE uint16_t)
1321 csum(icmp, (char *)ofpbuf_tail(b) - (char *)icmp);
1322 b->l7 = ofpbuf_tail(b);
1327 /* Puts into 'b' a packet that flow_extract() would parse as having the given 'flow'.
1330 * (This is useful only for testing, obviously, and the packet isn't really
1331 * valid. It hasn't got some checksums filled in, for one, and lots of fields
1332 * are just zeroed.) */
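/* A hypothetical test might use it as follows (buffer handling shown only for
 * illustration):
 *
 *     struct ofpbuf b;
 *
 *     ofpbuf_init(&b, 0);
 *     flow_compose(&b, &flow);
 *     ...inspect b.data and b.size...
 *     ofpbuf_uninit(&b);
 */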
1334 flow_compose(struct ofpbuf *b, const struct flow *flow)
1336 /* eth_compose() sets l3 pointer and makes sure it is 32-bit aligned. */
1337 eth_compose(b, flow->dl_dst, flow->dl_src, ntohs(flow->dl_type), 0);
1338 if (flow->dl_type == htons(FLOW_DL_TYPE_NONE)) {
1339 struct eth_header *eth = b->l2;
1340 eth->eth_type = htons(b->size);
1344 if (flow->vlan_tci & htons(VLAN_CFI)) {
1345 eth_push_vlan(b, htons(ETH_TYPE_VLAN), flow->vlan_tci);
1348 if (flow->dl_type == htons(ETH_TYPE_IP)) {
1349 struct ip_header *ip;
1351 ip = ofpbuf_put_zeros(b, sizeof *ip);
1352 ip->ip_ihl_ver = IP_IHL_VER(5, 4);
1353 ip->ip_tos = flow->nw_tos;
1354 ip->ip_ttl = flow->nw_ttl;
1355 ip->ip_proto = flow->nw_proto;
1356 put_16aligned_be32(&ip->ip_src, flow->nw_src);
1357 put_16aligned_be32(&ip->ip_dst, flow->nw_dst);
1359 if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
1360 ip->ip_frag_off |= htons(IP_MORE_FRAGMENTS);
1361 if (flow->nw_frag & FLOW_NW_FRAG_LATER) {
1362 ip->ip_frag_off |= htons(100); /* An arbitrary nonzero offset marks a later fragment. */
1366 b->l4 = ofpbuf_tail(b);
1368 flow_compose_l4(b, flow);
1370 ip->ip_tot_len = htons((uint8_t *) b->data + b->size
1371 - (uint8_t *) b->l3);
1372 ip->ip_csum = csum(ip, sizeof *ip);
1373 } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
1374 struct ovs_16aligned_ip6_hdr *nh;
1376 nh = ofpbuf_put_zeros(b, sizeof *nh);
1377 put_16aligned_be32(&nh->ip6_flow, htonl(6 << 28) |
1378 htonl(flow->nw_tos << 20) | flow->ipv6_label);
1379 nh->ip6_hlim = flow->nw_ttl;
1380 nh->ip6_nxt = flow->nw_proto;
1382 memcpy(&nh->ip6_src, &flow->ipv6_src, sizeof(nh->ip6_src));
1383 memcpy(&nh->ip6_dst, &flow->ipv6_dst, sizeof(nh->ip6_dst));
1385 b->l4 = ofpbuf_tail(b);
1387 flow_compose_l4(b, flow);
1390 nh->ip6_plen = b->l7 ? htons((uint8_t *) b->l7 - (uint8_t *) b->l4) : htons(0);
1391 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
1392 flow->dl_type == htons(ETH_TYPE_RARP)) {
1393 struct arp_eth_header *arp;
1395 b->l3 = arp = ofpbuf_put_zeros(b, sizeof *arp);
1396 arp->ar_hrd = htons(1);
1397 arp->ar_pro = htons(ETH_TYPE_IP);
1398 arp->ar_hln = ETH_ADDR_LEN;
1400 arp->ar_op = htons(flow->nw_proto);
1402 if (flow->nw_proto == ARP_OP_REQUEST ||
1403 flow->nw_proto == ARP_OP_REPLY) {
1404 put_16aligned_be32(&arp->ar_spa, flow->nw_src);
1405 put_16aligned_be32(&arp->ar_tpa, flow->nw_dst);
1406 memcpy(arp->ar_sha, flow->arp_sha, ETH_ADDR_LEN);
1407 memcpy(arp->ar_tha, flow->arp_tha, ETH_ADDR_LEN);
1411 if (eth_type_mpls(flow->dl_type)) {
1415 for (n = 1; n < FLOW_MAX_MPLS_LABELS; n++) {
1416 if (flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK)) {
1421 push_mpls(b, flow->dl_type, flow->mpls_lse[--n]);
1426 /* Compressed flow. */
1429 miniflow_n_values(const struct miniflow *flow)
1431 return count_1bits(flow->map);
1435 miniflow_alloc_values(struct miniflow *flow, int n)
1437 if (n <= MINI_N_INLINE) {
1438 return flow->inline_values;
1440 COVERAGE_INC(miniflow_malloc);
1441 return xmalloc(n * sizeof *flow->values);
1445 /* Completes an initialization of 'dst' as a miniflow copy of 'src' begun by
1446 * the caller. The caller must have already initialized 'dst->map' properly
1447 * to indicate the significant uint32_t elements of 'src'. 'n' must be the
1448 * number of 1-bits in 'dst->map'.
1450 * Normally the significant elements are the ones that are non-zero. However,
1451 * when a miniflow is initialized from a (mini)mask, the values can be zeroes,
1452 * so that the flow and mask always have the same maps.
1454 * This function initializes 'dst->values' (either inline if possible or with
1455 * malloc() otherwise) and copies the uint32_t elements of 'src' indicated by
1456 * 'dst->map' into it. */
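/* For illustration (offsets invented): if only the uint32_t elements at
 * offsets 1 and 5 of 'src' are significant, 'dst->map' has bits 1 and 5 set,
 * 'n' is 2, and dst->values[0] and dst->values[1] receive src_u32[1] and
 * src_u32[5], in that order. */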
1458 miniflow_init__(struct miniflow *dst, const struct flow *src, int n)
1460 const uint32_t *src_u32 = (const uint32_t *) src;
1464 dst->values = miniflow_alloc_values(dst, n);
1466 for (map = dst->map; map; map = zero_rightmost_1bit(map)) {
1467 dst->values[ofs++] = src_u32[raw_ctz(map)];
1471 /* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
1472 * with miniflow_destroy(). */
1474 miniflow_init(struct miniflow *dst, const struct flow *src)
1476 const uint32_t *src_u32 = (const uint32_t *) src;
1480 /* Initialize dst->map, counting the number of nonzero elements. */
1484 for (i = 0; i < FLOW_U32S; i++) {
1486 dst->map |= UINT64_C(1) << i;
1491 miniflow_init__(dst, src, n);
1494 /* Initializes 'dst' as a copy of 'src', using 'mask->map' as 'dst''s map. The
1495 * caller must eventually free 'dst' with miniflow_destroy(). */
1497 miniflow_init_with_minimask(struct miniflow *dst, const struct flow *src,
1498 const struct minimask *mask)
1500 dst->map = mask->masks.map;
1501 miniflow_init__(dst, src, miniflow_n_values(dst));
1504 /* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
1505 * with miniflow_destroy(). */
1507 miniflow_clone(struct miniflow *dst, const struct miniflow *src)
1509 int n = miniflow_n_values(src);
1510 dst->map = src->map;
1511 dst->values = miniflow_alloc_values(dst, n);
1512 memcpy(dst->values, src->values, n * sizeof *dst->values);
1515 /* Initializes 'dst' with the data in 'src', destroying 'src'.
1516 * The caller must eventually free 'dst' with miniflow_destroy(). */
1518 miniflow_move(struct miniflow *dst, struct miniflow *src)
1520 if (src->values == src->inline_values) {
1521 dst->values = dst->inline_values;
1522 memcpy(dst->values, src->values,
1523 miniflow_n_values(src) * sizeof *dst->values);
1525 dst->values = src->values;
1527 dst->map = src->map;
1530 /* Frees any memory owned by 'flow'. Does not free the storage in which 'flow'
1531 * itself resides; the caller is responsible for that. */
1533 miniflow_destroy(struct miniflow *flow)
1535 if (flow->values != flow->inline_values) {
1540 /* Initializes 'dst' as a copy of 'src'. */
1542 miniflow_expand(const struct miniflow *src, struct flow *dst)
1544 memset(dst, 0, sizeof *dst);
1545 flow_union_with_miniflow(dst, src);
1548 static const uint32_t *
1549 miniflow_get__(const struct miniflow *flow, unsigned int u32_ofs)
1551 if (!(flow->map & (UINT64_C(1) << u32_ofs))) {
1552 static const uint32_t zero = 0;
1555 return flow->values +
1556 count_1bits(flow->map & ((UINT64_C(1) << u32_ofs) - 1));
1559 /* Returns the uint32_t that would be at byte offset '4 * u32_ofs' if 'flow'
1560 * were expanded into a "struct flow". */
1562 miniflow_get(const struct miniflow *flow, unsigned int u32_ofs)
1564 return *miniflow_get__(flow, u32_ofs);
1567 /* Returns the ovs_be16 that would be at byte offset 'u8_ofs' if 'flow' were
1568 * expanded into a "struct flow". */
1570 miniflow_get_be16(const struct miniflow *flow, unsigned int u8_ofs)
1572 const uint32_t *u32p = miniflow_get__(flow, u8_ofs / 4);
1573 const ovs_be16 *be16p = (const ovs_be16 *) u32p;
1574 return be16p[u8_ofs % 4 != 0];
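/* In miniflow_get_be16() above, for example, a 16-bit member at byte offset
 * 4 * k is read from be16p[0], while one at byte offset 4 * k + 2 is read
 * from be16p[1]. */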
1577 /* Returns the VID within the vlan_tci member of the "struct flow" represented by 'flow'. */
1580 miniflow_get_vid(const struct miniflow *flow)
1582 ovs_be16 tci = miniflow_get_be16(flow, offsetof(struct flow, vlan_tci));
1583 return vlan_tci_to_vid(tci);
1586 /* Returns true if 'a' and 'b' are the same flow, false otherwise. */
1588 miniflow_equal(const struct miniflow *a, const struct miniflow *b)
1590 const uint32_t *ap = a->values;
1591 const uint32_t *bp = b->values;
1592 const uint64_t a_map = a->map;
1593 const uint64_t b_map = b->map;
1596 if (a_map == b_map) {
1597 for (map = a_map; map; map = zero_rightmost_1bit(map)) {
1598 if (*ap++ != *bp++) {
1603 for (map = a_map | b_map; map; map = zero_rightmost_1bit(map)) {
1604 uint64_t bit = rightmost_1bit(map);
1605 uint64_t a_value = a_map & bit ? *ap++ : 0;
1606 uint64_t b_value = b_map & bit ? *bp++ : 0;
1608 if (a_value != b_value) {
1617 /* Returns true if 'a' and 'b' are equal at the places where there are 1-bits
1618 * in 'mask', false if they differ. */
1620 miniflow_equal_in_minimask(const struct miniflow *a, const struct miniflow *b,
1621 const struct minimask *mask)
1626 p = mask->masks.values;
1628 for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
1629 int ofs = raw_ctz(map);
1631 if ((miniflow_get(a, ofs) ^ miniflow_get(b, ofs)) & *p) {
1640 /* Returns true if 'a' and 'b' are equal at the places where there are 1-bits
1641 * in 'mask', false if they differ. */
1643 miniflow_equal_flow_in_minimask(const struct miniflow *a, const struct flow *b,
1644 const struct minimask *mask)
1646 const uint32_t *b_u32 = (const uint32_t *) b;
1650 p = mask->masks.values;
1652 for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
1653 int ofs = raw_ctz(map);
1655 if ((miniflow_get(a, ofs) ^ b_u32[ofs]) & *p) {
1664 /* Returns a hash value for 'flow', given 'basis'. */
1666 miniflow_hash(const struct miniflow *flow, uint32_t basis)
1668 const uint32_t *p = flow->values;
1669 uint32_t hash = basis;
1670 uint64_t hash_map = 0;
1673 for (map = flow->map; map; map = zero_rightmost_1bit(map)) {
1675 hash = mhash_add(hash, *p);
1676 hash_map |= rightmost_1bit(map);
1680 hash = mhash_add(hash, hash_map);
1681 hash = mhash_add(hash, hash_map >> 32);
1683 return mhash_finish(hash, p - flow->values);
1686 /* Returns a hash value for the bits of 'flow' where there are 1-bits in
1687 * 'mask', given 'basis'.
1689 * The hash values returned by this function are the same as those returned by
1690 * flow_hash_in_minimask(), only the form of the arguments differ. */
1692 miniflow_hash_in_minimask(const struct miniflow *flow,
1693 const struct minimask *mask, uint32_t basis)
1695 const uint32_t *p = mask->masks.values;
1701 for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
1702 hash = mhash_add(hash, miniflow_get(flow, raw_ctz(map)) & *p++);
1705 return mhash_finish(hash, (p - mask->masks.values) * 4);
1708 /* Returns a hash value for the bits of 'flow' where there are 1-bits in
1709 * 'mask', given 'basis'.
1711 * The hash values returned by this function are the same as those returned by
1712 * miniflow_hash_in_minimask(), only the form of the arguments differ. */
1714 flow_hash_in_minimask(const struct flow *flow, const struct minimask *mask,
1717 const uint32_t *flow_u32 = (const uint32_t *)flow;
1718 const uint32_t *p = mask->masks.values;
1723 for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
1724 hash = mhash_add(hash, flow_u32[raw_ctz(map)] & *p++);
1727 return mhash_finish(hash, (p - mask->masks.values) * 4);
1730 /* Returns a hash value for the bits of range [start, end) in 'flow',
1731 * where there are 1-bits in 'mask', given 'hash'.
1733 * The hash values returned by this function are the same as those returned by
1734 * minimatch_hash_range(), only the form of the arguments differ. */
1736 flow_hash_in_minimask_range(const struct flow *flow,
1737 const struct minimask *mask,
1738 uint8_t start, uint8_t end, uint32_t *basis)
1740 const uint32_t *flow_u32 = (const uint32_t *)flow;
1741 unsigned int offset;
1742 uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
1744 const uint32_t *p = mask->masks.values + offset;
1745 uint32_t hash = *basis;
1747 for (; map; map = zero_rightmost_1bit(map)) {
1748 hash = mhash_add(hash, flow_u32[raw_ctz(map)] & *p++);
1751 *basis = hash; /* Allow continuation from the unfinished value. */
1752 return mhash_finish(hash, (p - mask->masks.values) * 4);
1756 /* Initializes 'mask' as a copy of 'wc'. The caller must eventually free 'mask'
1757 * with minimask_destroy(). */
1759 minimask_init(struct minimask *mask, const struct flow_wildcards *wc)
1761 miniflow_init(&mask->masks, &wc->masks);
1764 /* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
1765 * with minimask_destroy(). */
1767 minimask_clone(struct minimask *dst, const struct minimask *src)
1769 miniflow_clone(&dst->masks, &src->masks);
1772 /* Initializes 'dst' with the data in 'src', destroying 'src'.
1773 * The caller must eventually free 'dst' with minimask_destroy(). */
1775 minimask_move(struct minimask *dst, struct minimask *src)
1777 miniflow_move(&dst->masks, &src->masks);
1780 /* Initializes 'dst_' as the bit-wise "and" of 'a_' and 'b_'.
1782 * The caller must provide room for FLOW_U32S "uint32_t"s in 'storage', for use
1783 * by 'dst_'. The caller must *not* free 'dst_' with minimask_destroy(). */
1785 minimask_combine(struct minimask *dst_,
1786 const struct minimask *a_, const struct minimask *b_,
1787 uint32_t storage[FLOW_U32S])
1789 struct miniflow *dst = &dst_->masks;
1790 const struct miniflow *a = &a_->masks;
1791 const struct miniflow *b = &b_->masks;
1795 dst->values = storage;
1798 for (map = a->map & b->map; map; map = zero_rightmost_1bit(map)) {
1799 int ofs = raw_ctz(map);
1800 uint32_t mask = miniflow_get(a, ofs) & miniflow_get(b, ofs);
1803 dst->map |= rightmost_1bit(map);
1804 dst->values[n++] = mask;
1809 /* Frees any memory owned by 'mask'. Does not free the storage in which 'mask'
1810 * itself resides; the caller is responsible for that. */
1812 minimask_destroy(struct minimask *mask)
1814 miniflow_destroy(&mask->masks);
1817 /* Initializes 'wc' as a copy of 'mask'. */
1819 minimask_expand(const struct minimask *mask, struct flow_wildcards *wc)
1821 miniflow_expand(&mask->masks, &wc->masks);
1824 /* Returns the uint32_t that would be at byte offset '4 * u32_ofs' if 'mask'
1825 * were expanded into a "struct flow_wildcards". */
1827 minimask_get(const struct minimask *mask, unsigned int u32_ofs)
1829 return miniflow_get(&mask->masks, u32_ofs);
1832 /* Returns the VID mask within the vlan_tci member of the "struct
1833 * flow_wildcards" represented by 'mask'. */
1835 minimask_get_vid_mask(const struct minimask *mask)
1837 return miniflow_get_vid(&mask->masks);
1840 /* Returns true if 'a' and 'b' are the same flow mask, false otherwise. */
1842 minimask_equal(const struct minimask *a, const struct minimask *b)
1844 return miniflow_equal(&a->masks, &b->masks);
1847 /* Returns a hash value for 'mask', given 'basis'. */
1849 minimask_hash(const struct minimask *mask, uint32_t basis)
1851 return miniflow_hash(&mask->masks, basis);
1854 /* Returns true if at least one bit is wildcarded in 'a_' but not in 'b_',
1855 * false otherwise. */
1857 minimask_has_extra(const struct minimask *a_, const struct minimask *b_)
1859 const struct miniflow *a = &a_->masks;
1860 const struct miniflow *b = &b_->masks;
1863 for (map = a->map | b->map; map; map = zero_rightmost_1bit(map)) {
1864 int ofs = raw_ctz(map);
1865 uint32_t a_u32 = miniflow_get(a, ofs);
1866 uint32_t b_u32 = miniflow_get(b, ofs);
1868 if ((a_u32 & b_u32) != b_u32) {
1876 /* Returns true if 'mask' matches every packet, false if 'mask' fixes any bits or fields. */
1879 minimask_is_catchall(const struct minimask *mask_)
1881 const struct miniflow *mask = &mask_->masks;
1882 const uint32_t *p = mask->values;
1885 for (map = mask->map; map; map = zero_rightmost_1bit(map)) {