/*
- * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "ofpbuf.h"
#include "openflow/openflow.h"
#include "packets.h"
+#include "odp-util.h"
+#include "random.h"
#include "unaligned.h"
-#include "vlog.h"
-
-VLOG_DEFINE_THIS_MODULE(flow);
COVERAGE_DEFINE(flow_extract);
COVERAGE_DEFINE(miniflow_malloc);
+/* U32 indices for segmented flow classification. */
+const uint8_t flow_segment_u32s[4] = {
+ FLOW_SEGMENT_1_ENDS_AT / 4,
+ FLOW_SEGMENT_2_ENDS_AT / 4,
+ FLOW_SEGMENT_3_ENDS_AT / 4,
+ FLOW_U32S
+};
+
static struct arp_eth_header *
pull_arp(struct ofpbuf *packet)
{
static struct ip_header *
pull_ip(struct ofpbuf *packet)
{
- if (packet->size >= IP_HEADER_LEN) {
- struct ip_header *ip = packet->data;
+ if (ofpbuf_size(packet) >= IP_HEADER_LEN) {
+ struct ip_header *ip = ofpbuf_data(packet);
int ip_len = IP_IHL(ip->ip_ihl_ver) * 4;
- if (ip_len >= IP_HEADER_LEN && packet->size >= ip_len) {
+ if (ip_len >= IP_HEADER_LEN && ofpbuf_size(packet) >= ip_len) {
return ofpbuf_pull(packet, ip_len);
}
}
return NULL;
}
-static struct tcp_header *
-pull_tcp(struct ofpbuf *packet)
-{
- if (packet->size >= TCP_HEADER_LEN) {
- struct tcp_header *tcp = packet->data;
- int tcp_len = TCP_OFFSET(tcp->tcp_ctl) * 4;
- if (tcp_len >= TCP_HEADER_LEN && packet->size >= tcp_len) {
- return ofpbuf_pull(packet, tcp_len);
- }
- }
- return NULL;
-}
-
-static struct udp_header *
-pull_udp(struct ofpbuf *packet)
-{
- return ofpbuf_try_pull(packet, UDP_HEADER_LEN);
-}
-
static struct icmp_header *
pull_icmp(struct ofpbuf *packet)
{
parse_mpls(struct ofpbuf *b, struct flow *flow)
{
struct mpls_hdr *mh;
+ int idx = 0;
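+ /* Record at most FLOW_MAX_MPLS_LABELS LSEs, but keep pulling headers
+ * until the bottom-of-stack bit so that 'b' ends up at the MPLS payload. */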
while ((mh = ofpbuf_try_pull(b, sizeof *mh))) {
- if (flow->mpls_depth++ == 0) {
- flow->mpls_lse = mh->mpls_lse;
+ ovs_be32 mpls_lse = get_16aligned_be32(&mh->mpls_lse);
+ if (idx < FLOW_MAX_MPLS_LABELS) {
+ flow->mpls_lse[idx++] = mpls_lse;
}
- if (mh->mpls_lse & htonl(MPLS_BOS_MASK)) {
+ if (mpls_lse & htonl(MPLS_BOS_MASK)) {
break;
}
}
ovs_be16 tci;
};
- if (b->size >= sizeof(struct qtag_prefix) + sizeof(ovs_be16)) {
+ if (ofpbuf_size(b) >= sizeof(struct qtag_prefix) + sizeof(ovs_be16)) {
struct qtag_prefix *qp = ofpbuf_pull(b, sizeof *qp);
flow->vlan_tci = qp->tci | htons(VLAN_CFI);
}
return proto;
}
- if (b->size < sizeof *llc) {
+ if (ofpbuf_size(b) < sizeof *llc) {
return htons(FLOW_DL_TYPE_NONE);
}
- llc = b->data;
+ llc = ofpbuf_data(b);
if (llc->llc.llc_dsap != LLC_DSAP_SNAP
|| llc->llc.llc_ssap != LLC_SSAP_SNAP
|| llc->llc.llc_cntl != LLC_CNTL_SNAP
static int
parse_ipv6(struct ofpbuf *packet, struct flow *flow)
{
- const struct ip6_hdr *nh;
+ const struct ovs_16aligned_ip6_hdr *nh;
ovs_be32 tc_flow;
int nexthdr;
nexthdr = nh->ip6_nxt;
- flow->ipv6_src = nh->ip6_src;
- flow->ipv6_dst = nh->ip6_dst;
+ memcpy(&flow->ipv6_src, &nh->ip6_src, sizeof flow->ipv6_src);
+ memcpy(&flow->ipv6_dst, &nh->ip6_dst, sizeof flow->ipv6_dst);
- tc_flow = get_unaligned_be32(&nh->ip6_flow);
+ tc_flow = get_16aligned_be32(&nh->ip6_flow);
flow->nw_tos = ntohl(tc_flow) >> 20;
flow->ipv6_label = tc_flow & htonl(IPV6_LABEL_MASK);
flow->nw_ttl = nh->ip6_hlim;
* accesses within the extension header are within those first 8
* bytes. All extension headers are required to be at least 8
* bytes. */
- if (packet->size < 8) {
+ if (ofpbuf_size(packet) < 8) {
return EINVAL;
}
|| (nexthdr == IPPROTO_DSTOPTS)) {
/* These headers, while different, have the fields we care about
* in the same location and with the same interpretation. */
- const struct ip6_ext *ext_hdr = packet->data;
+ const struct ip6_ext *ext_hdr = ofpbuf_data(packet);
nexthdr = ext_hdr->ip6e_nxt;
if (!ofpbuf_try_pull(packet, (ext_hdr->ip6e_len + 1) * 8)) {
return EINVAL;
* we care about are in the same location as the generic
* option header--only the header length is calculated
* differently. */
- const struct ip6_ext *ext_hdr = packet->data;
+ const struct ip6_ext *ext_hdr = ofpbuf_data(packet);
nexthdr = ext_hdr->ip6e_nxt;
if (!ofpbuf_try_pull(packet, (ext_hdr->ip6e_len + 2) * 4)) {
return EINVAL;
}
} else if (nexthdr == IPPROTO_FRAGMENT) {
- const struct ip6_frag *frag_hdr = packet->data;
+ const struct ovs_16aligned_ip6_frag *frag_hdr = ofpbuf_data(packet);
nexthdr = frag_hdr->ip6f_nxt;
if (!ofpbuf_try_pull(packet, sizeof *frag_hdr)) {
}
static void
-parse_tcp(struct ofpbuf *packet, struct ofpbuf *b, struct flow *flow)
+parse_tcp(struct ofpbuf *b, struct flow *flow)
{
- const struct tcp_header *tcp = pull_tcp(b);
- if (tcp) {
+ if (ofpbuf_size(b) >= TCP_HEADER_LEN) {
+ const struct tcp_header *tcp = ofpbuf_data(b);
+
flow->tp_src = tcp->tcp_src;
flow->tp_dst = tcp->tcp_dst;
- packet->l7 = b->data;
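+ /* The low 12 bits of tcp_ctl are the TCP flags (plus the reserved
+ * bits); the data-offset nibble is masked out. */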
+ flow->tcp_flags = tcp->tcp_ctl & htons(0x0fff);
}
}
static void
-parse_udp(struct ofpbuf *packet, struct ofpbuf *b, struct flow *flow)
+parse_udp(struct ofpbuf *b, struct flow *flow)
{
- const struct udp_header *udp = pull_udp(b);
- if (udp) {
+ if (ofpbuf_size(b) >= UDP_HEADER_LEN) {
+ const struct udp_header *udp = ofpbuf_data(b);
+
flow->tp_src = udp->udp_src;
flow->tp_dst = udp->udp_dst;
- packet->l7 = b->data;
}
}
-static bool
+static void
+parse_sctp(struct ofpbuf *b, struct flow *flow)
+{
+ if (ofpbuf_size(b) >= SCTP_HEADER_LEN) {
+ const struct sctp_header *sctp = ofpbuf_data(b);
+
+ flow->tp_src = sctp->sctp_src;
+ flow->tp_dst = sctp->sctp_dst;
+ }
+}
+
+static void
parse_icmpv6(struct ofpbuf *b, struct flow *flow)
{
const struct icmp6_hdr *icmp = pull_icmpv6(b);
if (!icmp) {
- return false;
+ return;
}
/* The ICMPv6 type and code fields use the 16-bit transport port
nd_target = ofpbuf_try_pull(b, sizeof *nd_target);
if (!nd_target) {
- return false;
+ return;
}
flow->nd_target = *nd_target;
- while (b->size >= 8) {
+ while (ofpbuf_size(b) >= 8) {
/* The minimum size of an option is 8 bytes, which also is
* the size of Ethernet link-layer options. */
- const struct nd_opt_hdr *nd_opt = b->data;
+ const struct nd_opt_hdr *nd_opt = ofpbuf_data(b);
int opt_len = nd_opt->nd_opt_len * 8;
- if (!opt_len || opt_len > b->size) {
+ if (!opt_len || opt_len > ofpbuf_size(b)) {
goto invalid;
}
}
}
- return true;
+ return;
invalid:
memset(&flow->nd_target, 0, sizeof(flow->nd_target));
memset(flow->arp_sha, 0, sizeof(flow->arp_sha));
memset(flow->arp_tha, 0, sizeof(flow->arp_tha));
- return false;
-
+ return;
}
-/* Initializes 'flow' members from 'packet', 'skb_priority', 'tnl', and
- * 'in_port'.
+/* Initializes 'flow' members from 'packet' and 'md'.
*
- * Initializes 'packet' header pointers as follows:
+ * Initializes 'packet' header l2 pointer to the start of the Ethernet
+ * header, and the layer offsets as follows:
*
- * - packet->l2 to the start of the Ethernet header.
+ * - packet->l2_5_ofs to the start of the MPLS shim header, or UINT16_MAX
+ * when there is no MPLS shim header.
*
- * - packet->l2_5 to the start of the MPLS shim header.
- *
- * - packet->l3 to just past the Ethernet header, or just past the
+ * - packet->l3_ofs to just past the Ethernet header, or just past the
* vlan_header if one is present, to the first byte of the payload of the
- * Ethernet frame.
- *
- * - packet->l4 to just past the IPv4 header, if one is present and has a
- * correct length, and otherwise NULL.
+ * Ethernet frame. UINT16_MAX if the frame is too short to contain an
+ * Ethernet header.
*
- * - packet->l7 to just past the TCP or UDP or ICMP header, if one is
- * present and has a correct length, and otherwise NULL.
+ * - packet->l4_ofs to just past the IPv4 header, if one is present and
+ * long enough to cover the fields of interest for the flow, otherwise
+ * UINT16_MAX.
*/
void
-flow_extract(struct ofpbuf *packet, uint32_t skb_priority, uint32_t pkt_mark,
- const struct flow_tnl *tnl, const union flow_in_port *in_port,
+flow_extract(struct ofpbuf *packet, const struct pkt_metadata *md,
struct flow *flow)
{
struct ofpbuf b = *packet;
memset(flow, 0, sizeof *flow);
- if (tnl) {
- ovs_assert(tnl != &flow->tunnel);
- flow->tunnel = *tnl;
+ if (md) {
+ flow->tunnel = md->tunnel;
+ flow->in_port = md->in_port;
+ flow->skb_priority = md->skb_priority;
+ flow->pkt_mark = md->pkt_mark;
+ flow->recirc_id = md->recirc_id;
+ flow->dp_hash = md->dp_hash;
}
- if (in_port) {
- flow->in_port = *in_port;
- }
- flow->skb_priority = skb_priority;
- flow->pkt_mark = pkt_mark;
- packet->l2 = b.data;
- packet->l2_5 = NULL;
- packet->l3 = NULL;
- packet->l4 = NULL;
- packet->l7 = NULL;
+ ofpbuf_set_frame(packet, ofpbuf_data(packet));
- if (b.size < sizeof *eth) {
+ if (ofpbuf_size(&b) < sizeof *eth) {
return;
}
/* Link layer. */
- eth = b.data;
+ eth = ofpbuf_data(&b);
memcpy(flow->dl_src, eth->eth_src, ETH_ADDR_LEN);
memcpy(flow->dl_dst, eth->eth_dst, ETH_ADDR_LEN);
/* Parse mpls, copy l3 ttl. */
if (eth_type_mpls(flow->dl_type)) {
- packet->l2_5 = b.data;
+ ofpbuf_set_l2_5(packet, ofpbuf_data(&b));
parse_mpls(&b, flow);
}
/* Network layer. */
- packet->l3 = b.data;
+ ofpbuf_set_l3(packet, ofpbuf_data(&b));
if (flow->dl_type == htons(ETH_TYPE_IP)) {
const struct ip_header *nh = pull_ip(&b);
if (nh) {
- packet->l4 = b.data;
+ ofpbuf_set_l4(packet, ofpbuf_data(&b));
- flow->nw_src = get_unaligned_be32(&nh->ip_src);
- flow->nw_dst = get_unaligned_be32(&nh->ip_dst);
+ flow->nw_src = get_16aligned_be32(&nh->ip_src);
+ flow->nw_dst = get_16aligned_be32(&nh->ip_dst);
flow->nw_proto = nh->ip_proto;
flow->nw_tos = nh->ip_tos;
if (!(nh->ip_frag_off & htons(IP_FRAG_OFF_MASK))) {
if (flow->nw_proto == IPPROTO_TCP) {
- parse_tcp(packet, &b, flow);
+ parse_tcp(&b, flow);
} else if (flow->nw_proto == IPPROTO_UDP) {
- parse_udp(packet, &b, flow);
+ parse_udp(&b, flow);
+ } else if (flow->nw_proto == IPPROTO_SCTP) {
+ parse_sctp(&b, flow);
} else if (flow->nw_proto == IPPROTO_ICMP) {
const struct icmp_header *icmp = pull_icmp(&b);
if (icmp) {
flow->tp_src = htons(icmp->icmp_type);
flow->tp_dst = htons(icmp->icmp_code);
- packet->l7 = b.data;
}
}
}
return;
}
- packet->l4 = b.data;
+ ofpbuf_set_l4(packet, ofpbuf_data(&b));
if (flow->nw_proto == IPPROTO_TCP) {
- parse_tcp(packet, &b, flow);
+ parse_tcp(&b, flow);
} else if (flow->nw_proto == IPPROTO_UDP) {
- parse_udp(packet, &b, flow);
+ parse_udp(&b, flow);
+ } else if (flow->nw_proto == IPPROTO_SCTP) {
+ parse_sctp(&b, flow);
} else if (flow->nw_proto == IPPROTO_ICMPV6) {
- if (parse_icmpv6(&b, flow)) {
- packet->l7 = b.data;
- }
+ parse_icmpv6(&b, flow);
}
} else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
flow->dl_type == htons(ETH_TYPE_RARP)) {
flow->nw_proto = ntohs(arp->ar_op);
}
- flow->nw_src = arp->ar_spa;
- flow->nw_dst = arp->ar_tpa;
+ flow->nw_src = get_16aligned_be32(&arp->ar_spa);
+ flow->nw_dst = get_16aligned_be32(&arp->ar_tpa);
memcpy(flow->arp_sha, arp->ar_sha, ETH_ADDR_LEN);
memcpy(flow->arp_tha, arp->ar_tha, ETH_ADDR_LEN);
}
}
}
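+/* Sets the transport-port wildcard masks in 'wc' to exact-match.  For ICMP,
+ * tp_src and tp_dst hold the 8-bit type and code, so only the low byte of
+ * each mask is set. */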
+void
+flow_unwildcard_tp_ports(const struct flow *flow, struct flow_wildcards *wc)
+{
+ if (flow->nw_proto != IPPROTO_ICMP) {
+ memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
+ memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
+ } else {
+ wc->masks.tp_src = htons(0xff);
+ wc->masks.tp_dst = htons(0xff);
+ }
+}
+
/* Initializes 'fmd' with the metadata found in 'flow'. */
void
flow_get_metadata(const struct flow *flow, struct flow_metadata *fmd)
{
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 20);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 25);
+ fmd->dp_hash = flow->dp_hash;
+ fmd->recirc_id = flow->recirc_id;
fmd->tun_id = flow->tunnel.tun_id;
fmd->tun_src = flow->tunnel.ip_src;
fmd->tun_dst = flow->tunnel.ip_dst;
ds_chomp(ds, del);
}
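+/* Appends to 'ds' a '+' or '-' prefixed name for each bit set in 'mask',
+ * depending on whether that bit is also set in 'flags'.  Bits for which
+ * 'bit_to_string' returns no name are printed as "[Unknown]". */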
+void
+format_flags_masked(struct ds *ds, const char *name,
+ const char *(*bit_to_string)(uint32_t), uint32_t flags,
+ uint32_t mask)
+{
+ if (name) {
+ ds_put_format(ds, "%s=", name);
+ }
+ while (mask) {
+ uint32_t bit = rightmost_1bit(mask);
+ const char *s = bit_to_string(bit);
+
+ ds_put_format(ds, "%s%s", (flags & bit) ? "+" : "-",
+ s ? s : "[Unknown]");
+ mask &= ~bit;
+ }
+}
+
void
flow_format(struct ds *ds, const struct flow *flow)
{
memset(&wc->masks, 0, sizeof wc->masks);
}
-/* Initializes 'wc' as an exact-match set of wildcards; that is, 'wc' does not
- * wildcard any bits or fields. */
+/* Clear the metadata and register wildcard masks. They are not packet
+ * header fields. */
void
-flow_wildcards_init_exact(struct flow_wildcards *wc)
+flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
{
- memset(&wc->masks, 0xff, sizeof wc->masks);
- memset(wc->masks.zeros, 0, sizeof wc->masks.zeros);
+ memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
+ memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
}
/* Returns true if 'wc' matches every packet, false if 'wc' fixes any bits or
flow_union_with_miniflow(struct flow *dst, const struct miniflow *src)
{
uint32_t *dst_u32 = (uint32_t *) dst;
- int ofs;
- int i;
-
- ofs = 0;
- for (i = 0; i < MINI_N_MAPS; i++) {
- uint32_t map;
+ const uint32_t *p = src->values;
+ uint64_t map;
- for (map = src->map[i]; map; map = zero_rightmost_1bit(map)) {
- dst_u32[raw_ctz(map) + i * 32] |= src->values[ofs++];
- }
+ for (map = src->map; map; map = zero_rightmost_1bit(map)) {
+ dst_u32[raw_ctz(map)] |= *p++;
}
}
flow_union_with_miniflow(&wc->masks, &mask->masks);
}
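+/* Returns the bits of 'miniflow->map' that cover the u32 range [start, end),
+ * and sets '*offset' to the number of miniflow values that precede 'start',
+ * so that 'miniflow->values + *offset' is the first value covered by the
+ * returned map. */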
+uint64_t
+miniflow_get_map_in_range(const struct miniflow *miniflow,
+ uint8_t start, uint8_t end, unsigned int *offset)
+{
+ uint64_t map = miniflow->map;
+ *offset = 0;
+
+ if (start > 0) {
+ uint64_t msk = (UINT64_C(1) << start) - 1; /* 'start' LSBs set */
+ *offset = count_1bits(map & msk);
+ map &= ~msk;
+ }
+ if (end < FLOW_U32S) {
+ uint64_t msk = (UINT64_C(1) << end) - 1; /* 'end' LSBs set */
+ map &= msk;
+ }
+ return map;
+}
+
+/* Folds minimask 'mask''s wildcard mask into 'wc''s wildcard mask
+ * in the range [start, end). */
+void
+flow_wildcards_fold_minimask_range(struct flow_wildcards *wc,
+ const struct minimask *mask,
+ uint8_t start, uint8_t end)
+{
+ uint32_t *dst_u32 = (uint32_t *)&wc->masks;
+ unsigned int offset;
+ uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
+ &offset);
+ const uint32_t *p = mask->masks.values + offset;
+
+ for (; map; map = zero_rightmost_1bit(map)) {
+ dst_u32[raw_ctz(map)] |= *p++;
+ }
+}
+
/* Returns a hash of the wildcards in 'wc'. */
uint32_t
flow_wildcards_hash(const struct flow_wildcards *wc, uint32_t basis)
wc->masks.regs[idx] = mask;
}
+/* Calculates the 5-tuple hash from the given flow. */
+uint32_t
+flow_hash_5tuple(const struct flow *flow, uint32_t basis)
+{
+ uint32_t hash = 0;
+
+ if (!flow) {
+ return 0;
+ }
+
+ hash = mhash_add(basis, (OVS_FORCE uint32_t) flow->nw_src);
+ hash = mhash_add(hash, (OVS_FORCE uint32_t) flow->nw_dst);
+ hash = mhash_add(hash, ((OVS_FORCE uint32_t) flow->tp_src << 16)
+ | (OVS_FORCE uint32_t) flow->tp_dst);
+ hash = mhash_add(hash, flow->nw_proto);
+
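+ /* 13 is the number of 5-tuple bytes hashed: two IPv4 addresses (8),
+ * two ports (4), and the protocol (1). */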
+ return mhash_finish(hash, 13);
+}
+
/* Hashes 'flow' based on its L2 through L4 protocol information. */
uint32_t
flow_hash_symmetric_l4(const struct flow *flow, uint32_t basis)
if (fields.eth_type == htons(ETH_TYPE_IP)) {
fields.ipv4_addr = flow->nw_src ^ flow->nw_dst;
fields.ip_proto = flow->nw_proto;
- if (fields.ip_proto == IPPROTO_TCP) {
+ if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
fields.tp_port = flow->tp_src ^ flow->tp_dst;
}
} else if (fields.eth_type == htons(ETH_TYPE_IPV6)) {
ipv6_addr[i] = a[i] ^ b[i];
}
fields.ip_proto = flow->nw_proto;
- if (fields.ip_proto == IPPROTO_TCP) {
+ if (fields.ip_proto == IPPROTO_TCP || fields.ip_proto == IPPROTO_SCTP) {
fields.tp_port = flow->tp_src ^ flow->tp_dst;
}
}
return jhash_bytes(&fields, sizeof fields, basis);
}
+/* Initialize a flow with random fields that matter for nx_hash_fields. */
+void
+flow_random_hash_fields(struct flow *flow)
+{
+ uint16_t rnd = random_uint16();
+
+ /* Initialize to all zeros. */
+ memset(flow, 0, sizeof *flow);
+
+ eth_addr_random(flow->dl_src);
+ eth_addr_random(flow->dl_dst);
+
+ flow->vlan_tci = (OVS_FORCE ovs_be16) (random_uint16() & VLAN_VID_MASK);
+
+ /* Make most of the random flows IPv4, some IPv6, and the rest random. */
+ flow->dl_type = rnd < 0x8000 ? htons(ETH_TYPE_IP) :
+ rnd < 0xc000 ? htons(ETH_TYPE_IPV6) : (OVS_FORCE ovs_be16)rnd;
+
+ if (dl_type_is_ip_any(flow->dl_type)) {
+ if (flow->dl_type == htons(ETH_TYPE_IP)) {
+ flow->nw_src = (OVS_FORCE ovs_be32)random_uint32();
+ flow->nw_dst = (OVS_FORCE ovs_be32)random_uint32();
+ } else {
+ random_bytes(&flow->ipv6_src, sizeof flow->ipv6_src);
+ random_bytes(&flow->ipv6_dst, sizeof flow->ipv6_dst);
+ }
+ /* Make most of the IP flows TCP, some UDP or SCTP, and the rest random. */
+ rnd = random_uint16();
+ flow->nw_proto = rnd < 0x8000 ? IPPROTO_TCP :
+ rnd < 0xc000 ? IPPROTO_UDP :
+ rnd < 0xd000 ? IPPROTO_SCTP : (uint8_t)rnd;
+ if (flow->nw_proto == IPPROTO_TCP ||
+ flow->nw_proto == IPPROTO_UDP ||
+ flow->nw_proto == IPPROTO_SCTP) {
+ flow->tp_src = (OVS_FORCE ovs_be16)random_uint16();
+ flow->tp_dst = (OVS_FORCE ovs_be16)random_uint16();
+ }
+ }
+}
+
/* Masks the fields in 'wc' that are used by the flow hash 'fields'. */
void
flow_mask_hash_fields(const struct flow *flow, struct flow_wildcards *wc,
}
if (is_ip_any(flow)) {
memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
- memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
- memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
+ flow_unwildcard_tp_ports(flow, wc);
}
wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
break;
default:
- NOT_REACHED();
+ OVS_NOT_REACHED();
}
}
return flow_hash_symmetric_l4(flow, basis);
}
- NOT_REACHED();
+ OVS_NOT_REACHED();
}
/* Returns a string representation of 'fields'. */
flow->vlan_tci |= htons((pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
}
+/* Returns the number of MPLS LSEs present in 'flow'.
+ *
+ * Returns 0 if the 'dl_type' of 'flow' is not an MPLS Ethernet type.
+ * Otherwise traverses 'flow''s MPLS label stack, stopping at the
+ * first entry that has the BoS bit set.  If no such entry exists, then
+ * the maximum number of LSEs that can be stored in 'flow' is returned.
+ */
+int
+flow_count_mpls_labels(const struct flow *flow, struct flow_wildcards *wc)
+{
+ if (wc) {
+ wc->masks.dl_type = OVS_BE16_MAX;
+ }
+ if (eth_type_mpls(flow->dl_type)) {
+ int i;
+ int len = FLOW_MAX_MPLS_LABELS;
+
+ for (i = 0; i < len; i++) {
+ if (wc) {
+ wc->masks.mpls_lse[i] |= htonl(MPLS_BOS_MASK);
+ }
+ if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
+ return i + 1;
+ }
+ }
+
+ return len;
+ } else {
+ return 0;
+ }
+}
+
+/* Returns the number of consecutive MPLS LSEs, starting at the
+ * innermost LSE, that are common to 'a' and 'b'.
+ *
+ * 'an' must be flow_count_mpls_labels(a).
+ * 'bn' must be flow_count_mpls_labels(b).
+ */
+int
+flow_count_common_mpls_labels(const struct flow *a, int an,
+ const struct flow *b, int bn,
+ struct flow_wildcards *wc)
+{
+ int min_n = MIN(an, bn);
+ if (min_n == 0) {
+ return 0;
+ } else {
+ int common_n = 0;
+ int a_last = an - 1;
+ int b_last = bn - 1;
+ int i;
+
+ for (i = 0; i < min_n; i++) {
+ if (wc) {
+ wc->masks.mpls_lse[a_last - i] = OVS_BE32_MAX;
+ wc->masks.mpls_lse[b_last - i] = OVS_BE32_MAX;
+ }
+ if (a->mpls_lse[a_last - i] != b->mpls_lse[b_last - i]) {
+ break;
+ } else {
+ common_n++;
+ }
+ }
+
+ return common_n;
+ }
+}
+
+/* Adds a new outermost MPLS label to 'flow' and changes 'flow''s Ethernet type
+ * to 'mpls_eth_type', which must be an MPLS Ethertype.
+ *
+ * If the new label is the first MPLS label in 'flow', it is generated as:
+ *
+ * - label: 2, if 'flow' is IPv6, otherwise 0.
+ *
+ * - TTL: IPv4 or IPv6 TTL, if present and nonzero, otherwise 64.
+ *
+ * - TC: IPv4 or IPv6 TOS, if present, otherwise 0.
+ *
+ * - BoS: 1.
+ *
+ * If the new label is the second or a later MPLS label in 'flow', it is
+ * generated as:
+ *
+ * - label: Copied from outer label.
+ *
+ * - TTL: Copied from outer label.
+ *
+ * - TC: Copied from outer label.
+ *
+ * - BoS: 0.
+ *
+ * 'n' must be flow_count_mpls_labels(flow). 'n' must be less than
+ * FLOW_MAX_MPLS_LABELS (because otherwise flow->mpls_lse[] would overflow).
+ */
+void
+flow_push_mpls(struct flow *flow, int n, ovs_be16 mpls_eth_type,
+ struct flow_wildcards *wc)
+{
+ ovs_assert(eth_type_mpls(mpls_eth_type));
+ ovs_assert(n < FLOW_MAX_MPLS_LABELS);
+
+ memset(wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
+ if (n) {
+ int i;
+
+ for (i = n; i >= 1; i--) {
+ flow->mpls_lse[i] = flow->mpls_lse[i - 1];
+ }
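+ /* The new outermost LSE inherits the old outermost LSE's fields, with
+ * the bottom-of-stack bit cleared. */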
+ flow->mpls_lse[0] = (flow->mpls_lse[1]
+ & htonl(~MPLS_BOS_MASK));
+ } else {
+ int label = 0; /* IPv4 Explicit Null. */
+ int tc = 0;
+ int ttl = 64;
+
+ if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
+ label = 2;
+ }
+
+ if (is_ip_any(flow)) {
+ tc = (flow->nw_tos & IP_DSCP_MASK) >> 2;
+ wc->masks.nw_tos |= IP_DSCP_MASK;
+
+ if (flow->nw_ttl) {
+ ttl = flow->nw_ttl;
+ }
+ wc->masks.nw_ttl = 0xff;
+ }
+
+ flow->mpls_lse[0] = set_mpls_lse_values(ttl, tc, 1, htonl(label));
+
+ /* Clear all L3 and L4 fields. */
+ BUILD_ASSERT(FLOW_WC_SEQ == 25);
+ memset((char *) flow + FLOW_SEGMENT_2_ENDS_AT, 0,
+ sizeof(struct flow) - FLOW_SEGMENT_2_ENDS_AT);
+ }
+ flow->dl_type = mpls_eth_type;
+}
+
+/* Tries to remove the outermost MPLS label from 'flow'. Returns true if
+ * successful, false otherwise. On success, sets 'flow''s Ethernet type to
+ * 'eth_type'.
+ *
+ * 'n' must be flow_count_mpls_labels(flow). */
+bool
+flow_pop_mpls(struct flow *flow, int n, ovs_be16 eth_type,
+ struct flow_wildcards *wc)
+{
+ int i;
+
+ if (n == 0) {
+ /* Nothing to pop. */
+ return false;
+ } else if (n == FLOW_MAX_MPLS_LABELS
+ && !(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
+ /* Can't pop because we don't know what to fill in mpls_lse[n - 1]. */
+ return false;
+ }
+
+ memset(wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
+ for (i = 1; i < n; i++) {
+ flow->mpls_lse[i - 1] = flow->mpls_lse[i];
+ }
+ flow->mpls_lse[n - 1] = 0;
+ flow->dl_type = eth_type;
+ return true;
+}
+
/* Sets the MPLS Label that 'flow' matches to 'label', which is interpreted
* as an OpenFlow 1.1 "mpls_label" value. */
void
-flow_set_mpls_label(struct flow *flow, ovs_be32 label)
+flow_set_mpls_label(struct flow *flow, int idx, ovs_be32 label)
{
- set_mpls_lse_label(&flow->mpls_lse, label);
+ set_mpls_lse_label(&flow->mpls_lse[idx], label);
}
/* Sets the MPLS TTL that 'flow' matches to 'ttl', which should be in the
* range 0...255. */
void
-flow_set_mpls_ttl(struct flow *flow, uint8_t ttl)
+flow_set_mpls_ttl(struct flow *flow, int idx, uint8_t ttl)
{
- set_mpls_lse_ttl(&flow->mpls_lse, ttl);
+ set_mpls_lse_ttl(&flow->mpls_lse[idx], ttl);
}
/* Sets the MPLS TC that 'flow' matches to 'tc', which should be in the
* range 0...7. */
void
-flow_set_mpls_tc(struct flow *flow, uint8_t tc)
+flow_set_mpls_tc(struct flow *flow, int idx, uint8_t tc)
{
- set_mpls_lse_tc(&flow->mpls_lse, tc);
+ set_mpls_lse_tc(&flow->mpls_lse[idx], tc);
}
/* Sets the MPLS BOS bit that 'flow' matches to which should be 0 or 1. */
void
-flow_set_mpls_bos(struct flow *flow, uint8_t bos)
+flow_set_mpls_bos(struct flow *flow, int idx, uint8_t bos)
{
- set_mpls_lse_bos(&flow->mpls_lse, bos);
+ set_mpls_lse_bos(&flow->mpls_lse[idx], bos);
+}
+
+/* Sets the entire MPLS LSE. */
+void
+flow_set_mpls_lse(struct flow *flow, int idx, ovs_be32 lse)
+{
+ flow->mpls_lse[idx] = lse;
+}
+
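+/* Appends to 'b' the L4 header that flow_extract() would parse as matching
+ * 'flow', including any ICMPv6 ND options, and returns the number of bytes
+ * appended, or 0 if 'flow' is a later IP fragment or has an unhandled
+ * protocol. */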
+static size_t
+flow_compose_l4(struct ofpbuf *b, const struct flow *flow)
+{
+ size_t l4_len = 0;
+
+ if (!(flow->nw_frag & FLOW_NW_FRAG_ANY)
+ || !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
+ if (flow->nw_proto == IPPROTO_TCP) {
+ struct tcp_header *tcp;
+
+ l4_len = sizeof *tcp;
+ tcp = ofpbuf_put_zeros(b, l4_len);
+ tcp->tcp_src = flow->tp_src;
+ tcp->tcp_dst = flow->tp_dst;
+ tcp->tcp_ctl = TCP_CTL(ntohs(flow->tcp_flags), 5);
+ } else if (flow->nw_proto == IPPROTO_UDP) {
+ struct udp_header *udp;
+
+ l4_len = sizeof *udp;
+ udp = ofpbuf_put_zeros(b, l4_len);
+ udp->udp_src = flow->tp_src;
+ udp->udp_dst = flow->tp_dst;
+ } else if (flow->nw_proto == IPPROTO_SCTP) {
+ struct sctp_header *sctp;
+
+ l4_len = sizeof *sctp;
+ sctp = ofpbuf_put_zeros(b, l4_len);
+ sctp->sctp_src = flow->tp_src;
+ sctp->sctp_dst = flow->tp_dst;
+ } else if (flow->nw_proto == IPPROTO_ICMP) {
+ struct icmp_header *icmp;
+
+ l4_len = sizeof *icmp;
+ icmp = ofpbuf_put_zeros(b, l4_len);
+ icmp->icmp_type = ntohs(flow->tp_src);
+ icmp->icmp_code = ntohs(flow->tp_dst);
+ icmp->icmp_csum = csum(icmp, ICMP_HEADER_LEN);
+ } else if (flow->nw_proto == IPPROTO_ICMPV6) {
+ struct icmp6_hdr *icmp;
+
+ l4_len = sizeof *icmp;
+ icmp = ofpbuf_put_zeros(b, l4_len);
+ icmp->icmp6_type = ntohs(flow->tp_src);
+ icmp->icmp6_code = ntohs(flow->tp_dst);
+
+ if (icmp->icmp6_code == 0 &&
+ (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
+ icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {
+ struct in6_addr *nd_target;
+ struct nd_opt_hdr *nd_opt;
+
+ l4_len += sizeof *nd_target;
+ nd_target = ofpbuf_put_zeros(b, sizeof *nd_target);
+ *nd_target = flow->nd_target;
+
+ if (!eth_addr_is_zero(flow->arp_sha)) {
+ l4_len += 8;
+ nd_opt = ofpbuf_put_zeros(b, 8);
+ nd_opt->nd_opt_len = 1;
+ nd_opt->nd_opt_type = ND_OPT_SOURCE_LINKADDR;
+ memcpy(nd_opt + 1, flow->arp_sha, ETH_ADDR_LEN);
+ }
+ if (!eth_addr_is_zero(flow->arp_tha)) {
+ l4_len += 8;
+ nd_opt = ofpbuf_put_zeros(b, 8);
+ nd_opt->nd_opt_len = 1;
+ nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR;
+ memcpy(nd_opt + 1, flow->arp_tha, ETH_ADDR_LEN);
+ }
+ }
+ icmp->icmp6_cksum = (OVS_FORCE uint16_t)
+ csum(icmp, (char *)ofpbuf_tail(b) - (char *)icmp);
+ }
+ }
+ return l4_len;
}
/* Puts into 'b' a packet that flow_extract() would parse as having the given
void
flow_compose(struct ofpbuf *b, const struct flow *flow)
{
+ size_t l4_len;
+
+ /* eth_compose() sets l3 pointer and makes sure it is 32-bit aligned. */
eth_compose(b, flow->dl_dst, flow->dl_src, ntohs(flow->dl_type), 0);
if (flow->dl_type == htons(FLOW_DL_TYPE_NONE)) {
- struct eth_header *eth = b->l2;
- eth->eth_type = htons(b->size);
+ struct eth_header *eth = ofpbuf_l2(b);
+ eth->eth_type = htons(ofpbuf_size(b));
return;
}
if (flow->vlan_tci & htons(VLAN_CFI)) {
- eth_push_vlan(b, flow->vlan_tci);
+ eth_push_vlan(b, htons(ETH_TYPE_VLAN), flow->vlan_tci);
}
if (flow->dl_type == htons(ETH_TYPE_IP)) {
struct ip_header *ip;
- b->l3 = ip = ofpbuf_put_zeros(b, sizeof *ip);
+ ip = ofpbuf_put_zeros(b, sizeof *ip);
ip->ip_ihl_ver = IP_IHL_VER(5, 4);
ip->ip_tos = flow->nw_tos;
ip->ip_ttl = flow->nw_ttl;
ip->ip_proto = flow->nw_proto;
- ip->ip_src = flow->nw_src;
- ip->ip_dst = flow->nw_dst;
+ put_16aligned_be32(&ip->ip_src, flow->nw_src);
+ put_16aligned_be32(&ip->ip_dst, flow->nw_dst);
if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
ip->ip_frag_off |= htons(IP_MORE_FRAGMENTS);
ip->ip_frag_off |= htons(100);
}
}
- if (!(flow->nw_frag & FLOW_NW_FRAG_ANY)
- || !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
- if (flow->nw_proto == IPPROTO_TCP) {
- struct tcp_header *tcp;
-
- b->l4 = tcp = ofpbuf_put_zeros(b, sizeof *tcp);
- tcp->tcp_src = flow->tp_src;
- tcp->tcp_dst = flow->tp_dst;
- tcp->tcp_ctl = TCP_CTL(0, 5);
- } else if (flow->nw_proto == IPPROTO_UDP) {
- struct udp_header *udp;
-
- b->l4 = udp = ofpbuf_put_zeros(b, sizeof *udp);
- udp->udp_src = flow->tp_src;
- udp->udp_dst = flow->tp_dst;
- } else if (flow->nw_proto == IPPROTO_ICMP) {
- struct icmp_header *icmp;
-
- b->l4 = icmp = ofpbuf_put_zeros(b, sizeof *icmp);
- icmp->icmp_type = ntohs(flow->tp_src);
- icmp->icmp_code = ntohs(flow->tp_dst);
- icmp->icmp_csum = csum(icmp, ICMP_HEADER_LEN);
- }
- }
- ip = b->l3;
- ip->ip_tot_len = htons((uint8_t *) b->data + b->size
- - (uint8_t *) b->l3);
+ ofpbuf_set_l4(b, ofpbuf_tail(b));
+
+ l4_len = flow_compose_l4(b, flow);
+
+ ip->ip_tot_len = htons(b->l4_ofs - b->l3_ofs + l4_len);
ip->ip_csum = csum(ip, sizeof *ip);
} else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
- /* XXX */
+ struct ovs_16aligned_ip6_hdr *nh;
+
+ nh = ofpbuf_put_zeros(b, sizeof *nh);
+ put_16aligned_be32(&nh->ip6_flow, htonl(6 << 28) |
+ htonl(flow->nw_tos << 20) | flow->ipv6_label);
+ nh->ip6_hlim = flow->nw_ttl;
+ nh->ip6_nxt = flow->nw_proto;
+
+ memcpy(&nh->ip6_src, &flow->ipv6_src, sizeof(nh->ip6_src));
+ memcpy(&nh->ip6_dst, &flow->ipv6_dst, sizeof(nh->ip6_dst));
+
+ ofpbuf_set_l4(b, ofpbuf_tail(b));
+
+ l4_len = flow_compose_l4(b, flow);
+
+ nh->ip6_plen = htons(l4_len);
} else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
flow->dl_type == htons(ETH_TYPE_RARP)) {
struct arp_eth_header *arp;
- b->l3 = arp = ofpbuf_put_zeros(b, sizeof *arp);
+ arp = ofpbuf_put_zeros(b, sizeof *arp);
+ ofpbuf_set_l3(b, arp);
arp->ar_hrd = htons(1);
arp->ar_pro = htons(ETH_TYPE_IP);
arp->ar_hln = ETH_ADDR_LEN;
if (flow->nw_proto == ARP_OP_REQUEST ||
flow->nw_proto == ARP_OP_REPLY) {
- arp->ar_spa = flow->nw_src;
- arp->ar_tpa = flow->nw_dst;
+ put_16aligned_be32(&arp->ar_spa, flow->nw_src);
+ put_16aligned_be32(&arp->ar_tpa, flow->nw_dst);
memcpy(arp->ar_sha, flow->arp_sha, ETH_ADDR_LEN);
memcpy(arp->ar_tha, flow->arp_tha, ETH_ADDR_LEN);
}
}
if (eth_type_mpls(flow->dl_type)) {
- b->l2_5 = b->l3;
- push_mpls(b, flow->dl_type, flow->mpls_lse);
+ int n;
+
+ b->l2_5_ofs = b->l3_ofs;
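+ /* Count the LSEs up to and including the bottom of stack, then push
+ * them innermost-first so the outermost LSE ends up on top. */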
+ for (n = 1; n < FLOW_MAX_MPLS_LABELS; n++) {
+ if (flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK)) {
+ break;
+ }
+ }
+ while (n > 0) {
+ push_mpls(b, flow->dl_type, flow->mpls_lse[--n]);
+ }
}
}
\f
static int
miniflow_n_values(const struct miniflow *flow)
{
- int n, i;
-
- n = 0;
- for (i = 0; i < MINI_N_MAPS; i++) {
- n += popcount(flow->map[i]);
- }
- return n;
+ return count_1bits(flow->map);
}
static uint32_t *
}
}
+/* Completes an initialization of 'dst' as a miniflow copy of 'src' begun by
+ * the caller. The caller must have already initialized 'dst->map' properly
+ * to indicate the significant uint32_t elements of 'src'. 'n' must be the
+ * number of 1-bits in 'dst->map'.
+ *
+ * Normally the significant elements are the ones that are non-zero. However,
+ * when a miniflow is initialized from a (mini)mask, the values can be zeroes,
+ * so that the flow and mask always have the same maps.
+ *
+ * This function initializes 'dst->values' (inline if it fits, otherwise with
+ * malloc()) and copies the uint32_t elements of 'src' indicated by
+ * 'dst->map' into it.  For example, if only bits 0 and 2 are set in
+ * 'dst->map', 'dst->values' receives the first and third uint32_t elements
+ * of 'src', in that order. */
+static void
+miniflow_init__(struct miniflow *dst, const struct flow *src, int n)
+{
+ const uint32_t *src_u32 = (const uint32_t *) src;
+ unsigned int ofs;
+ uint64_t map;
+
+ dst->values = miniflow_alloc_values(dst, n);
+ ofs = 0;
+ for (map = dst->map; map; map = zero_rightmost_1bit(map)) {
+ dst->values[ofs++] = src_u32[raw_ctz(map)];
+ }
+}
+
/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
* with miniflow_destroy(). */
void
miniflow_init(struct miniflow *dst, const struct flow *src)
{
const uint32_t *src_u32 = (const uint32_t *) src;
- unsigned int ofs;
unsigned int i;
int n;
/* Initialize dst->map, counting the number of nonzero elements. */
n = 0;
- memset(dst->map, 0, sizeof dst->map);
+ dst->map = 0;
+
for (i = 0; i < FLOW_U32S; i++) {
if (src_u32[i]) {
- dst->map[i / 32] |= 1u << (i % 32);
+ dst->map |= UINT64_C(1) << i;
n++;
}
}
- /* Initialize dst->values. */
- dst->values = miniflow_alloc_values(dst, n);
- ofs = 0;
- for (i = 0; i < MINI_N_MAPS; i++) {
- uint32_t map;
+ miniflow_init__(dst, src, n);
+}
- for (map = dst->map[i]; map; map = zero_rightmost_1bit(map)) {
- dst->values[ofs++] = src_u32[raw_ctz(map) + i * 32];
- }
- }
+/* Initializes 'dst' as a copy of 'src', using 'mask->map' as 'dst''s map. The
+ * caller must eventually free 'dst' with miniflow_destroy(). */
+void
+miniflow_init_with_minimask(struct miniflow *dst, const struct flow *src,
+ const struct minimask *mask)
+{
+ dst->map = mask->masks.map;
+ miniflow_init__(dst, src, miniflow_n_values(dst));
}
/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
miniflow_clone(struct miniflow *dst, const struct miniflow *src)
{
int n = miniflow_n_values(src);
- memcpy(dst->map, src->map, sizeof dst->map);
+ dst->map = src->map;
dst->values = miniflow_alloc_values(dst, n);
memcpy(dst->values, src->values, n * sizeof *dst->values);
}
+/* Initializes 'dst' with the data in 'src', destroying 'src'.
+ * The caller must eventually free 'dst' with miniflow_destroy(). */
+void
+miniflow_move(struct miniflow *dst, struct miniflow *src)
+{
+ if (src->values == src->inline_values) {
+ dst->values = dst->inline_values;
+ memcpy(dst->values, src->values,
+ miniflow_n_values(src) * sizeof *dst->values);
+ } else {
+ dst->values = src->values;
+ }
+ dst->map = src->map;
+}
+
/* Frees any memory owned by 'flow'. Does not free the storage in which 'flow'
* itself resides; the caller is responsible for that. */
void
static const uint32_t *
miniflow_get__(const struct miniflow *flow, unsigned int u32_ofs)
{
- if (!(flow->map[u32_ofs / 32] & (1u << (u32_ofs % 32)))) {
+ if (!(flow->map & (UINT64_C(1) << u32_ofs))) {
static const uint32_t zero = 0;
return &zero;
- } else {
- const uint32_t *p = flow->values;
-
- BUILD_ASSERT(MINI_N_MAPS == 2);
- if (u32_ofs < 32) {
- p += popcount(flow->map[0] & ((1u << u32_ofs) - 1));
- } else {
- p += popcount(flow->map[0]);
- p += popcount(flow->map[1] & ((1u << (u32_ofs - 32)) - 1));
- }
- return p;
}
+ return flow->values +
+ count_1bits(flow->map & ((UINT64_C(1) << u32_ofs) - 1));
}
/* Returns the uint32_t that would be at byte offset '4 * u32_ofs' if 'flow'
bool
miniflow_equal(const struct miniflow *a, const struct miniflow *b)
{
- int i;
+ const uint32_t *ap = a->values;
+ const uint32_t *bp = b->values;
+ const uint64_t a_map = a->map;
+ const uint64_t b_map = b->map;
+ uint64_t map;
- for (i = 0; i < MINI_N_MAPS; i++) {
- if (a->map[i] != b->map[i]) {
- return false;
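+ /* Equal miniflows may have different maps when one of them stores
+ * explicit zero values, so when the maps differ, compare field by field,
+ * treating an absent field as zero. */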
+ if (a_map == b_map) {
+ for (map = a_map; map; map = zero_rightmost_1bit(map)) {
+ if (*ap++ != *bp++) {
+ return false;
+ }
+ }
+ } else {
+ for (map = a_map | b_map; map; map = zero_rightmost_1bit(map)) {
+ uint64_t bit = rightmost_1bit(map);
+ uint64_t a_value = a_map & bit ? *ap++ : 0;
+ uint64_t b_value = b_map & bit ? *bp++ : 0;
+
+ if (a_value != b_value) {
+ return false;
+ }
}
}
- return !memcmp(a->values, b->values,
- miniflow_n_values(a) * sizeof *a->values);
+ return true;
}
/* Returns true if 'a' and 'b' are equal at the places where there are 1-bits
const struct minimask *mask)
{
const uint32_t *p;
- int i;
+ uint64_t map;
p = mask->masks.values;
- for (i = 0; i < MINI_N_MAPS; i++) {
- uint32_t map;
- for (map = mask->masks.map[i]; map; map = zero_rightmost_1bit(map)) {
- int ofs = raw_ctz(map) + i * 32;
+ for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
+ int ofs = raw_ctz(map);
- if ((miniflow_get(a, ofs) ^ miniflow_get(b, ofs)) & *p) {
- return false;
- }
- p++;
+ if ((miniflow_get(a, ofs) ^ miniflow_get(b, ofs)) & *p) {
+ return false;
}
+ p++;
}
return true;
{
const uint32_t *b_u32 = (const uint32_t *) b;
const uint32_t *p;
- int i;
+ uint64_t map;
p = mask->masks.values;
- for (i = 0; i < MINI_N_MAPS; i++) {
- uint32_t map;
- for (map = mask->masks.map[i]; map; map = zero_rightmost_1bit(map)) {
- int ofs = raw_ctz(map) + i * 32;
+ for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
+ int ofs = raw_ctz(map);
- if ((miniflow_get(a, ofs) ^ b_u32[ofs]) & *p) {
- return false;
- }
- p++;
+ if ((miniflow_get(a, ofs) ^ b_u32[ofs]) & *p) {
+ return false;
}
+ p++;
}
return true;
uint32_t
miniflow_hash(const struct miniflow *flow, uint32_t basis)
{
- BUILD_ASSERT_DECL(MINI_N_MAPS == 2);
- return hash_3words(flow->map[0], flow->map[1],
- hash_words(flow->values, miniflow_n_values(flow),
- basis));
+ const uint32_t *p = flow->values;
+ uint32_t hash = basis;
+ uint64_t hash_map = 0;
+ uint64_t map;
+
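+ /* Zero-valued elements contribute neither to the hash nor to 'hash_map',
+ * so the map of nonzero values is hashed instead of 'flow->map'. */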
+ for (map = flow->map; map; map = zero_rightmost_1bit(map)) {
+ if (*p) {
+ hash = mhash_add(hash, *p);
+ hash_map |= rightmost_1bit(map);
+ }
+ p++;
+ }
+ hash = mhash_add(hash, hash_map);
+ hash = mhash_add(hash, hash_map >> 32);
+
+ return mhash_finish(hash, p - flow->values);
}
/* Returns a hash value for the bits of 'flow' where there are 1-bits in
{
const uint32_t *p = mask->masks.values;
uint32_t hash;
- int i;
+ uint64_t map;
hash = basis;
- for (i = 0; i < MINI_N_MAPS; i++) {
- uint32_t map;
- for (map = mask->masks.map[i]; map; map = zero_rightmost_1bit(map)) {
- int ofs = raw_ctz(map) + i * 32;
-
- hash = mhash_add(hash, miniflow_get(flow, ofs) & *p);
- p++;
- }
+ for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
+ hash = mhash_add(hash, miniflow_get(flow, raw_ctz(map)) & *p++);
}
return mhash_finish(hash, (p - mask->masks.values) * 4);
flow_hash_in_minimask(const struct flow *flow, const struct minimask *mask,
uint32_t basis)
{
- const uint32_t *flow_u32 = (const uint32_t *) flow;
+ const uint32_t *flow_u32 = (const uint32_t *)flow;
const uint32_t *p = mask->masks.values;
uint32_t hash;
- int i;
+ uint64_t map;
hash = basis;
- for (i = 0; i < MINI_N_MAPS; i++) {
- uint32_t map;
+ for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
+ hash = mhash_add(hash, flow_u32[raw_ctz(map)] & *p++);
+ }
- for (map = mask->masks.map[i]; map; map = zero_rightmost_1bit(map)) {
- int ofs = raw_ctz(map) + i * 32;
+ return mhash_finish(hash, (p - mask->masks.values) * 4);
+}
- hash = mhash_add(hash, flow_u32[ofs] & *p);
- p++;
- }
+/* Returns a hash value for the bits of range [start, end) in 'flow',
+ * where there are 1-bits in 'mask', given 'hash'.
+ *
+ * The hash values returned by this function are the same as those returned by
+ * minimatch_hash_range(), only the form of the arguments differ. */
+uint32_t
+flow_hash_in_minimask_range(const struct flow *flow,
+ const struct minimask *mask,
+ uint8_t start, uint8_t end, uint32_t *basis)
+{
+ const uint32_t *flow_u32 = (const uint32_t *)flow;
+ unsigned int offset;
+ uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
+ &offset);
+ const uint32_t *p = mask->masks.values + offset;
+ uint32_t hash = *basis;
+
+ for (; map; map = zero_rightmost_1bit(map)) {
+ hash = mhash_add(hash, flow_u32[raw_ctz(map)] & *p++);
}
+ *basis = hash; /* Allow continuation from the unfinished value. */
return mhash_finish(hash, (p - mask->masks.values) * 4);
}
+
\f
/* Initializes 'dst' as a copy of 'src'. The caller must eventually free 'dst'
* with minimask_destroy(). */
miniflow_clone(&dst->masks, &src->masks);
}
+/* Initializes 'dst' with the data in 'src', destroying 'src'.
+ * The caller must eventually free 'dst' with minimask_destroy(). */
+void
+minimask_move(struct minimask *dst, struct minimask *src)
+{
+ miniflow_move(&dst->masks, &src->masks);
+}
+
/* Initializes 'dst_' as the bit-wise "and" of 'a_' and 'b_'.
*
* The caller must provide room for FLOW_U32S "uint32_t"s in 'storage', for use
struct miniflow *dst = &dst_->masks;
const struct miniflow *a = &a_->masks;
const struct miniflow *b = &b_->masks;
- int i, n;
+ uint64_t map;
+ int n = 0;
- n = 0;
dst->values = storage;
- for (i = 0; i < MINI_N_MAPS; i++) {
- uint32_t map;
-
- dst->map[i] = 0;
- for (map = a->map[i] & b->map[i]; map;
- map = zero_rightmost_1bit(map)) {
- int ofs = raw_ctz(map) + i * 32;
- uint32_t mask = miniflow_get(a, ofs) & miniflow_get(b, ofs);
-
- if (mask) {
- dst->map[i] |= rightmost_1bit(map);
- dst->values[n++] = mask;
- }
+
+ dst->map = 0;
+ for (map = a->map & b->map; map; map = zero_rightmost_1bit(map)) {
+ int ofs = raw_ctz(map);
+ uint32_t mask = miniflow_get(a, ofs) & miniflow_get(b, ofs);
+
+ if (mask) {
+ dst->map |= rightmost_1bit(map);
+ dst->values[n++] = mask;
}
}
}
{
const struct miniflow *a = &a_->masks;
const struct miniflow *b = &b_->masks;
- int i;
-
- for (i = 0; i < MINI_N_MAPS; i++) {
- uint32_t map;
+ uint64_t map;
- for (map = a->map[i] | b->map[i]; map;
- map = zero_rightmost_1bit(map)) {
- int ofs = raw_ctz(map) + i * 32;
- uint32_t a_u32 = miniflow_get(a, ofs);
- uint32_t b_u32 = miniflow_get(b, ofs);
+ for (map = a->map | b->map; map; map = zero_rightmost_1bit(map)) {
+ int ofs = raw_ctz(map);
+ uint32_t a_u32 = miniflow_get(a, ofs);
+ uint32_t b_u32 = miniflow_get(b, ofs);
- if ((a_u32 & b_u32) != b_u32) {
- return true;
- }
+ if ((a_u32 & b_u32) != b_u32) {
+ return true;
}
}
minimask_is_catchall(const struct minimask *mask_)
{
const struct miniflow *mask = &mask_->masks;
+ const uint32_t *p = mask->values;
+ uint64_t map;
- BUILD_ASSERT(MINI_N_MAPS == 2);
- return !(mask->map[0] | mask->map[1]);
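+ /* The map may have 1-bits even for all-zero mask values, so check the
+ * values themselves rather than the map. */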
+ for (map = mask->map; map; map = zero_rightmost_1bit(map)) {
+ if (*p++) {
+ return false;
+ }
+ }
+ return true;
}