X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=lib%2Fflow.c;h=6b78ae36f2d753b0b7283ed44b3dfc67185ac842;hb=f613a0d72c521ca3a4eeb2c29ac523f6fdf72667;hp=38dee7b061f9f2aa36fbb1fa3675c0628af5eea1;hpb=36956a7d33c9ee204fcb184484a5aaacbd9ecef8;p=sliver-openvswitch.git diff --git a/lib/flow.c b/lib/flow.c index 38dee7b06..6b78ae36f 100644 --- a/lib/flow.c +++ b/lib/flow.c @@ -16,15 +16,19 @@ #include #include #include "flow.h" +#include +#include #include #include +#include +#include #include #include #include "byte-order.h" #include "coverage.h" +#include "dpif.h" #include "dynamic-string.h" #include "hash.h" -#include "ofp-util.h" #include "ofpbuf.h" #include "openflow/openflow.h" #include "openvswitch/datapath-protocol.h" @@ -80,6 +84,12 @@ pull_icmp(struct ofpbuf *packet) return ofpbuf_try_pull(packet, ICMP_HEADER_LEN); } +static struct icmp6_hdr * +pull_icmpv6(struct ofpbuf *packet) +{ + return ofpbuf_try_pull(packet, sizeof(struct icmp6_hdr)); +} + static void parse_vlan(struct ofpbuf *b, struct flow *flow) { @@ -122,7 +132,182 @@ parse_ethertype(struct ofpbuf *b) return llc->snap.snap_type; } -/* Initializes 'flow' members from 'packet', 'tun_id', and 'in_port. +static int +parse_ipv6(struct ofpbuf *packet, struct flow *flow) +{ + const struct ip6_hdr *nh; + ovs_be32 tc_flow; + int nexthdr; + + nh = ofpbuf_try_pull(packet, sizeof *nh); + if (!nh) { + return EINVAL; + } + + nexthdr = nh->ip6_nxt; + + flow->ipv6_src = nh->ip6_src; + flow->ipv6_dst = nh->ip6_dst; + + tc_flow = get_unaligned_be32(&nh->ip6_flow); + flow->nw_tos = (ntohl(tc_flow) >> 4) & IP_DSCP_MASK; + flow->nw_proto = IPPROTO_NONE; + + while (1) { + if ((nexthdr != IPPROTO_HOPOPTS) + && (nexthdr != IPPROTO_ROUTING) + && (nexthdr != IPPROTO_DSTOPTS) + && (nexthdr != IPPROTO_AH) + && (nexthdr != IPPROTO_FRAGMENT)) { + /* It's either a terminal header (e.g., TCP, UDP) or one we + * don't understand. In either case, we're done with the + * packet, so use it to fill in 'nw_proto'. */ + break; + } + + /* We only verify that at least 8 bytes of the next header are + * available, but many of these headers are longer. Ensure that + * accesses within the extension header are within those first 8 + * bytes. All extension headers are required to be at least 8 + * bytes. */ + if (packet->size < 8) { + return EINVAL; + } + + if ((nexthdr == IPPROTO_HOPOPTS) + || (nexthdr == IPPROTO_ROUTING) + || (nexthdr == IPPROTO_DSTOPTS)) { + /* These headers, while different, have the fields we care about + * in the same location and with the same interpretation. */ + const struct ip6_ext *ext_hdr = (struct ip6_ext *)packet->data; + nexthdr = ext_hdr->ip6e_nxt; + if (!ofpbuf_try_pull(packet, (ext_hdr->ip6e_len + 1) * 8)) { + return EINVAL; + } + } else if (nexthdr == IPPROTO_AH) { + /* A standard AH definition isn't available, but the fields + * we care about are in the same location as the generic + * option header--only the header length is calculated + * differently. */ + const struct ip6_ext *ext_hdr = (struct ip6_ext *)packet->data; + nexthdr = ext_hdr->ip6e_nxt; + if (!ofpbuf_try_pull(packet, (ext_hdr->ip6e_len + 2) * 4)) { + return EINVAL; + } + } else if (nexthdr == IPPROTO_FRAGMENT) { + const struct ip6_frag *frag_hdr = (struct ip6_frag *)packet->data; + + nexthdr = frag_hdr->ip6f_nxt; + if (!ofpbuf_try_pull(packet, sizeof *frag_hdr)) { + return EINVAL; + } + + /* We only process the first fragment. 
*/ + if ((frag_hdr->ip6f_offlg & IP6F_OFF_MASK) != htons(0)) { + nexthdr = IPPROTO_FRAGMENT; + break; + } + } + } + + flow->nw_proto = nexthdr; + return 0; +} + +static void +parse_tcp(struct ofpbuf *packet, struct ofpbuf *b, struct flow *flow) +{ + const struct tcp_header *tcp = pull_tcp(b); + if (tcp) { + flow->tp_src = tcp->tcp_src; + flow->tp_dst = tcp->tcp_dst; + packet->l7 = b->data; + } +} + +static void +parse_udp(struct ofpbuf *packet, struct ofpbuf *b, struct flow *flow) +{ + const struct udp_header *udp = pull_udp(b); + if (udp) { + flow->tp_src = udp->udp_src; + flow->tp_dst = udp->udp_dst; + packet->l7 = b->data; + } +} + +static bool +parse_icmpv6(struct ofpbuf *b, struct flow *flow) +{ + const struct icmp6_hdr *icmp = pull_icmpv6(b); + + if (!icmp) { + return false; + } + + /* The ICMPv6 type and code fields use the 16-bit transport port + * fields, so we need to store them in 16-bit network byte order. */ + flow->icmp_type = htons(icmp->icmp6_type); + flow->icmp_code = htons(icmp->icmp6_code); + + if (icmp->icmp6_code == 0 && + (icmp->icmp6_type == ND_NEIGHBOR_SOLICIT || + icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) { + const struct in6_addr *nd_target; + + nd_target = ofpbuf_try_pull(b, sizeof *nd_target); + if (!nd_target) { + return false; + } + flow->nd_target = *nd_target; + + while (b->size >= 8) { + /* The minimum size of an option is 8 bytes, which also is + * the size of Ethernet link-layer options. */ + const struct nd_opt_hdr *nd_opt = b->data; + int opt_len = nd_opt->nd_opt_len * 8; + + if (!opt_len || opt_len > b->size) { + goto invalid; + } + + /* Store the link layer address if the appropriate option is + * provided. It is considered an error if the same link + * layer option is specified twice. */ + if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LINKADDR + && opt_len == 8) { + if (eth_addr_is_zero(flow->arp_sha)) { + memcpy(flow->arp_sha, nd_opt + 1, ETH_ADDR_LEN); + } else { + goto invalid; + } + } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LINKADDR + && opt_len == 8) { + if (eth_addr_is_zero(flow->arp_tha)) { + memcpy(flow->arp_tha, nd_opt + 1, ETH_ADDR_LEN); + } else { + goto invalid; + } + } + + if (!ofpbuf_try_pull(b, opt_len)) { + goto invalid; + } + } + } + + return true; + +invalid: + memset(&flow->nd_target, 0, sizeof(flow->nd_target)); + memset(flow->arp_sha, 0, sizeof(flow->arp_sha)); + memset(flow->arp_tha, 0, sizeof(flow->arp_tha)); + + return false; + +} + +/* Initializes 'flow' members from 'packet', 'tun_id', and 'ofp_in_port'. * Initializes 'packet' header pointers as follows: * * - packet->l2 to the start of the Ethernet header. @@ -138,7 +323,7 @@ parse_ethertype(struct ofpbuf *b) * present and has a correct length, and otherwise NULL. 
*/ int -flow_extract(struct ofpbuf *packet, ovs_be64 tun_id, uint16_t in_port, +flow_extract(struct ofpbuf *packet, ovs_be64 tun_id, uint16_t ofp_in_port, struct flow *flow) { struct ofpbuf b = *packet; @@ -149,7 +334,7 @@ flow_extract(struct ofpbuf *packet, ovs_be64 tun_id, uint16_t in_port, memset(flow, 0, sizeof *flow); flow->tun_id = tun_id; - flow->in_port = in_port; + flow->in_port = ofp_in_port; packet->l2 = b.data; packet->l3 = NULL; @@ -183,21 +368,11 @@ flow_extract(struct ofpbuf *packet, ovs_be64 tun_id, uint16_t in_port, flow->nw_proto = nh->ip_proto; packet->l4 = b.data; if (!IP_IS_FRAGMENT(nh->ip_frag_off)) { - if (flow->nw_proto == IP_TYPE_TCP) { - const struct tcp_header *tcp = pull_tcp(&b); - if (tcp) { - flow->tp_src = tcp->tcp_src; - flow->tp_dst = tcp->tcp_dst; - packet->l7 = b.data; - } - } else if (flow->nw_proto == IP_TYPE_UDP) { - const struct udp_header *udp = pull_udp(&b); - if (udp) { - flow->tp_src = udp->udp_src; - flow->tp_dst = udp->udp_dst; - packet->l7 = b.data; - } - } else if (flow->nw_proto == IP_TYPE_ICMP) { + if (flow->nw_proto == IPPROTO_TCP) { + parse_tcp(packet, &b, flow); + } else if (flow->nw_proto == IPPROTO_UDP) { + parse_udp(packet, &b, flow); + } else if (flow->nw_proto == IPPROTO_ICMP) { const struct icmp_header *icmp = pull_icmp(&b); if (icmp) { flow->icmp_type = htons(icmp->icmp_type); @@ -209,6 +384,23 @@ flow_extract(struct ofpbuf *packet, ovs_be64 tun_id, uint16_t in_port, retval = 1; } } + } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) { + + retval = parse_ipv6(&b, flow); + if (retval) { + return 0; + } + + packet->l4 = b.data; + if (flow->nw_proto == IPPROTO_TCP) { + parse_tcp(packet, &b, flow); + } else if (flow->nw_proto == IPPROTO_UDP) { + parse_udp(packet, &b, flow); + } else if (flow->nw_proto == IPPROTO_ICMPV6) { + if (parse_icmpv6(&b, flow)) { + packet->l7 = b.data; + } + } } else if (flow->dl_type == htons(ETH_TYPE_ARP)) { const struct arp_eth_header *arp = pull_arp(&b); if (arp && arp->ar_hrd == htons(1) @@ -224,9 +416,12 @@ flow_extract(struct ofpbuf *packet, ovs_be64 tun_id, uint16_t in_port, || (flow->nw_proto == ARP_OP_REPLY)) { flow->nw_src = arp->ar_spa; flow->nw_dst = arp->ar_tpa; + memcpy(flow->arp_sha, arp->ar_sha, ETH_ADDR_LEN); + memcpy(flow->arp_tha, arp->ar_tha, ETH_ADDR_LEN); } } } + return retval; } @@ -235,12 +430,12 @@ flow_extract(struct ofpbuf *packet, ovs_be64 tun_id, uint16_t in_port, */ void flow_extract_stats(const struct flow *flow, struct ofpbuf *packet, - struct odp_flow_stats *stats) + struct dpif_flow_stats *stats) { - memset(stats, '\0', sizeof(*stats)); + memset(stats, 0, sizeof(*stats)); if ((flow->dl_type == htons(ETH_TYPE_IP)) && packet->l4) { - if ((flow->nw_proto == IP_TYPE_TCP) && packet->l7) { + if ((flow->nw_proto == IPPROTO_TCP) && packet->l7) { struct tcp_header *tcp = packet->l4; stats->tcp_flags = TCP_FLAGS(tcp->tcp_ctl); } @@ -250,6 +445,66 @@ flow_extract_stats(const struct flow *flow, struct ofpbuf *packet, stats->n_packets = 1; } +/* For every bit of a field that is wildcarded in 'wildcards', sets the + * corresponding bit in 'flow' to zero. 
*/ +void +flow_zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards) +{ + const flow_wildcards_t wc = wildcards->wildcards; + int i; + + BUILD_ASSERT_DECL(FLOW_WC_SEQ == 1); + + for (i = 0; i < FLOW_N_REGS; i++) { + flow->regs[i] &= wildcards->reg_masks[i]; + } + flow->tun_id &= wildcards->tun_id_mask; + flow->nw_src &= wildcards->nw_src_mask; + flow->nw_dst &= wildcards->nw_dst_mask; + if (wc & FWW_IN_PORT) { + flow->in_port = 0; + } + flow->vlan_tci &= wildcards->vlan_tci_mask; + if (wc & FWW_DL_TYPE) { + flow->dl_type = 0; + } + if (wc & FWW_TP_SRC) { + flow->tp_src = 0; + } + if (wc & FWW_TP_DST) { + flow->tp_dst = 0; + } + if (wc & FWW_DL_SRC) { + memset(flow->dl_src, 0, sizeof flow->dl_src); + } + if (wc & FWW_DL_DST) { + flow->dl_dst[0] &= 0x01; + memset(&flow->dl_dst[1], 0, 5); + } + if (wc & FWW_ETH_MCAST) { + flow->dl_dst[0] &= 0xfe; + } + if (wc & FWW_NW_PROTO) { + flow->nw_proto = 0; + } + if (wc & FWW_NW_TOS) { + flow->nw_tos = 0; + } + if (wc & FWW_ARP_SHA) { + memset(flow->arp_sha, 0, sizeof flow->arp_sha); + } + if (wc & FWW_ARP_THA) { + memset(flow->arp_tha, 0, sizeof flow->arp_tha); + } + flow->ipv6_src = ipv6_addr_bitand(&flow->ipv6_src, + &wildcards->ipv6_src_mask); + flow->ipv6_dst = ipv6_addr_bitand(&flow->ipv6_dst, + &wildcards->ipv6_dst_mask); + if (wc & FWW_ND_TARGET) { + memset(&flow->nd_target, 0, sizeof flow->nd_target); + } +} + char * flow_to_string(const struct flow *flow) { @@ -262,7 +517,7 @@ void flow_format(struct ds *ds, const struct flow *flow) { ds_put_format(ds, "tunnel%#"PRIx64":in_port%04"PRIx16":tci(", - flow->tun_id, flow->in_port); + ntohll(flow->tun_id), flow->in_port); if (flow->vlan_tci) { ds_put_format(ds, "vlan%"PRIu16",pcp%d", vlan_tci_to_vid(flow->vlan_tci), @@ -271,20 +526,36 @@ flow_format(struct ds *ds, const struct flow *flow) ds_put_char(ds, '0'); } ds_put_format(ds, ") mac"ETH_ADDR_FMT"->"ETH_ADDR_FMT - " type%04"PRIx16 - " proto%"PRIu8 - " tos%"PRIu8 - " ip"IP_FMT"->"IP_FMT - " port%"PRIu16"->%"PRIu16, + " type%04"PRIx16, ETH_ADDR_ARGS(flow->dl_src), ETH_ADDR_ARGS(flow->dl_dst), - ntohs(flow->dl_type), - flow->nw_proto, - flow->nw_tos, - IP_ARGS(&flow->nw_src), - IP_ARGS(&flow->nw_dst), - ntohs(flow->tp_src), - ntohs(flow->tp_dst)); + ntohs(flow->dl_type)); + + if (flow->dl_type == htons(ETH_TYPE_IPV6)) { + ds_put_format(ds, " proto%"PRIu8" tos%"PRIu8" ipv6", + flow->nw_proto, flow->nw_tos); + print_ipv6_addr(ds, &flow->ipv6_src); + ds_put_cstr(ds, "->"); + print_ipv6_addr(ds, &flow->ipv6_dst); + + } else { + ds_put_format(ds, " proto%"PRIu8 + " tos%"PRIu8 + " ip"IP_FMT"->"IP_FMT, + flow->nw_proto, + flow->nw_tos, + IP_ARGS(&flow->nw_src), + IP_ARGS(&flow->nw_dst)); + } + if (flow->tp_src || flow->tp_dst) { + ds_put_format(ds, " port%"PRIu16"->%"PRIu16, + ntohs(flow->tp_src), ntohs(flow->tp_dst)); + } + if (!eth_addr_is_zero(flow->arp_sha) || !eth_addr_is_zero(flow->arp_tha)) { + ds_put_format(ds, " arp_ha"ETH_ADDR_FMT"->"ETH_ADDR_FMT, + ETH_ADDR_ARGS(flow->arp_sha), + ETH_ADDR_ARGS(flow->arp_tha)); + } } void @@ -305,6 +576,8 @@ flow_wildcards_init_catchall(struct flow_wildcards *wc) wc->tun_id_mask = htonll(0); wc->nw_src_mask = htonl(0); wc->nw_dst_mask = htonl(0); + wc->ipv6_src_mask = in6addr_any; + wc->ipv6_dst_mask = in6addr_any; memset(wc->reg_masks, 0, sizeof wc->reg_masks); wc->vlan_tci_mask = htons(0); wc->zero = 0; @@ -319,6 +592,8 @@ flow_wildcards_init_exact(struct flow_wildcards *wc) wc->tun_id_mask = htonll(UINT64_MAX); wc->nw_src_mask = htonl(UINT32_MAX); wc->nw_dst_mask = htonl(UINT32_MAX); + 
wc->ipv6_src_mask = in6addr_exact; + wc->ipv6_dst_mask = in6addr_exact; memset(wc->reg_masks, 0xff, sizeof wc->reg_masks); wc->vlan_tci_mask = htons(UINT16_MAX); wc->zero = 0; @@ -331,16 +606,48 @@ flow_wildcards_is_exact(const struct flow_wildcards *wc) { int i; + BUILD_ASSERT_DECL(FLOW_WC_SEQ == 1); + if (wc->wildcards || wc->tun_id_mask != htonll(UINT64_MAX) || wc->nw_src_mask != htonl(UINT32_MAX) || wc->nw_dst_mask != htonl(UINT32_MAX) - || wc->vlan_tci_mask != htons(UINT16_MAX)) { + || wc->vlan_tci_mask != htons(UINT16_MAX) + || !ipv6_mask_is_exact(&wc->ipv6_src_mask) + || !ipv6_mask_is_exact(&wc->ipv6_dst_mask)) { return false; } for (i = 0; i < FLOW_N_REGS; i++) { - if (wc->reg_masks[i] != htonl(UINT32_MAX)) { + if (wc->reg_masks[i] != UINT32_MAX) { + return false; + } + } + + return true; +} + +/* Returns true if 'wc' matches every packet, false if 'wc' fixes any bits or + * fields. */ +bool +flow_wildcards_is_catchall(const struct flow_wildcards *wc) +{ + int i; + + BUILD_ASSERT_DECL(FLOW_WC_SEQ == 1); + + if (wc->wildcards != FWW_ALL + || wc->tun_id_mask != htonll(0) + || wc->nw_src_mask != htonl(0) + || wc->nw_dst_mask != htonl(0) + || wc->vlan_tci_mask != htons(0) + || !ipv6_mask_is_any(&wc->ipv6_src_mask) + || !ipv6_mask_is_any(&wc->ipv6_dst_mask)) { + return false; + } + + for (i = 0; i < FLOW_N_REGS; i++) { + if (wc->reg_masks[i] != 0) { return false; } } @@ -362,6 +669,10 @@ flow_wildcards_combine(struct flow_wildcards *dst, dst->tun_id_mask = src1->tun_id_mask & src2->tun_id_mask; dst->nw_src_mask = src1->nw_src_mask & src2->nw_src_mask; dst->nw_dst_mask = src1->nw_dst_mask & src2->nw_dst_mask; + dst->ipv6_src_mask = ipv6_addr_bitand(&src1->ipv6_src_mask, + &src2->ipv6_src_mask); + dst->ipv6_dst_mask = ipv6_addr_bitand(&src1->ipv6_dst_mask, + &src2->ipv6_dst_mask); for (i = 0; i < FLOW_N_REGS; i++) { dst->reg_masks[i] = src1->reg_masks[i] & src2->reg_masks[i]; } @@ -370,13 +681,13 @@ flow_wildcards_combine(struct flow_wildcards *dst, /* Returns a hash of the wildcards in 'wc'. */ uint32_t -flow_wildcards_hash(const struct flow_wildcards *wc) +flow_wildcards_hash(const struct flow_wildcards *wc, uint32_t basis) { /* If you change struct flow_wildcards and thereby trigger this * assertion, please check that the new struct flow_wildcards has no holes * in it before you update the assertion. 
*/ - BUILD_ASSERT_DECL(sizeof *wc == 24 + FLOW_N_REGS * 4); - return hash_bytes(wc, sizeof *wc, 0); + BUILD_ASSERT_DECL(sizeof *wc == 56 + FLOW_N_REGS * 4); + return hash_bytes(wc, sizeof *wc, basis); } /* Returns true if 'a' and 'b' represent the same wildcards, false if they are @@ -391,7 +702,9 @@ flow_wildcards_equal(const struct flow_wildcards *a, || a->tun_id_mask != b->tun_id_mask || a->nw_src_mask != b->nw_src_mask || a->nw_dst_mask != b->nw_dst_mask - || a->vlan_tci_mask != b->vlan_tci_mask) { + || a->vlan_tci_mask != b->vlan_tci_mask + || !ipv6_addr_equals(&a->ipv6_src_mask, &b->ipv6_src_mask) + || !ipv6_addr_equals(&a->ipv6_dst_mask, &b->ipv6_dst_mask)) { return false; } @@ -411,6 +724,7 @@ flow_wildcards_has_extra(const struct flow_wildcards *a, const struct flow_wildcards *b) { int i; + struct in6_addr ipv6_masked; for (i = 0; i < FLOW_N_REGS; i++) { if ((a->reg_masks[i] & b->reg_masks[i]) != b->reg_masks[i]) { @@ -418,6 +732,16 @@ flow_wildcards_has_extra(const struct flow_wildcards *a, } } + ipv6_masked = ipv6_addr_bitand(&a->ipv6_src_mask, &b->ipv6_src_mask); + if (!ipv6_addr_equals(&ipv6_masked, &b->ipv6_src_mask)) { + return true; + } + + ipv6_masked = ipv6_addr_bitand(&a->ipv6_dst_mask, &b->ipv6_dst_mask); + if (!ipv6_addr_equals(&ipv6_masked, &b->ipv6_dst_mask)) { + return true; + } + return (a->wildcards & ~b->wildcards || (a->tun_id_mask & b->tun_id_mask) != b->tun_id_mask || (a->nw_src_mask & b->nw_src_mask) != b->nw_src_mask @@ -454,6 +778,37 @@ flow_wildcards_set_nw_dst_mask(struct flow_wildcards *wc, ovs_be32 mask) return set_nw_mask(&wc->nw_dst_mask, mask); } +static bool +set_ipv6_mask(struct in6_addr *maskp, const struct in6_addr *mask) +{ + if (ipv6_is_cidr(mask)) { + *maskp = *mask; + return true; + } else { + return false; + } +} + +/* Sets the IPv6 source wildcard mask to CIDR 'mask' (consisting of N + * high-order 1-bit and 128-N low-order 0-bits). Returns true if successful, + * false if 'mask' is not a CIDR mask. */ +bool +flow_wildcards_set_ipv6_src_mask(struct flow_wildcards *wc, + const struct in6_addr *mask) +{ + return set_ipv6_mask(&wc->ipv6_src_mask, mask); +} + +/* Sets the IPv6 destination wildcard mask to CIDR 'mask' (consisting of + * N high-order 1-bit and 128-N low-order 0-bits). Returns true if + * successful, false if 'mask' is not a CIDR mask. */ +bool +flow_wildcards_set_ipv6_dst_mask(struct flow_wildcards *wc, + const struct in6_addr *mask) +{ + return set_ipv6_mask(&wc->ipv6_dst_mask, mask); +} + /* Sets the wildcard mask for register 'idx' in 'wc' to 'mask'. * (A 0-bit indicates a wildcard bit.) */ void @@ -461,3 +816,236 @@ flow_wildcards_set_reg_mask(struct flow_wildcards *wc, int idx, uint32_t mask) { wc->reg_masks[idx] = mask; } + +/* Returns the wildcard bitmask for the Ethernet destination address + * that 'wc' specifies. The bitmask has a 0 in each bit that is wildcarded + * and a 1 in each bit that must match. 
*/ +const uint8_t * +flow_wildcards_to_dl_dst_mask(flow_wildcards_t wc) +{ + static const uint8_t no_wild[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + static const uint8_t addr_wild[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00}; + static const uint8_t mcast_wild[] = {0xfe, 0xff, 0xff, 0xff, 0xff, 0xff}; + static const uint8_t all_wild[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + + switch (wc & (FWW_DL_DST | FWW_ETH_MCAST)) { + case 0: return no_wild; + case FWW_DL_DST: return addr_wild; + case FWW_ETH_MCAST: return mcast_wild; + case FWW_DL_DST | FWW_ETH_MCAST: return all_wild; + } + NOT_REACHED(); +} + +/* Returns true if 'mask' is a valid wildcard bitmask for the Ethernet + * destination address. Valid bitmasks are either all-bits-0 or all-bits-1, + * except that the multicast bit may differ from the rest of the bits. So, + * there are four possible valid bitmasks: + * + * - 00:00:00:00:00:00 + * - 01:00:00:00:00:00 + * - fe:ff:ff:ff:ff:ff + * - ff:ff:ff:ff:ff:ff + * + * All other bitmasks are invalid. */ +bool +flow_wildcards_is_dl_dst_mask_valid(const uint8_t mask[ETH_ADDR_LEN]) +{ + switch (mask[0]) { + case 0x00: + case 0x01: + return (mask[1] | mask[2] | mask[3] | mask[4] | mask[5]) == 0x00; + + case 0xfe: + case 0xff: + return (mask[1] & mask[2] & mask[3] & mask[4] & mask[5]) == 0xff; + + default: + return false; + } +} + +/* Returns 'wc' with the FWW_DL_DST and FWW_ETH_MCAST bits modified + * appropriately to match 'mask'. + * + * This function will assert-fail if 'mask' is invalid. Only 'mask' values + * accepted by flow_wildcards_is_dl_dst_mask_valid() are allowed. */ +flow_wildcards_t +flow_wildcards_set_dl_dst_mask(flow_wildcards_t wc, + const uint8_t mask[ETH_ADDR_LEN]) +{ + assert(flow_wildcards_is_dl_dst_mask_valid(mask)); + + switch (mask[0]) { + case 0x00: + return wc | FWW_DL_DST | FWW_ETH_MCAST; + + case 0x01: + return (wc | FWW_DL_DST) & ~FWW_ETH_MCAST; + + case 0xfe: + return (wc & ~FWW_DL_DST) | FWW_ETH_MCAST; + + case 0xff: + return wc & ~(FWW_DL_DST | FWW_ETH_MCAST); + + default: + NOT_REACHED(); + } +} + +/* Hashes 'flow' based on its L2 through L4 protocol information. */ +uint32_t +flow_hash_symmetric_l4(const struct flow *flow, uint32_t basis) +{ + struct { + union { + ovs_be32 ipv4_addr; + struct in6_addr ipv6_addr; + }; + ovs_be16 eth_type; + ovs_be16 vlan_tci; + ovs_be16 tp_addr; + uint8_t eth_addr[ETH_ADDR_LEN]; + uint8_t ip_proto; + } fields; + + int i; + + memset(&fields, 0, sizeof fields); + for (i = 0; i < ETH_ADDR_LEN; i++) { + fields.eth_addr[i] = flow->dl_src[i] ^ flow->dl_dst[i]; + } + fields.vlan_tci = flow->vlan_tci & htons(VLAN_VID_MASK); + fields.eth_type = flow->dl_type; + + /* UDP source and destination port are not taken into account because they + * will not necessarily be symmetric in a bidirectional flow. */ + if (fields.eth_type == htons(ETH_TYPE_IP)) { + fields.ipv4_addr = flow->nw_src ^ flow->nw_dst; + fields.ip_proto = flow->nw_proto; + if (fields.ip_proto == IPPROTO_TCP) { + fields.tp_addr = flow->tp_src ^ flow->tp_dst; + } + } else if (fields.eth_type == htons(ETH_TYPE_IPV6)) { + const uint8_t *a = &flow->ipv6_src.s6_addr[0]; + const uint8_t *b = &flow->ipv6_dst.s6_addr[0]; + uint8_t *ipv6_addr = &fields.ipv6_addr.s6_addr[0]; + + for (i=0; i<16; i++) { + ipv6_addr[i] = a[i] ^ b[i]; + } + fields.ip_proto = flow->nw_proto; + if (fields.ip_proto == IPPROTO_TCP) { + fields.tp_addr = flow->tp_src ^ flow->tp_dst; + } + } + return hash_bytes(&fields, sizeof fields, basis); +} + +/* Hashes the portions of 'flow' designated by 'fields'. 
*/ +uint32_t +flow_hash_fields(const struct flow *flow, enum nx_hash_fields fields, + uint16_t basis) +{ + switch (fields) { + + case NX_HASH_FIELDS_ETH_SRC: + return hash_bytes(flow->dl_src, sizeof flow->dl_src, basis); + + case NX_HASH_FIELDS_SYMMETRIC_L4: + return flow_hash_symmetric_l4(flow, basis); + } + + NOT_REACHED(); +} + +/* Returns a string representation of 'fields'. */ +const char * +flow_hash_fields_to_str(enum nx_hash_fields fields) +{ + switch (fields) { + case NX_HASH_FIELDS_ETH_SRC: return "eth_src"; + case NX_HASH_FIELDS_SYMMETRIC_L4: return "symmetric_l4"; + default: return ""; + } +} + +/* Returns true if the value of 'fields' is supported. Otherwise false. */ +bool +flow_hash_fields_valid(enum nx_hash_fields fields) +{ + return fields == NX_HASH_FIELDS_ETH_SRC + || fields == NX_HASH_FIELDS_SYMMETRIC_L4; +} + +/* Puts into 'b' a packet that flow_extract() would parse as having the given + * 'flow'. + * + * (This is useful only for testing, obviously, and the packet isn't really + * valid. It hasn't got any checksums filled in, for one, and lots of fields + * are just zeroed.) */ +void +flow_compose(struct ofpbuf *b, const struct flow *flow) +{ + eth_compose(b, flow->dl_dst, flow->dl_src, ntohs(flow->dl_type), 0); + if (flow->dl_type == htons(FLOW_DL_TYPE_NONE)) { + struct eth_header *eth = b->l2; + eth->eth_type = htons(b->size); + return; + } + + if (flow->vlan_tci & htons(VLAN_CFI)) { + eth_push_vlan(b, flow->vlan_tci & ~htons(VLAN_CFI)); + } + + if (flow->dl_type == htons(ETH_TYPE_IP)) { + struct ip_header *ip; + + b->l3 = ip = ofpbuf_put_zeros(b, sizeof *ip); + ip->ip_ihl_ver = IP_IHL_VER(5, 4); + ip->ip_tos = flow->nw_tos; + ip->ip_proto = flow->nw_proto; + ip->ip_src = flow->nw_src; + ip->ip_dst = flow->nw_dst; + + if (flow->nw_proto == IPPROTO_TCP) { + struct tcp_header *tcp; + + b->l4 = tcp = ofpbuf_put_zeros(b, sizeof *tcp); + tcp->tcp_src = flow->tp_src; + tcp->tcp_dst = flow->tp_dst; + } else if (flow->nw_proto == IPPROTO_UDP) { + struct udp_header *udp; + + b->l4 = udp = ofpbuf_put_zeros(b, sizeof *udp); + udp->udp_src = flow->tp_src; + udp->udp_dst = flow->tp_dst; + } else if (flow->nw_proto == IPPROTO_ICMP) { + struct icmp_header *icmp; + + b->l4 = icmp = ofpbuf_put_zeros(b, sizeof *icmp); + icmp->icmp_type = ntohs(flow->tp_src); + icmp->icmp_code = ntohs(flow->tp_dst); + } + } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) { + /* XXX */ + } else if (flow->dl_type == htons(ETH_TYPE_ARP)) { + struct arp_eth_header *arp; + + b->l3 = arp = ofpbuf_put_zeros(b, sizeof *arp); + arp->ar_hrd = htons(1); + arp->ar_pro = htons(ETH_TYPE_IP); + arp->ar_hln = ETH_ADDR_LEN; + arp->ar_pln = 4; + arp->ar_op = htons(flow->nw_proto); + + if (flow->nw_proto == ARP_OP_REQUEST || + flow->nw_proto == ARP_OP_REPLY) { + arp->ar_spa = flow->nw_src; + arp->ar_tpa = flow->nw_dst; + memcpy(arp->ar_sha, flow->arp_sha, ETH_ADDR_LEN); + memcpy(arp->ar_tha, flow->arp_tha, ETH_ADDR_LEN); + } + } +}
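A minimal, self-contained sketch of the idea behind flow_hash_symmetric_l4() above. It uses a hypothetical, simplified five-tuple struct and a local FNV-1a hash in place of OVS's struct flow and hash_bytes(), so it only illustrates the symmetry property: because addresses and TCP ports are XOR-folded before hashing, both directions of a connection hash to the same value (UDP ports are left out of the real function because they are not necessarily symmetric in a bidirectional flow).

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical, simplified five-tuple; not OVS's struct flow. */
struct five_tuple {
    uint32_t ip_src, ip_dst;
    uint16_t tp_src, tp_dst;
    uint8_t ip_proto;
};

/* Stand-in for hash_bytes(): 32-bit FNV-1a seeded with 'basis'. */
static uint32_t
fnv1a(const void *p, size_t n, uint32_t basis)
{
    const uint8_t *b = p;
    uint32_t hash = UINT32_C(0x811c9dc5) ^ basis;
    size_t i;

    for (i = 0; i < n; i++) {
        hash = (hash ^ b[i]) * UINT32_C(0x01000193);
    }
    return hash;
}

static uint32_t
symmetric_hash(const struct five_tuple *ft, uint32_t basis)
{
    struct {
        uint32_t ip_xor;            /* src ^ dst: same in both directions. */
        uint16_t tp_xor;            /* TCP ports folded the same way. */
        uint8_t ip_proto;
    } fields;

    memset(&fields, 0, sizeof fields);  /* Zero padding so it hashes stably. */
    fields.ip_xor = ft->ip_src ^ ft->ip_dst;
    fields.ip_proto = ft->ip_proto;
    if (ft->ip_proto == 6) {            /* TCP */
        fields.tp_xor = ft->tp_src ^ ft->tp_dst;
    }
    return fnv1a(&fields, sizeof fields, basis);
}

int
main(void)
{
    struct five_tuple fwd = { 0x0a000001, 0x0a000002, 12345, 80, 6 };
    struct five_tuple rev = { 0x0a000002, 0x0a000001, 80, 12345, 6 };

    /* Both directions of the connection fold to identical 'fields'. */
    assert(symmetric_hash(&fwd, 0) == symmetric_hash(&rev, 0));
    printf("symmetric hash: %08"PRIx32"\n", symmetric_hash(&fwd, 0));
    return 0;
}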