static struct cls_table *insert_table(struct classifier *,
const struct flow_wildcards *);
-static struct cls_table *classifier_first_table(const struct classifier *);
-static struct cls_table *classifier_next_table(const struct classifier *,
- const struct cls_table *);
static void destroy_table(struct classifier *, struct cls_table *);
static struct cls_rule *find_match(const struct cls_table *,
static bool flow_equal_except(const struct flow *, const struct flow *,
const struct flow_wildcards *);
-static void zero_wildcards(struct flow *, const struct flow_wildcards *);
/* Iterates RULE over HEAD and all of the cls_rules on HEAD->list. */
#define FOR_EACH_RULE_IN_LIST(RULE, HEAD) \
static struct cls_rule *next_rule_in_list__(struct cls_rule *);
static struct cls_rule *next_rule_in_list(struct cls_rule *);
-static struct cls_table *
-cls_table_from_hmap_node(const struct hmap_node *node)
-{
- return node ? CONTAINER_OF(node, struct cls_table, hmap_node) : NULL;
-}
-
/* Converts the flow in 'flow' into a cls_rule in 'rule', with the given
* 'wildcards' and 'priority'. */
void
unsigned int priority, struct cls_rule *rule)
{
rule->flow = *flow;
+ rule->flow.priority = 0;
flow_wildcards_init_exact(&rule->wc);
rule->priority = priority;
}
void
cls_rule_zero_wildcarded_fields(struct cls_rule *rule)
{
- zero_wildcards(&rule->flow, &rule->wc);
+ flow_zero_wildcards(&rule->flow, &rule->wc);
}
void
}
void
-cls_rule_set_in_port(struct cls_rule *rule, uint16_t odp_port)
+cls_rule_set_in_port(struct cls_rule *rule, uint16_t ofp_port)
{
rule->wc.wildcards &= ~FWW_IN_PORT;
- rule->flow.in_port = odp_port;
+ rule->flow.in_port = ofp_port;
}
void
memcpy(rule->flow.dl_src, dl_src, ETH_ADDR_LEN);
}
+/* Modifies 'rule' so that the Ethernet address must match 'dl_dst' exactly. */
void
cls_rule_set_dl_dst(struct cls_rule *rule, const uint8_t dl_dst[ETH_ADDR_LEN])
{
memcpy(rule->flow.dl_dst, dl_dst, ETH_ADDR_LEN);
}
+/* Modifies 'rule' so that the Ethernet address must match 'dl_dst' after each
+ * byte is ANDed with the appropriate byte in 'mask'.
+ *
+ * This function will assert-fail if 'mask' is invalid. Only 'mask' values
+ * accepted by flow_wildcards_is_dl_dst_mask_valid() are allowed. */
+void
+cls_rule_set_dl_dst_masked(struct cls_rule *rule,
+ const uint8_t dl_dst[ETH_ADDR_LEN],
+ const uint8_t mask[ETH_ADDR_LEN])
+{
+ flow_wildcards_t *wc = &rule->wc.wildcards;
+ size_t i;
+
+ *wc = flow_wildcards_set_dl_dst_mask(*wc, mask);
+ /* Store the address with the wildcarded (mask-zero) bits cleared. */
+ for (i = 0; i < ETH_ADDR_LEN; i++) {
+ rule->flow.dl_dst[i] = dl_dst[i] & mask[i];
+ }
+}
+
void
cls_rule_set_dl_tci(struct cls_rule *rule, ovs_be16 tci)
{
void
cls_rule_set_dl_vlan(struct cls_rule *rule, ovs_be16 dl_vlan)
{
+ flow_set_vlan_vid(&rule->flow, dl_vlan);
if (dl_vlan == htons(OFP_VLAN_NONE)) {
- cls_rule_set_dl_tci(rule, htons(0));
+ rule->wc.vlan_tci_mask = htons(UINT16_MAX);
} else {
- dl_vlan &= htons(VLAN_VID_MASK);
- rule->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
- rule->flow.vlan_tci |= htons(VLAN_CFI) | dl_vlan;
rule->wc.vlan_tci_mask |= htons(VLAN_VID_MASK | VLAN_CFI);
}
}
void
cls_rule_set_dl_vlan_pcp(struct cls_rule *rule, uint8_t dl_vlan_pcp)
{
- dl_vlan_pcp &= 0x07;
- rule->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
- rule->flow.vlan_tci |= htons((dl_vlan_pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
+ flow_set_vlan_pcp(&rule->flow, dl_vlan_pcp);
rule->wc.vlan_tci_mask |= htons(VLAN_CFI | VLAN_PCP_MASK);
}
void
cls_rule_set_nw_src(struct cls_rule *rule, ovs_be32 nw_src)
{
- cls_rule_set_nw_src_masked(rule, nw_src, htonl(UINT32_MAX));
+ rule->flow.nw_src = nw_src;
+ rule->wc.nw_src_mask = htonl(UINT32_MAX);
}
-bool
-cls_rule_set_nw_src_masked(struct cls_rule *rule, ovs_be32 ip, ovs_be32 mask)
+void
+cls_rule_set_nw_src_masked(struct cls_rule *rule,
+ ovs_be32 nw_src, ovs_be32 mask)
{
- if (flow_wildcards_set_nw_src_mask(&rule->wc, mask)) {
- rule->flow.nw_src = ip & mask;
- return true;
- } else {
- return false;
- }
+ rule->flow.nw_src = nw_src & mask;
+ rule->wc.nw_src_mask = mask;
}
void
cls_rule_set_nw_dst(struct cls_rule *rule, ovs_be32 nw_dst)
{
- cls_rule_set_nw_dst_masked(rule, nw_dst, htonl(UINT32_MAX));
+ rule->flow.nw_dst = nw_dst;
+ rule->wc.nw_dst_mask = htonl(UINT32_MAX);
}
-bool
+void
cls_rule_set_nw_dst_masked(struct cls_rule *rule, ovs_be32 ip, ovs_be32 mask)
{
- if (flow_wildcards_set_nw_dst_mask(&rule->wc, mask)) {
- rule->flow.nw_dst = ip & mask;
- return true;
- } else {
- return false;
- }
+ rule->flow.nw_dst = ip & mask;
+ rule->wc.nw_dst_mask = mask;
}
void
-cls_rule_set_nw_tos(struct cls_rule *rule, uint8_t nw_tos)
+cls_rule_set_nw_dscp(struct cls_rule *rule, uint8_t nw_dscp)
{
- rule->wc.wildcards &= ~FWW_NW_TOS;
- rule->flow.nw_tos = nw_tos & IP_DSCP_MASK;
+ rule->wc.wildcards &= ~FWW_NW_DSCP;
+ rule->flow.nw_tos &= ~IP_DSCP_MASK;
+ rule->flow.nw_tos |= nw_dscp & IP_DSCP_MASK;
+}
+
+/* Modifies 'rule' so that the bits of the IP TOS byte covered by IP_ECN_MASK
+ * must match 'nw_ecn' exactly. */
+void
+cls_rule_set_nw_ecn(struct cls_rule *rule, uint8_t nw_ecn)
+{
+ rule->wc.wildcards &= ~FWW_NW_ECN;
+ rule->flow.nw_tos &= ~IP_ECN_MASK;
+ rule->flow.nw_tos |= nw_ecn & IP_ECN_MASK;
+}
+
+/* Modifies 'rule' so that the IP TTL field must match 'nw_ttl' exactly. */
+void
+cls_rule_set_nw_ttl(struct cls_rule *rule, uint8_t nw_ttl)
+{
+ rule->wc.wildcards &= ~FWW_NW_TTL;
+ rule->flow.nw_ttl = nw_ttl;
+}
+
+/* Modifies 'rule' so that all of the fragment-related bits in
+ * FLOW_NW_FRAG_MASK must match 'nw_frag' exactly. */
+void
+cls_rule_set_nw_frag(struct cls_rule *rule, uint8_t nw_frag)
+{
+ rule->wc.nw_frag_mask |= FLOW_NW_FRAG_MASK;
+ rule->flow.nw_frag = nw_frag;
+}
+
+/* Modifies 'rule' so that only the fragment-related bits selected by 'mask'
+ * must match the corresponding bits in 'nw_frag'. */
+void
+cls_rule_set_nw_frag_masked(struct cls_rule *rule,
+ uint8_t nw_frag, uint8_t mask)
+{
+ /* Store 'nw_frag' with the unmasked bits cleared. */
+ rule->flow.nw_frag = nw_frag & mask;
+ rule->wc.nw_frag_mask = mask;
}
void
cls_rule_set_icmp_type(struct cls_rule *rule, uint8_t icmp_type)
{
rule->wc.wildcards &= ~FWW_TP_SRC;
- rule->flow.icmp_type = htons(icmp_type);
-
+ rule->flow.tp_src = htons(icmp_type);
}
void
cls_rule_set_icmp_code(struct cls_rule *rule, uint8_t icmp_code)
{
rule->wc.wildcards &= ~FWW_TP_DST;
- rule->flow.icmp_code = htons(icmp_code);
+ rule->flow.tp_dst = htons(icmp_code);
}
void
memcpy(rule->flow.arp_tha, tha, ETH_ADDR_LEN);
}
+/* Modifies 'rule' so that the IPv6 source address must match 'src'
+ * exactly. */
+void
+cls_rule_set_ipv6_src(struct cls_rule *rule, const struct in6_addr *src)
+{
+ rule->flow.ipv6_src = *src;
+ rule->wc.ipv6_src_mask = in6addr_exact;
+}
+
+/* Modifies 'rule' so that the IPv6 source address must match 'src' on the
+ * bits set in 'mask'. */
+void
+cls_rule_set_ipv6_src_masked(struct cls_rule *rule, const struct in6_addr *src,
+ const struct in6_addr *mask)
+{
+ /* Store the address pre-ANDed with 'mask'. */
+ rule->flow.ipv6_src = ipv6_addr_bitand(src, mask);
+ rule->wc.ipv6_src_mask = *mask;
+}
+
+/* Modifies 'rule' so that the IPv6 destination address must match 'dst'
+ * exactly. */
+void
+cls_rule_set_ipv6_dst(struct cls_rule *rule, const struct in6_addr *dst)
+{
+ rule->flow.ipv6_dst = *dst;
+ rule->wc.ipv6_dst_mask = in6addr_exact;
+}
+
+/* Modifies 'rule' so that the IPv6 destination address must match 'dst' on
+ * the bits set in 'mask'. */
+void
+cls_rule_set_ipv6_dst_masked(struct cls_rule *rule, const struct in6_addr *dst,
+ const struct in6_addr *mask)
+{
+ /* Store the address pre-ANDed with 'mask'. */
+ rule->flow.ipv6_dst = ipv6_addr_bitand(dst, mask);
+ rule->wc.ipv6_dst_mask = *mask;
+}
+
+/* Modifies 'rule' so that the IPv6 flow label must match 'ipv6_label'
+ * exactly. */
+void
+cls_rule_set_ipv6_label(struct cls_rule *rule, ovs_be32 ipv6_label)
+{
+ rule->wc.wildcards &= ~FWW_IPV6_LABEL;
+ rule->flow.ipv6_label = ipv6_label;
+}
+
+/* Modifies 'rule' so that the ND target address must match 'target'
+ * exactly. */
+void
+cls_rule_set_nd_target(struct cls_rule *rule, const struct in6_addr *target)
+{
+ rule->wc.wildcards &= ~FWW_ND_TARGET;
+ rule->flow.nd_target = *target;
+}
+
/* Returns true if 'a' and 'b' have the same priority, wildcard the same
* fields, and have the same values for fixed fields, otherwise false. */
bool
&& flow_equal(&a->flow, &b->flow));
}
+/* Returns a hash value for the flow, wildcards, and priority in 'rule',
+ * starting from 'basis'. */
+uint32_t
+cls_rule_hash(const struct cls_rule *rule, uint32_t basis)
+{
+ /* Chain the three components: flow first, then wildcards, then priority,
+ * so that the result covers everything cls_rule_equal() compares. */
+ uint32_t h0 = flow_hash(&rule->flow, basis);
+ uint32_t h1 = flow_wildcards_hash(&rule->wc, h0);
+ return hash_int(rule->priority, h1);
+}
+
+
static void
format_ip_netmask(struct ds *s, const char *name, ovs_be32 ip,
ovs_be32 netmask)
{
if (netmask) {
- ds_put_format(s, "%s="IP_FMT, name, IP_ARGS(&ip));
- if (netmask != htonl(UINT32_MAX)) {
- if (ip_is_cidr(netmask)) {
- int wcbits = ofputil_netmask_to_wcbits(netmask);
- ds_put_format(s, "/%d", 32 - wcbits);
- } else {
- ds_put_format(s, "/"IP_FMT, IP_ARGS(&netmask));
- }
- }
+ ds_put_format(s, "%s=", name);
+ ip_format_masked(ip, netmask, s);
+ ds_put_char(s, ',');
+ }
+}
+
+/* Appends "<name>=" followed by the masked IPv6 address 'addr'/'netmask' and
+ * a trailing comma to 's'. Appends nothing at all if 'netmask' is
+ * all-wildcarded (ipv6_mask_is_any()). */
+static void
+format_ipv6_netmask(struct ds *s, const char *name,
+ const struct in6_addr *addr,
+ const struct in6_addr *netmask)
+{
+ if (!ipv6_mask_is_any(netmask)) {
+ ds_put_format(s, "%s=", name);
+ print_ipv6_masked(s, addr, netmask);
+ ds_put_char(s, ',');
+ }
+}
int i;
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 7);
+
if (rule->priority != OFP_DEFAULT_PRIORITY) {
ds_put_format(s, "priority=%d,", rule->priority);
}
} else {
ds_put_cstr(s, "ip,");
}
+ } else if (f->dl_type == htons(ETH_TYPE_IPV6)) {
+ if (!(w & FWW_NW_PROTO)) {
+ skip_proto = true;
+ if (f->nw_proto == IPPROTO_ICMPV6) {
+ ds_put_cstr(s, "icmp6,");
+ } else if (f->nw_proto == IPPROTO_TCP) {
+ ds_put_cstr(s, "tcp6,");
+ } else if (f->nw_proto == IPPROTO_UDP) {
+ ds_put_cstr(s, "udp6,");
+ } else {
+ ds_put_cstr(s, "ipv6,");
+ skip_proto = false;
+ }
+ } else {
+ ds_put_cstr(s, "ipv6,");
+ }
} else if (f->dl_type == htons(ETH_TYPE_ARP)) {
ds_put_cstr(s, "arp,");
} else {
break;
}
if (!(w & FWW_IN_PORT)) {
- ds_put_format(s, "in_port=%"PRIu16",",
- odp_port_to_ofp_port(f->in_port));
+ ds_put_format(s, "in_port=%"PRIu16",", f->in_port);
}
if (wc->vlan_tci_mask) {
ovs_be16 vid_mask = wc->vlan_tci_mask & htons(VLAN_VID_MASK);
ds_put_format(s, "dl_vlan_pcp=%d,",
vlan_tci_to_pcp(f->vlan_tci));
}
+ } else if (wc->vlan_tci_mask == htons(0xffff)) {
+ ds_put_format(s, "vlan_tci=0x%04"PRIx16",", ntohs(f->vlan_tci));
} else {
ds_put_format(s, "vlan_tci=0x%04"PRIx16"/0x%04"PRIx16",",
ntohs(f->vlan_tci), ntohs(wc->vlan_tci_mask));
if (!skip_type && !(w & FWW_DL_TYPE)) {
ds_put_format(s, "dl_type=0x%04"PRIx16",", ntohs(f->dl_type));
}
- format_ip_netmask(s, "nw_src", f->nw_src, wc->nw_src_mask);
- format_ip_netmask(s, "nw_dst", f->nw_dst, wc->nw_dst_mask);
+ if (f->dl_type == htons(ETH_TYPE_IPV6)) {
+ format_ipv6_netmask(s, "ipv6_src", &f->ipv6_src, &wc->ipv6_src_mask);
+ format_ipv6_netmask(s, "ipv6_dst", &f->ipv6_dst, &wc->ipv6_dst_mask);
+ if (!(w & FWW_IPV6_LABEL)) {
+ ds_put_format(s, "ipv6_label=0x%05"PRIx32",", ntohl(f->ipv6_label));
+ }
+ } else {
+ format_ip_netmask(s, "nw_src", f->nw_src, wc->nw_src_mask);
+ format_ip_netmask(s, "nw_dst", f->nw_dst, wc->nw_dst_mask);
+ }
if (!skip_proto && !(w & FWW_NW_PROTO)) {
if (f->dl_type == htons(ETH_TYPE_ARP)) {
- ds_put_format(s, "opcode=%"PRIu8",", f->nw_proto);
+ ds_put_format(s, "arp_op=%"PRIu8",", f->nw_proto);
} else {
ds_put_format(s, "nw_proto=%"PRIu8",", f->nw_proto);
}
ETH_ADDR_ARGS(f->arp_tha));
}
}
- if (!(w & FWW_NW_TOS)) {
- ds_put_format(s, "nw_tos=%"PRIu8",", f->nw_tos);
+ if (!(w & FWW_NW_DSCP)) {
+ ds_put_format(s, "nw_tos=%"PRIu8",", f->nw_tos & IP_DSCP_MASK);
+ }
+ if (!(w & FWW_NW_ECN)) {
+ ds_put_format(s, "nw_ecn=%"PRIu8",", f->nw_tos & IP_ECN_MASK);
+ }
+ if (!(w & FWW_NW_TTL)) {
+ ds_put_format(s, "nw_ttl=%"PRIu8",", f->nw_ttl);
+ }
+ switch (wc->nw_frag_mask) {
+ case FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER:
+ ds_put_format(s, "nw_frag=%s,",
+ f->nw_frag & FLOW_NW_FRAG_ANY
+ ? (f->nw_frag & FLOW_NW_FRAG_LATER ? "later" : "first")
+ : (f->nw_frag & FLOW_NW_FRAG_LATER ? "<error>" : "no"));
+ break;
+
+ case FLOW_NW_FRAG_ANY:
+ ds_put_format(s, "nw_frag=%s,",
+ f->nw_frag & FLOW_NW_FRAG_ANY ? "yes" : "no");
+ break;
+
+ case FLOW_NW_FRAG_LATER:
+ ds_put_format(s, "nw_frag=%s,",
+ f->nw_frag & FLOW_NW_FRAG_LATER ? "later" : "not_later");
+ break;
}
if (f->nw_proto == IPPROTO_ICMP) {
if (!(w & FWW_TP_SRC)) {
if (!(w & FWW_TP_DST)) {
ds_put_format(s, "icmp_code=%"PRIu16",", ntohs(f->tp_dst));
}
- } else {
+ } else if (f->nw_proto == IPPROTO_ICMPV6) {
+ if (!(w & FWW_TP_SRC)) {
+ ds_put_format(s, "icmp_type=%"PRIu16",", ntohs(f->tp_src));
+ }
+ if (!(w & FWW_TP_DST)) {
+ ds_put_format(s, "icmp_code=%"PRIu16",", ntohs(f->tp_dst));
+ }
+ if (!(w & FWW_ND_TARGET)) {
+ ds_put_cstr(s, "nd_target=");
+ print_ipv6_addr(s, &f->nd_target);
+ ds_put_char(s, ',');
+ }
+ if (!(w & FWW_ARP_SHA)) {
+ ds_put_format(s, "nd_sll="ETH_ADDR_FMT",",
+ ETH_ADDR_ARGS(f->arp_sha));
+ }
+ if (!(w & FWW_ARP_THA)) {
+ ds_put_format(s, "nd_tll="ETH_ADDR_FMT",",
+ ETH_ADDR_ARGS(f->arp_tha));
+ }
+ } else {
if (!(w & FWW_TP_SRC)) {
ds_put_format(s, "tp_src=%"PRIu16",", ntohs(f->tp_src));
}
* rule, even rules that cannot have any effect because the new rule matches a
* superset of their flows and has higher priority. */
struct cls_rule *
-classifier_insert(struct classifier *cls, struct cls_rule *rule)
+classifier_replace(struct classifier *cls, struct cls_rule *rule)
{
struct cls_rule *old_rule;
struct cls_table *table;
return old_rule;
}
+/* Inserts 'rule' into 'cls'. Until 'rule' is removed from 'cls', the caller
+ * must not modify or free it.
+ *
+ * 'cls' must not contain an identical rule (including wildcards, values of
+ * fixed fields, and priority). Use classifier_find_rule_exactly() to find
+ * such a rule. */
+void
+classifier_insert(struct classifier *cls, struct cls_rule *rule)
+{
+ /* A displaced rule means the caller violated the no-duplicates
+ * precondition above. */
+ struct cls_rule *displaced_rule = classifier_replace(cls, rule);
+ assert(!displaced_rule);
+}
+
+
/* Removes 'rule' from 'cls'. It is the caller's responsibility to free
* 'rule', if this is desirable. */
void
/* Finds and returns a rule in 'cls' with exactly the same priority and
* matching criteria as 'target'. Returns a null pointer if 'cls' doesn't
- * contain an exact match.
- *
- * Priority is ignored for exact-match rules (because OpenFlow 1.0 always
- * treats exact-match rules as highest priority). */
+ * contain an exact match. */
struct cls_rule *
classifier_find_rule_exactly(const struct classifier *cls,
const struct cls_rule *target)
}
head = find_equal(table, &target->flow, flow_hash(&target->flow, 0));
- if (flow_wildcards_is_exact(&target->wc)) {
- return head;
- }
FOR_EACH_RULE_IN_LIST (rule, head) {
if (target->priority >= rule->priority) {
return target->priority == rule->priority ? rule : NULL;
{
struct cls_table *table;
- for (table = classifier_first_table(cursor->cls); table;
- table = classifier_next_table(cursor->cls, table)) {
+ HMAP_FOR_EACH (table, hmap_node, &cursor->cls->tables) {
struct cls_rule *rule = search_table(table, cursor->target);
if (rule) {
cursor->table = table;
}
}
- for (table = classifier_next_table(cursor->cls, cursor->table); table;
- table = classifier_next_table(cursor->cls, table)) {
+ table = cursor->table;
+ HMAP_FOR_EACH_CONTINUE (table, hmap_node, &cursor->cls->tables) {
rule = search_table(table, cursor->target);
if (rule) {
cursor->table = table;
{
struct cls_table *table;
- HMAP_FOR_EACH_IN_BUCKET (table, hmap_node, flow_wildcards_hash(wc),
+ HMAP_FOR_EACH_IN_BUCKET (table, hmap_node, flow_wildcards_hash(wc, 0),
&cls->tables) {
if (flow_wildcards_equal(wc, &table->wc)) {
return table;
table = xzalloc(sizeof *table);
hmap_init(&table->rules);
table->wc = *wc;
- hmap_insert(&cls->tables, &table->hmap_node, flow_wildcards_hash(wc));
+ hmap_insert(&cls->tables, &table->hmap_node, flow_wildcards_hash(wc, 0));
return table;
}
-static struct cls_table *
-classifier_first_table(const struct classifier *cls)
-{
- return cls_table_from_hmap_node(hmap_first(&cls->tables));
-}
-
-static struct cls_table *
-classifier_next_table(const struct classifier *cls,
- const struct cls_table *table)
-{
- return cls_table_from_hmap_node(hmap_next(&cls->tables,
- &table->hmap_node));
-}
-
static void
destroy_table(struct classifier *cls, struct cls_table *table)
{
struct flow f;
f = *flow;
- zero_wildcards(&f, &table->wc);
+ flow_zero_wildcards(&f, &table->wc);
HMAP_FOR_EACH_WITH_HASH (rule, hmap_node, flow_hash(&f, 0),
&table->rules) {
if (flow_equal(&f, &rule->flow)) {
return next->priority < rule->priority ? next : NULL;
}
+/* Returns true if IPv6 addresses 'a' and 'b' are equal on every bit where
+ * 'mask' has a 1-bit, that is, if ((a ^ b) & mask) is all-zeros; returns
+ * false otherwise.
+ *
+ * Compares 32 bits at a time where the platform provides s6_addr32,
+ * otherwise falls back to byte-at-a-time comparison. */
+static bool
+ipv6_equal_except(const struct in6_addr *a, const struct in6_addr *b,
+                  const struct in6_addr *mask)
+{
+    int i;
+
+#ifdef s6_addr32
+    for (i = 0; i < 4; i++) {
+        if ((a->s6_addr32[i] ^ b->s6_addr32[i]) & mask->s6_addr32[i]) {
+            return false;
+        }
+    }
+#else
+    for (i = 0; i < 16; i++) {
+        if ((a->s6_addr[i] ^ b->s6_addr[i]) & mask->s6_addr[i]) {
+            return false;
+        }
+    }
+#endif
+
+    return true;
+}
+
static bool
flow_equal_except(const struct flow *a, const struct flow *b,
const struct flow_wildcards *wildcards)
const flow_wildcards_t wc = wildcards->wildcards;
int i;
- BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 52 + FLOW_N_REGS * 4);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 7);
for (i = 0; i < FLOW_N_REGS; i++) {
if ((a->regs[i] ^ b->regs[i]) & wildcards->reg_masks[i]) {
&& (wc & FWW_ETH_MCAST
|| !((a->dl_dst[0] ^ b->dl_dst[0]) & 0x01))
&& (wc & FWW_NW_PROTO || a->nw_proto == b->nw_proto)
- && (wc & FWW_NW_TOS || a->nw_tos == b->nw_tos)
+ && (wc & FWW_NW_TTL || a->nw_ttl == b->nw_ttl)
+ && (wc & FWW_NW_DSCP || !((a->nw_tos ^ b->nw_tos) & IP_DSCP_MASK))
+ && (wc & FWW_NW_ECN || !((a->nw_tos ^ b->nw_tos) & IP_ECN_MASK))
+ && !((a->nw_frag ^ b->nw_frag) & wildcards->nw_frag_mask)
&& (wc & FWW_ARP_SHA || eth_addr_equals(a->arp_sha, b->arp_sha))
- && (wc & FWW_ARP_THA || eth_addr_equals(a->arp_tha, b->arp_tha)));
-}
-
-static void
-zero_wildcards(struct flow *flow, const struct flow_wildcards *wildcards)
-{
- const flow_wildcards_t wc = wildcards->wildcards;
- int i;
-
- BUILD_ASSERT_DECL(FLOW_SIG_SIZE == 52 + 4 * FLOW_N_REGS);
-
- for (i = 0; i < FLOW_N_REGS; i++) {
- flow->regs[i] &= wildcards->reg_masks[i];
- }
- flow->tun_id &= wildcards->tun_id_mask;
- flow->nw_src &= wildcards->nw_src_mask;
- flow->nw_dst &= wildcards->nw_dst_mask;
- if (wc & FWW_IN_PORT) {
- flow->in_port = 0;
- }
- flow->vlan_tci &= wildcards->vlan_tci_mask;
- if (wc & FWW_DL_TYPE) {
- flow->dl_type = 0;
- }
- if (wc & FWW_TP_SRC) {
- flow->tp_src = 0;
- }
- if (wc & FWW_TP_DST) {
- flow->tp_dst = 0;
- }
- if (wc & FWW_DL_SRC) {
- memset(flow->dl_src, 0, sizeof flow->dl_src);
- }
- if (wc & FWW_DL_DST) {
- flow->dl_dst[0] &= 0x01;
- memset(&flow->dl_dst[1], 0, 5);
- }
- if (wc & FWW_ETH_MCAST) {
- flow->dl_dst[0] &= 0xfe;
- }
- if (wc & FWW_NW_PROTO) {
- flow->nw_proto = 0;
- }
- if (wc & FWW_NW_TOS) {
- flow->nw_tos = 0;
- }
- if (wc & FWW_ARP_SHA) {
- memset(flow->arp_sha, 0, sizeof flow->arp_sha);
- }
- if (wc & FWW_ARP_THA) {
- memset(flow->arp_tha, 0, sizeof flow->arp_tha);
- }
+ && (wc & FWW_ARP_THA || eth_addr_equals(a->arp_tha, b->arp_tha))
+ && (wc & FWW_IPV6_LABEL || a->ipv6_label == b->ipv6_label)
+ && ipv6_equal_except(&a->ipv6_src, &b->ipv6_src,
+ &wildcards->ipv6_src_mask)
+ && ipv6_equal_except(&a->ipv6_dst, &b->ipv6_dst,
+ &wildcards->ipv6_dst_mask)
+ && (wc & FWW_ND_TARGET
+ || ipv6_addr_equals(&a->nd_target, &b->nd_target)));
}