/*
- * Copyright (c) 2009, 2010 Nicira Networks.
+ * Copyright (c) 2009, 2010, 2011, 2012 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <assert.h>
#include <errno.h>
#include <netinet/in.h>
+#include "byte-order.h"
+#include "dynamic-string.h"
#include "flow.h"
#include "hash.h"
+#include "odp-util.h"
+#include "ofp-util.h"
+#include "packets.h"
-const struct cls_field cls_fields[CLS_N_FIELDS + 1] = {
-#define CLS_FIELD(WILDCARDS, MEMBER, NAME) \
- { offsetof(flow_t, MEMBER), \
- sizeof ((flow_t *)0)->MEMBER, \
- WILDCARDS, \
- #NAME },
- CLS_FIELDS
-#undef CLS_FIELD
- { sizeof(flow_t), 0, 0, "exact" },
-};
-
-static uint32_t hash_fields(const flow_t *, int table_idx);
-static bool equal_fields(const flow_t *, const flow_t *, int table_idx);
-
-static int table_idx_from_wildcards(uint32_t wildcards);
-static struct cls_rule *table_insert(struct hmap *, struct cls_rule *);
-static struct cls_rule *insert_exact_rule(struct classifier *,
- struct cls_rule *);
-static struct cls_bucket *find_bucket(struct hmap *, size_t hash,
- const struct cls_rule *);
-static struct cls_rule *search_table(const struct hmap *table, int field_idx,
- const struct cls_rule *);
-static struct cls_rule *search_exact_table(const struct classifier *,
- size_t hash, const flow_t *);
-static bool rules_match_1wild(const struct cls_rule *fixed,
- const struct cls_rule *wild, int field_idx);
+static struct cls_table *find_table(const struct classifier *,
+ const struct flow_wildcards *);
+static struct cls_table *insert_table(struct classifier *,
+ const struct flow_wildcards *);
+
+static void destroy_table(struct classifier *, struct cls_table *);
+
+static struct cls_rule *find_match(const struct cls_table *,
+ const struct flow *);
+static struct cls_rule *find_equal(struct cls_table *, const struct flow *,
+ uint32_t hash);
+static struct cls_rule *insert_rule(struct cls_table *, struct cls_rule *);
+
+static bool flow_equal_except(const struct flow *, const struct flow *,
+ const struct flow_wildcards *);
+
+/* Iterates RULE over HEAD and all of the cls_rules on HEAD->list. */
+#define FOR_EACH_RULE_IN_LIST(RULE, HEAD) \
+ for ((RULE) = (HEAD); (RULE) != NULL; (RULE) = next_rule_in_list(RULE))
+#define FOR_EACH_RULE_IN_LIST_SAFE(RULE, NEXT, HEAD) \
+ for ((RULE) = (HEAD); \
+ (RULE) != NULL && ((NEXT) = next_rule_in_list(RULE), true); \
+ (RULE) = (NEXT))
+
+static struct cls_rule *next_rule_in_list__(struct cls_rule *);
+static struct cls_rule *next_rule_in_list(struct cls_rule *);
/* Converts the flow in 'flow' into a cls_rule in 'rule', with the given
- * 'wildcards' and 'priority'.*/
+ * 'wildcards' and 'priority'. */
+void
+cls_rule_init(const struct flow *flow, const struct flow_wildcards *wildcards,
+ unsigned int priority, struct cls_rule *rule)
+{
+ rule->flow = *flow;
+ rule->wc = *wildcards;
+ rule->priority = priority;
+ cls_rule_zero_wildcarded_fields(rule);
+}
+
+/* Converts the flow in 'flow' into an exact-match cls_rule in 'rule', with the
+ * given 'priority'. (For OpenFlow 1.0, exact-match rules are always highest
+ * priority, so 'priority' should be at least 65535.) */
void
-cls_rule_from_flow(struct cls_rule *rule, const flow_t *flow,
- uint32_t wildcards, unsigned int priority)
+cls_rule_init_exact(const struct flow *flow,
+ unsigned int priority, struct cls_rule *rule)
{
rule->flow = *flow;
- flow_wildcards_init(&rule->wc, wildcards);
+ rule->flow.skb_priority = 0;
+ flow_wildcards_init_exact(&rule->wc);
rule->priority = priority;
- rule->table_idx = table_idx_from_wildcards(rule->wc.wildcards);
}
-/* Converts the ofp_match in 'match' into a cls_rule in 'rule', with the given
- * 'priority'. */
+/* Initializes 'rule' as a "catch-all" rule that matches every packet, with
+ * priority 'priority'. */
void
-cls_rule_from_match(struct cls_rule *rule, const struct ofp_match *match,
- unsigned int priority)
+cls_rule_init_catchall(struct cls_rule *rule, unsigned int priority)
{
- uint32_t wildcards;
- flow_from_match(&rule->flow, &wildcards, match);
- flow_wildcards_init(&rule->wc, wildcards);
- rule->priority = rule->wc.wildcards ? priority : UINT16_MAX;
- rule->table_idx = table_idx_from_wildcards(rule->wc.wildcards);
+ memset(&rule->flow, 0, sizeof rule->flow);
+ flow_wildcards_init_catchall(&rule->wc);
+ rule->priority = priority;
}
-/* Prints cls_rule 'rule', for debugging.
+/* For each bit or field wildcarded in 'rule', sets the corresponding bit or
+ * field in 'flow' to all-0-bits. It is important to maintain this invariant
+ * in a cls_rule that might be inserted into a classifier.
*
- * (The output could be improved and expanded, but this was good enough to
- * debug the classifier.) */
+ * It is never necessary to call this function directly for a cls_rule that is
+ * initialized or modified only by cls_rule_*() functions. It is useful to
+ * restore the invariant in a cls_rule whose 'wc' member is modified by hand.
+ */
void
-cls_rule_print(const struct cls_rule *rule)
+cls_rule_zero_wildcarded_fields(struct cls_rule *rule)
{
- printf("wildcards=%x priority=%u ", rule->wc.wildcards, rule->priority);
- flow_print(stdout, &rule->flow);
- putc('\n', stdout);
+ flow_zero_wildcards(&rule->flow, &rule->wc);
}
-/* Adjusts pointers around 'old', which must be in classifier 'cls', to
- * compensate for it having been moved in memory to 'new' (e.g. due to
- * realloc()).
+void
+cls_rule_set_reg(struct cls_rule *rule, unsigned int reg_idx, uint32_t value)
+{
+ cls_rule_set_reg_masked(rule, reg_idx, value, UINT32_MAX);
+}
+
+void
+cls_rule_set_reg_masked(struct cls_rule *rule, unsigned int reg_idx,
+ uint32_t value, uint32_t mask)
+{
+ assert(reg_idx < FLOW_N_REGS);
+ flow_wildcards_set_reg_mask(&rule->wc, reg_idx, mask);
+ rule->flow.regs[reg_idx] = value & mask;
+}
+
+void
+cls_rule_set_tun_id(struct cls_rule *rule, ovs_be64 tun_id)
+{
+ cls_rule_set_tun_id_masked(rule, tun_id, htonll(UINT64_MAX));
+}
+
+void
+cls_rule_set_tun_id_masked(struct cls_rule *rule,
+ ovs_be64 tun_id, ovs_be64 mask)
+{
+ rule->wc.tun_id_mask = mask;
+ rule->flow.tun_id = tun_id & mask;
+}
+
+void
+cls_rule_set_in_port(struct cls_rule *rule, uint16_t ofp_port)
+{
+ rule->wc.wildcards &= ~FWW_IN_PORT;
+ rule->flow.in_port = ofp_port;
+}
+
+void
+cls_rule_set_dl_type(struct cls_rule *rule, ovs_be16 dl_type)
+{
+ rule->wc.wildcards &= ~FWW_DL_TYPE;
+ rule->flow.dl_type = dl_type;
+}
+
+void
+cls_rule_set_dl_src(struct cls_rule *rule, const uint8_t dl_src[ETH_ADDR_LEN])
+{
+ rule->wc.wildcards &= ~FWW_DL_SRC;
+ memcpy(rule->flow.dl_src, dl_src, ETH_ADDR_LEN);
+}
+
+/* Modifies 'rule' so that the destination Ethernet address must match
+ * 'dl_dst' exactly. */
+void
+cls_rule_set_dl_dst(struct cls_rule *rule, const uint8_t dl_dst[ETH_ADDR_LEN])
+{
+ rule->wc.wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST);
+ memcpy(rule->flow.dl_dst, dl_dst, ETH_ADDR_LEN);
+}
+
+/* Modifies 'rule' so that the destination Ethernet address must match
+ * 'dl_dst' after each byte is ANDed with the appropriate byte in 'mask'.
*
- * This function cannot be realized in all possible flow classifier
- * implementations, so we will probably have to change the interface if we
- * change the implementation. Shouldn't be a big deal though. */
+ * This function will assert-fail if 'mask' is invalid. Only 'mask' values
+ * accepted by flow_wildcards_is_dl_dst_mask_valid() are allowed. */
void
-cls_rule_moved(struct classifier *cls, struct cls_rule *old,
- struct cls_rule *new)
+cls_rule_set_dl_dst_masked(struct cls_rule *rule,
+ const uint8_t dl_dst[ETH_ADDR_LEN],
+ const uint8_t mask[ETH_ADDR_LEN])
{
- if (old != new) {
- if (new->wc.wildcards) {
- list_moved(&new->node.list);
- } else {
- hmap_node_moved(&cls->exact_table,
- &old->node.hmap, &new->node.hmap);
- }
+ flow_wildcards_t *wc = &rule->wc.wildcards;
+ size_t i;
+
+ *wc = flow_wildcards_set_dl_dst_mask(*wc, mask);
+ for (i = 0; i < ETH_ADDR_LEN; i++) {
+ rule->flow.dl_dst[i] = dl_dst[i] & mask[i];
}
}
-/* Replaces 'old', which must be in classifier 'cls', by 'new' (e.g. due to
- * realloc()); that is, after calling this function 'new' will be in 'cls' in
- * place of 'old'.
- *
- * 'new' and 'old' must be exactly the same: wildcard the same fields, have the
- * same fixed values for non-wildcarded fields, and have the same priority.
+void
+cls_rule_set_dl_tci(struct cls_rule *rule, ovs_be16 tci)
+{
+ cls_rule_set_dl_tci_masked(rule, tci, htons(0xffff));
+}
+
+void
+cls_rule_set_dl_tci_masked(struct cls_rule *rule, ovs_be16 tci, ovs_be16 mask)
+{
+ rule->flow.vlan_tci = tci & mask;
+ rule->wc.vlan_tci_mask = mask;
+}
+
+/* Modifies 'rule' so that the VLAN VID is wildcarded. If the PCP is already
+ * wildcarded, then 'rule' will match a packet regardless of whether it has an
+ * 802.1Q header or not. */
+void
+cls_rule_set_any_vid(struct cls_rule *rule)
+{
+ if (rule->wc.vlan_tci_mask & htons(VLAN_PCP_MASK)) {
+ rule->wc.vlan_tci_mask &= ~htons(VLAN_VID_MASK);
+ rule->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
+ } else {
+ cls_rule_set_dl_tci_masked(rule, htons(0), htons(0));
+ }
+}
+
+/* Modifies 'rule' depending on 'dl_vlan':
*
- * The caller takes ownership of 'old' and is thus responsible for freeing it,
- * etc., as necessary.
+ * - If 'dl_vlan' is htons(OFP_VLAN_NONE), makes 'rule' match only packets
+ * without an 802.1Q header.
*
- * This function cannot be realized in all possible flow classifier
- * implementations, so we will probably have to change the interface if we
- * change the implementation. Shouldn't be a big deal though. */
+ * - Otherwise, makes 'rule' match only packets with an 802.1Q header whose
+ * VID equals the low 12 bits of 'dl_vlan'.
+ */
void
-cls_rule_replace(struct classifier *cls, const struct cls_rule *old,
- struct cls_rule *new)
+cls_rule_set_dl_vlan(struct cls_rule *rule, ovs_be16 dl_vlan)
{
- assert(old != new);
- assert(old->wc.wildcards == new->wc.wildcards);
- assert(old->priority == new->priority);
+ flow_set_vlan_vid(&rule->flow, dl_vlan);
+ if (dl_vlan == htons(OFP_VLAN_NONE)) {
+ rule->wc.vlan_tci_mask = htons(UINT16_MAX);
+ } else {
+ rule->wc.vlan_tci_mask |= htons(VLAN_VID_MASK | VLAN_CFI);
+ }
+}
- if (new->wc.wildcards) {
- list_replace(&new->node.list, &old->node.list);
+/* Modifies 'rule' so that the VLAN PCP is wildcarded. If the VID is already
+ * wildcarded, then 'rule' will match a packet regardless of whether it has an
+ * 802.1Q header or not. */
+void
+cls_rule_set_any_pcp(struct cls_rule *rule)
+{
+ if (rule->wc.vlan_tci_mask & htons(VLAN_VID_MASK)) {
+ rule->wc.vlan_tci_mask &= ~htons(VLAN_PCP_MASK);
+ rule->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
} else {
- hmap_replace(&cls->exact_table, &old->node.hmap, &new->node.hmap);
+ cls_rule_set_dl_tci_masked(rule, htons(0), htons(0));
}
}
+
+/* Modifies 'rule' so that it matches only packets with an 802.1Q header whose
+ * PCP equals the low 3 bits of 'dl_vlan_pcp'. */
+void
+cls_rule_set_dl_vlan_pcp(struct cls_rule *rule, uint8_t dl_vlan_pcp)
+{
+ flow_set_vlan_pcp(&rule->flow, dl_vlan_pcp);
+ rule->wc.vlan_tci_mask |= htons(VLAN_CFI | VLAN_PCP_MASK);
+}
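+
+/* Small sketches of the VLAN helpers above (the VID and PCP are arbitrary):
+ *
+ *     cls_rule_set_dl_vlan(&rule, htons(OFP_VLAN_NONE));  (untagged traffic)
+ *
+ *     cls_rule_set_dl_vlan(&rule, htons(10));             (802.1Q, VID 10)
+ *     cls_rule_set_dl_vlan_pcp(&rule, 5);                 (and PCP 5)
+ */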
+
+void
+cls_rule_set_tp_src(struct cls_rule *rule, ovs_be16 tp_src)
+{
+ cls_rule_set_tp_src_masked(rule, tp_src, htons(UINT16_MAX));
+}
+
+void
+cls_rule_set_tp_src_masked(struct cls_rule *rule, ovs_be16 port, ovs_be16 mask)
+{
+ rule->flow.tp_src = port & mask;
+ rule->wc.tp_src_mask = mask;
+}
+
+void
+cls_rule_set_tp_dst(struct cls_rule *rule, ovs_be16 tp_dst)
+{
+ cls_rule_set_tp_dst_masked(rule, tp_dst, htons(UINT16_MAX));
+}
+
+void
+cls_rule_set_tp_dst_masked(struct cls_rule *rule, ovs_be16 port, ovs_be16 mask)
+{
+ rule->flow.tp_dst = port & mask;
+ rule->wc.tp_dst_mask = mask;
+}
+
+void
+cls_rule_set_nw_proto(struct cls_rule *rule, uint8_t nw_proto)
+{
+ rule->wc.wildcards &= ~FWW_NW_PROTO;
+ rule->flow.nw_proto = nw_proto;
+}
+
+void
+cls_rule_set_nw_src(struct cls_rule *rule, ovs_be32 nw_src)
+{
+ rule->flow.nw_src = nw_src;
+ rule->wc.nw_src_mask = htonl(UINT32_MAX);
+}
+
+void
+cls_rule_set_nw_src_masked(struct cls_rule *rule,
+ ovs_be32 nw_src, ovs_be32 mask)
+{
+ rule->flow.nw_src = nw_src & mask;
+ rule->wc.nw_src_mask = mask;
+}
+
+void
+cls_rule_set_nw_dst(struct cls_rule *rule, ovs_be32 nw_dst)
+{
+ rule->flow.nw_dst = nw_dst;
+ rule->wc.nw_dst_mask = htonl(UINT32_MAX);
+}
+
+void
+cls_rule_set_nw_dst_masked(struct cls_rule *rule, ovs_be32 ip, ovs_be32 mask)
+{
+ rule->flow.nw_dst = ip & mask;
+ rule->wc.nw_dst_mask = mask;
+}
+
+void
+cls_rule_set_nw_dscp(struct cls_rule *rule, uint8_t nw_dscp)
+{
+ rule->wc.wildcards &= ~FWW_NW_DSCP;
+ rule->flow.nw_tos &= ~IP_DSCP_MASK;
+ rule->flow.nw_tos |= nw_dscp & IP_DSCP_MASK;
+}
+
+void
+cls_rule_set_nw_ecn(struct cls_rule *rule, uint8_t nw_ecn)
+{
+ rule->wc.wildcards &= ~FWW_NW_ECN;
+ rule->flow.nw_tos &= ~IP_ECN_MASK;
+ rule->flow.nw_tos |= nw_ecn & IP_ECN_MASK;
+}
+
+void
+cls_rule_set_nw_ttl(struct cls_rule *rule, uint8_t nw_ttl)
+{
+ rule->wc.wildcards &= ~FWW_NW_TTL;
+ rule->flow.nw_ttl = nw_ttl;
+}
+
+void
+cls_rule_set_nw_frag(struct cls_rule *rule, uint8_t nw_frag)
+{
+ rule->wc.nw_frag_mask |= FLOW_NW_FRAG_MASK;
+ rule->flow.nw_frag = nw_frag;
+}
+
+void
+cls_rule_set_nw_frag_masked(struct cls_rule *rule,
+ uint8_t nw_frag, uint8_t mask)
+{
+ rule->flow.nw_frag = nw_frag & mask;
+ rule->wc.nw_frag_mask = mask;
+}
+
+void
+cls_rule_set_icmp_type(struct cls_rule *rule, uint8_t icmp_type)
+{
+ cls_rule_set_tp_src(rule, htons(icmp_type));
+}
+
+void
+cls_rule_set_icmp_code(struct cls_rule *rule, uint8_t icmp_code)
+{
+ cls_rule_set_tp_dst(rule, htons(icmp_code));
+}
+
+void
+cls_rule_set_arp_sha(struct cls_rule *rule, const uint8_t sha[ETH_ADDR_LEN])
+{
+ rule->wc.wildcards &= ~FWW_ARP_SHA;
+ memcpy(rule->flow.arp_sha, sha, ETH_ADDR_LEN);
+}
+
+void
+cls_rule_set_arp_tha(struct cls_rule *rule, const uint8_t tha[ETH_ADDR_LEN])
+{
+ rule->wc.wildcards &= ~FWW_ARP_THA;
+ memcpy(rule->flow.arp_tha, tha, ETH_ADDR_LEN);
+}
+
+void
+cls_rule_set_ipv6_src(struct cls_rule *rule, const struct in6_addr *src)
+{
+ rule->flow.ipv6_src = *src;
+ rule->wc.ipv6_src_mask = in6addr_exact;
+}
+
+void
+cls_rule_set_ipv6_src_masked(struct cls_rule *rule, const struct in6_addr *src,
+ const struct in6_addr *mask)
+{
+ rule->flow.ipv6_src = ipv6_addr_bitand(src, mask);
+ rule->wc.ipv6_src_mask = *mask;
+}
+
+void
+cls_rule_set_ipv6_dst(struct cls_rule *rule, const struct in6_addr *dst)
+{
+ rule->flow.ipv6_dst = *dst;
+ rule->wc.ipv6_dst_mask = in6addr_exact;
+}
+
+void
+cls_rule_set_ipv6_dst_masked(struct cls_rule *rule, const struct in6_addr *dst,
+ const struct in6_addr *mask)
+{
+ rule->flow.ipv6_dst = ipv6_addr_bitand(dst, mask);
+ rule->wc.ipv6_dst_mask = *mask;
+}
+
+void
+cls_rule_set_ipv6_label(struct cls_rule *rule, ovs_be32 ipv6_label)
+{
+ rule->wc.wildcards &= ~FWW_IPV6_LABEL;
+ rule->flow.ipv6_label = ipv6_label;
+}
+
+void
+cls_rule_set_nd_target(struct cls_rule *rule, const struct in6_addr *target)
+{
+ rule->flow.nd_target = *target;
+ rule->wc.nd_target_mask = in6addr_exact;
+}
+
+void
+cls_rule_set_nd_target_masked(struct cls_rule *rule,
+ const struct in6_addr *target,
+ const struct in6_addr *mask)
+{
+ rule->flow.nd_target = ipv6_addr_bitand(target, mask);
+ rule->wc.nd_target_mask = *mask;
+}
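+
+/* Putting the helpers above together: a sketch of a rule that matches TCP
+ * traffic from 10.0.0.0/24 to port 80.  The priority and addresses are
+ * arbitrary values chosen for illustration:
+ *
+ *     struct cls_rule rule;
+ *
+ *     cls_rule_init_catchall(&rule, 100);
+ *     cls_rule_set_dl_type(&rule, htons(ETH_TYPE_IP));
+ *     cls_rule_set_nw_proto(&rule, IPPROTO_TCP);
+ *     cls_rule_set_nw_src_masked(&rule, htonl(0x0a000000), htonl(0xffffff00));
+ *     cls_rule_set_tp_dst(&rule, htons(80));
+ */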
+
+/* Returns true if 'a' and 'b' have the same priority, wildcard the same
+ * fields, and have the same values for fixed fields, otherwise false. */
+bool
+cls_rule_equal(const struct cls_rule *a, const struct cls_rule *b)
+{
+ return (a->priority == b->priority
+ && flow_wildcards_equal(&a->wc, &b->wc)
+ && flow_equal(&a->flow, &b->flow));
+}
+
+/* Returns a hash value for the flow, wildcards, and priority in 'rule',
+ * starting from 'basis'. */
+uint32_t
+cls_rule_hash(const struct cls_rule *rule, uint32_t basis)
+{
+ uint32_t h0 = flow_hash(&rule->flow, basis);
+ uint32_t h1 = flow_wildcards_hash(&rule->wc, h0);
+ return hash_int(rule->priority, h1);
+}
+
+static void
+format_ip_netmask(struct ds *s, const char *name, ovs_be32 ip,
+ ovs_be32 netmask)
+{
+ if (netmask) {
+ ds_put_format(s, "%s=", name);
+ ip_format_masked(ip, netmask, s);
+ ds_put_char(s, ',');
+ }
+}
+
+static void
+format_ipv6_netmask(struct ds *s, const char *name,
+ const struct in6_addr *addr,
+ const struct in6_addr *netmask)
+{
+ if (!ipv6_mask_is_any(netmask)) {
+ ds_put_format(s, "%s=", name);
+ print_ipv6_masked(s, addr, netmask);
+ ds_put_char(s, ',');
+ }
+}
+
+static void
+format_be16_masked(struct ds *s, const char *name,
+ ovs_be16 value, ovs_be16 mask)
+{
+ if (mask != htons(0)) {
+ ds_put_format(s, "%s=", name);
+ if (mask == htons(UINT16_MAX)) {
+ ds_put_format(s, "%"PRIu16, ntohs(value));
+ } else {
+ ds_put_format(s, "0x%"PRIx16"/0x%"PRIx16,
+ ntohs(value), ntohs(mask));
+ }
+ ds_put_char(s, ',');
+ }
+}
+
+void
+cls_rule_format(const struct cls_rule *rule, struct ds *s)
+{
+ const struct flow_wildcards *wc = &rule->wc;
+ size_t start_len = s->length;
+ flow_wildcards_t w = wc->wildcards;
+ const struct flow *f = &rule->flow;
+ bool skip_type = false;
+ bool skip_proto = false;
+
+ int i;
+
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 10);
+
+ if (rule->priority != OFP_DEFAULT_PRIORITY) {
+ ds_put_format(s, "priority=%d,", rule->priority);
+ }
+
+ if (!(w & FWW_DL_TYPE)) {
+ skip_type = true;
+ if (f->dl_type == htons(ETH_TYPE_IP)) {
+ if (!(w & FWW_NW_PROTO)) {
+ skip_proto = true;
+ if (f->nw_proto == IPPROTO_ICMP) {
+ ds_put_cstr(s, "icmp,");
+ } else if (f->nw_proto == IPPROTO_TCP) {
+ ds_put_cstr(s, "tcp,");
+ } else if (f->nw_proto == IPPROTO_UDP) {
+ ds_put_cstr(s, "udp,");
+ } else {
+ ds_put_cstr(s, "ip,");
+ skip_proto = false;
+ }
+ } else {
+ ds_put_cstr(s, "ip,");
+ }
+ } else if (f->dl_type == htons(ETH_TYPE_IPV6)) {
+ if (!(w & FWW_NW_PROTO)) {
+ skip_proto = true;
+ if (f->nw_proto == IPPROTO_ICMPV6) {
+ ds_put_cstr(s, "icmp6,");
+ } else if (f->nw_proto == IPPROTO_TCP) {
+ ds_put_cstr(s, "tcp6,");
+ } else if (f->nw_proto == IPPROTO_UDP) {
+ ds_put_cstr(s, "udp6,");
+ } else {
+ ds_put_cstr(s, "ipv6,");
+ skip_proto = false;
+ }
+ } else {
+ ds_put_cstr(s, "ipv6,");
+ }
+ } else if (f->dl_type == htons(ETH_TYPE_ARP)) {
+ ds_put_cstr(s, "arp,");
+ } else {
+ skip_type = false;
+ }
+ }
+ for (i = 0; i < FLOW_N_REGS; i++) {
+ switch (wc->reg_masks[i]) {
+ case 0:
+ break;
+ case UINT32_MAX:
+ ds_put_format(s, "reg%d=0x%"PRIx32",", i, f->regs[i]);
+ break;
+ default:
+ ds_put_format(s, "reg%d=0x%"PRIx32"/0x%"PRIx32",",
+ i, f->regs[i], wc->reg_masks[i]);
+ break;
+ }
+ }
+ switch (wc->tun_id_mask) {
+ case 0:
+ break;
+ case CONSTANT_HTONLL(UINT64_MAX):
+ ds_put_format(s, "tun_id=%#"PRIx64",", ntohll(f->tun_id));
+ break;
+ default:
+ ds_put_format(s, "tun_id=%#"PRIx64"/%#"PRIx64",",
+ ntohll(f->tun_id), ntohll(wc->tun_id_mask));
+ break;
+ }
+ if (!(w & FWW_IN_PORT)) {
+ ds_put_format(s, "in_port=%"PRIu16",", f->in_port);
+ }
+ if (wc->vlan_tci_mask) {
+ ovs_be16 vid_mask = wc->vlan_tci_mask & htons(VLAN_VID_MASK);
+ ovs_be16 pcp_mask = wc->vlan_tci_mask & htons(VLAN_PCP_MASK);
+ ovs_be16 cfi = wc->vlan_tci_mask & htons(VLAN_CFI);
+
+ if (cfi && f->vlan_tci & htons(VLAN_CFI)
+ && (!vid_mask || vid_mask == htons(VLAN_VID_MASK))
+ && (!pcp_mask || pcp_mask == htons(VLAN_PCP_MASK))
+ && (vid_mask || pcp_mask)) {
+ if (vid_mask) {
+ ds_put_format(s, "dl_vlan=%"PRIu16",",
+ vlan_tci_to_vid(f->vlan_tci));
+ }
+ if (pcp_mask) {
+ ds_put_format(s, "dl_vlan_pcp=%d,",
+ vlan_tci_to_pcp(f->vlan_tci));
+ }
+ } else if (wc->vlan_tci_mask == htons(0xffff)) {
+ ds_put_format(s, "vlan_tci=0x%04"PRIx16",", ntohs(f->vlan_tci));
+ } else {
+ ds_put_format(s, "vlan_tci=0x%04"PRIx16"/0x%04"PRIx16",",
+ ntohs(f->vlan_tci), ntohs(wc->vlan_tci_mask));
+ }
+ }
+ if (!(w & FWW_DL_SRC)) {
+ ds_put_format(s, "dl_src="ETH_ADDR_FMT",", ETH_ADDR_ARGS(f->dl_src));
+ }
+ switch (w & (FWW_DL_DST | FWW_ETH_MCAST)) {
+ case 0:
+ ds_put_format(s, "dl_dst="ETH_ADDR_FMT",", ETH_ADDR_ARGS(f->dl_dst));
+ break;
+ case FWW_DL_DST:
+ ds_put_format(s, "dl_dst="ETH_ADDR_FMT"/01:00:00:00:00:00,",
+ ETH_ADDR_ARGS(f->dl_dst));
+ break;
+ case FWW_ETH_MCAST:
+ ds_put_format(s, "dl_dst="ETH_ADDR_FMT"/fe:ff:ff:ff:ff:ff,",
+ ETH_ADDR_ARGS(f->dl_dst));
+ break;
+ case FWW_DL_DST | FWW_ETH_MCAST:
+ break;
+ }
+ if (!skip_type && !(w & FWW_DL_TYPE)) {
+ ds_put_format(s, "dl_type=0x%04"PRIx16",", ntohs(f->dl_type));
+ }
+ if (f->dl_type == htons(ETH_TYPE_IPV6)) {
+ format_ipv6_netmask(s, "ipv6_src", &f->ipv6_src, &wc->ipv6_src_mask);
+ format_ipv6_netmask(s, "ipv6_dst", &f->ipv6_dst, &wc->ipv6_dst_mask);
+ if (!(w & FWW_IPV6_LABEL)) {
+ ds_put_format(s, "ipv6_label=0x%05"PRIx32",", ntohl(f->ipv6_label));
+ }
+ } else {
+ format_ip_netmask(s, "nw_src", f->nw_src, wc->nw_src_mask);
+ format_ip_netmask(s, "nw_dst", f->nw_dst, wc->nw_dst_mask);
+ }
+ if (!skip_proto && !(w & FWW_NW_PROTO)) {
+ if (f->dl_type == htons(ETH_TYPE_ARP)) {
+ ds_put_format(s, "arp_op=%"PRIu8",", f->nw_proto);
+ } else {
+ ds_put_format(s, "nw_proto=%"PRIu8",", f->nw_proto);
+ }
+ }
+ if (f->dl_type == htons(ETH_TYPE_ARP)) {
+ if (!(w & FWW_ARP_SHA)) {
+ ds_put_format(s, "arp_sha="ETH_ADDR_FMT",",
+ ETH_ADDR_ARGS(f->arp_sha));
+ }
+ if (!(w & FWW_ARP_THA)) {
+ ds_put_format(s, "arp_tha="ETH_ADDR_FMT",",
+ ETH_ADDR_ARGS(f->arp_tha));
+ }
+ }
+ if (!(w & FWW_NW_DSCP)) {
+ ds_put_format(s, "nw_tos=%"PRIu8",", f->nw_tos & IP_DSCP_MASK);
+ }
+ if (!(w & FWW_NW_ECN)) {
+ ds_put_format(s, "nw_ecn=%"PRIu8",", f->nw_tos & IP_ECN_MASK);
+ }
+ if (!(w & FWW_NW_TTL)) {
+ ds_put_format(s, "nw_ttl=%"PRIu8",", f->nw_ttl);
+ }
+ switch (wc->nw_frag_mask) {
+ case FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER:
+ ds_put_format(s, "nw_frag=%s,",
+ f->nw_frag & FLOW_NW_FRAG_ANY
+ ? (f->nw_frag & FLOW_NW_FRAG_LATER ? "later" : "first")
+ : (f->nw_frag & FLOW_NW_FRAG_LATER ? "<error>" : "no"));
+ break;
+
+ case FLOW_NW_FRAG_ANY:
+ ds_put_format(s, "nw_frag=%s,",
+ f->nw_frag & FLOW_NW_FRAG_ANY ? "yes" : "no");
+ break;
+
+ case FLOW_NW_FRAG_LATER:
+ ds_put_format(s, "nw_frag=%s,",
+ f->nw_frag & FLOW_NW_FRAG_LATER ? "later" : "not_later");
+ break;
+ }
+ if (f->nw_proto == IPPROTO_ICMP) {
+ format_be16_masked(s, "icmp_type", f->tp_src, wc->tp_src_mask);
+ format_be16_masked(s, "icmp_code", f->tp_dst, wc->tp_dst_mask);
+ } else if (f->nw_proto == IPPROTO_ICMPV6) {
+ format_be16_masked(s, "icmp_type", f->tp_src, wc->tp_src_mask);
+ format_be16_masked(s, "icmp_code", f->tp_dst, wc->tp_dst_mask);
+ format_ipv6_netmask(s, "nd_target", &f->nd_target,
+ &wc->nd_target_mask);
+ if (!(w & FWW_ARP_SHA)) {
+ ds_put_format(s, "nd_sll="ETH_ADDR_FMT",",
+ ETH_ADDR_ARGS(f->arp_sha));
+ }
+ if (!(w & FWW_ARP_THA)) {
+ ds_put_format(s, "nd_tll="ETH_ADDR_FMT",",
+ ETH_ADDR_ARGS(f->arp_tha));
+ }
+ } else {
+ format_be16_masked(s, "tp_src", f->tp_src, wc->tp_src_mask);
+ format_be16_masked(s, "tp_dst", f->tp_dst, wc->tp_dst_mask);
+ }
+
+ if (s->length > start_len && ds_last(s) == ',') {
+ s->length--;
+ }
+}
+
+/* Converts 'rule' to a string and returns the string. The caller must free
+ * the string (with free()). */
+char *
+cls_rule_to_string(const struct cls_rule *rule)
+{
+ struct ds s = DS_EMPTY_INITIALIZER;
+ cls_rule_format(rule, &s);
+ return ds_steal_cstr(&s);
+}
+
+void
+cls_rule_print(const struct cls_rule *rule)
+{
+ char *s = cls_rule_to_string(rule);
+ puts(s);
+ free(s);
+}
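+
+/* For a rule like the TCP port 80 sketch earlier in this file,
+ * cls_rule_to_string() yields something like
+ * "priority=100,tcp,nw_src=10.0.0.0/24,tp_dst=80" (the exact output depends
+ * on which fields are wildcarded):
+ *
+ *     char *s = cls_rule_to_string(&rule);
+ *     ...log or display 's'...
+ *     free(s);
+ */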
\f
/* Initializes 'cls' as a classifier that initially contains no classification
* rules. */
void
classifier_init(struct classifier *cls)
{
- int i;
-
cls->n_rules = 0;
- for (i = 0; i < ARRAY_SIZE(cls->tables); i++) {
- hmap_init(&cls->tables[i]);
- }
- hmap_init(&cls->exact_table);
+ hmap_init(&cls->tables);
}
/* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the
classifier_destroy(struct classifier *cls)
{
if (cls) {
- struct cls_bucket *bucket, *next_bucket;
- struct hmap *tbl;
+ struct cls_table *table, *next_table;
- for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
- HMAP_FOR_EACH_SAFE (bucket, next_bucket,
- struct cls_bucket, hmap_node, tbl) {
- free(bucket);
- }
- hmap_destroy(tbl);
+ HMAP_FOR_EACH_SAFE (table, next_table, hmap_node, &cls->tables) {
+ hmap_destroy(&table->rules);
+ hmap_remove(&cls->tables, &table->hmap_node);
+ free(table);
}
- hmap_destroy(&cls->exact_table);
+ hmap_destroy(&cls->tables);
}
}
-/* Returns true if 'cls' does not contain any classification rules, false
- * otherwise. */
+/* Returns true if 'cls' contains no classification rules, false otherwise. */
bool
classifier_is_empty(const struct classifier *cls)
{
    return cls->n_rules == 0;
}
-/* Returns the number of rules in 'classifier' that have no wildcards. */
-int
-classifier_count_exact(const struct classifier *cls)
-{
- return hmap_count(&cls->exact_table);
-}
-
-/* Inserts 'rule' into 'cls'. Transfers ownership of 'rule' to 'cls'.
+/* Inserts 'rule' into 'cls'. Until 'rule' is removed from 'cls', the caller
+ * must not modify or free it.
*
* If 'cls' already contains an identical rule (including wildcards, values of
 * fixed fields, and priority), replaces the old rule by 'rule' and returns the
 * rule that was replaced.  The caller takes ownership of the returned rule and
 * is thus responsible for freeing it, etc., as necessary.
 *
 * Returns NULL if 'cls' does not contain a rule with an identical key, after
 * inserting the new rule.  In this case, no rules are displaced by the new
 * rule, even rules that cannot have any effect because the new rule matches a
* superset of their flows and has higher priority. */
struct cls_rule *
-classifier_insert(struct classifier *cls, struct cls_rule *rule)
+classifier_replace(struct classifier *cls, struct cls_rule *rule)
{
- struct cls_rule *old;
- assert((rule->wc.wildcards == 0) == (rule->table_idx == CLS_F_IDX_EXACT));
- old = (rule->wc.wildcards
- ? table_insert(&cls->tables[rule->table_idx], rule)
- : insert_exact_rule(cls, rule));
- if (!old) {
+ struct cls_rule *old_rule;
+ struct cls_table *table;
+
+ table = find_table(cls, &rule->wc);
+ if (!table) {
+ table = insert_table(cls, &rule->wc);
+ }
+
+ old_rule = insert_rule(table, rule);
+ if (!old_rule) {
+ table->n_table_rules++;
cls->n_rules++;
}
- return old;
+ return old_rule;
}
-/* Inserts 'rule' into 'cls'. Transfers ownership of 'rule' to 'cls'.
+/* Inserts 'rule' into 'cls'. Until 'rule' is removed from 'cls', the caller
+ * must not modify or free it.
*
- * 'rule' must be an exact-match rule (rule->wc.wildcards must be 0) and 'cls'
- * must not contain any rule with an identical key. */
+ * 'cls' must not contain an identical rule (including wildcards, values of
+ * fixed fields, and priority). Use classifier_find_rule_exactly() to find
+ * such a rule. */
void
-classifier_insert_exact(struct classifier *cls, struct cls_rule *rule)
+classifier_insert(struct classifier *cls, struct cls_rule *rule)
{
- hmap_insert(&cls->exact_table, &rule->node.hmap,
- flow_hash(&rule->flow, 0));
- cls->n_rules++;
+ struct cls_rule *displaced_rule = classifier_replace(cls, rule);
+ assert(!displaced_rule);
}
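+
+/* A sketch of the difference between the two insertion functions above; 'cls'
+ * and the rule variables are placeholders:
+ *
+ *     struct cls_rule *old = classifier_replace(&cls, &new_rule);
+ *     if (old) {
+ *         ...'old' had identical wildcards, fields, and priority; the caller
+ *            now owns it and may free it...
+ *     }
+ *
+ *     classifier_insert(&cls, &other_rule);    (asserts that no identical
+ *                                               rule is already present)
+ */
+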
-/* Removes 'rule' from 'cls'. It is caller's responsibility to free 'rule', if
- * this is desirable. */
+/* Removes 'rule' from 'cls'. It is the caller's responsibility to free
+ * 'rule', if this is desirable. */
void
classifier_remove(struct classifier *cls, struct cls_rule *rule)
{
- if (rule->wc.wildcards) {
- /* Remove 'rule' from bucket. If that empties the bucket, remove the
- * bucket from its table. */
- struct hmap *table = &cls->tables[rule->table_idx];
- struct list *rules = list_remove(&rule->node.list);
- if (list_is_empty(rules)) {
- /* This code is a little tricky. list_remove() returns the list
- * element just after the one removed. Since the list is now
- * empty, this will be the address of the 'rules' member of the
- * bucket that was just emptied, so pointer arithmetic (via
- * CONTAINER_OF) can find that bucket. */
- struct cls_bucket *bucket;
- bucket = CONTAINER_OF(rules, struct cls_bucket, rules);
- hmap_remove(table, &bucket->hmap_node);
- free(bucket);
- }
+ struct cls_rule *head;
+ struct cls_table *table;
+
+ table = find_table(cls, &rule->wc);
+ head = find_equal(table, &rule->flow, rule->hmap_node.hash);
+ if (head != rule) {
+ list_remove(&rule->list);
+ } else if (list_is_empty(&rule->list)) {
+ hmap_remove(&table->rules, &rule->hmap_node);
} else {
- /* Remove 'rule' from cls->exact_table. */
- hmap_remove(&cls->exact_table, &rule->node.hmap);
+ struct cls_rule *next = CONTAINER_OF(rule->list.next,
+ struct cls_rule, list);
+
+ list_remove(&rule->list);
+ hmap_replace(&table->rules, &rule->hmap_node, &next->hmap_node);
}
+
+ if (--table->n_table_rules == 0) {
+ destroy_table(cls, table);
+ }
+
cls->n_rules--;
}
/* Finds and returns the highest-priority rule in 'cls' that matches 'flow'.
* Returns a null pointer if no rules in 'cls' match 'flow'. If multiple rules
- * of equal priority match 'flow', returns one arbitrarily.
- *
- * (When multiple rules of equal priority happen to fall into the same bucket,
- * rules added more recently take priority over rules added less recently, but
- * this is subject to change and should not be depended upon.) */
-struct cls_rule *
-classifier_lookup(const struct classifier *cls, const flow_t *flow)
-{
- struct cls_rule *rule = classifier_lookup_exact(cls, flow);
- if (!rule) {
- rule = classifier_lookup_wild(cls, flow);
- }
- return rule;
-}
-
+ * of equal priority match 'flow', returns one arbitrarily. */
struct cls_rule *
-classifier_lookup_exact(const struct classifier *cls, const flow_t *flow)
+classifier_lookup(const struct classifier *cls, const struct flow *flow)
{
- return (!hmap_is_empty(&cls->exact_table)
- ? search_exact_table(cls, flow_hash(flow, 0), flow)
- : NULL);
-}
-
-struct cls_rule *
-classifier_lookup_wild(const struct classifier *cls, const flow_t *flow)
-{
- struct cls_rule *best = NULL;
- if (cls->n_rules > hmap_count(&cls->exact_table)) {
- struct cls_rule target;
- int i;
-
- cls_rule_from_flow(&target, flow, 0, 0);
- for (i = 0; i < CLS_N_FIELDS; i++) {
- struct cls_rule *rule = search_table(&cls->tables[i], i, &target);
- if (rule && (!best || rule->priority > best->priority)) {
- best = rule;
- }
+ struct cls_table *table;
+ struct cls_rule *best;
+
+ best = NULL;
+ HMAP_FOR_EACH (table, hmap_node, &cls->tables) {
+ struct cls_rule *rule = find_match(table, flow);
+ if (rule && (!best || rule->priority > best->priority)) {
+ best = rule;
}
}
return best;
}
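+
+/* Lookup sketch: given a 'flow' extracted from a packet (e.g. with
+ * flow_extract()) and a classifier 'cls', the highest-priority matching rule,
+ * if any, is found with:
+ *
+ *     struct cls_rule *rule = classifier_lookup(&cls, &flow);
+ *     if (rule) {
+ *         ...apply whatever the caller associates with 'rule'...
+ *     }
+ */
+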
+/* Finds and returns a rule in 'cls' with exactly the same priority and
+ * matching criteria as 'target'. Returns a null pointer if 'cls' doesn't
+ * contain an exact match. */
struct cls_rule *
classifier_find_rule_exactly(const struct classifier *cls,
- const flow_t *target, uint32_t wildcards,
- unsigned int priority)
-{
- struct cls_bucket *bucket;
- int table_idx;
- uint32_t hash;
-
- if (!wildcards) {
- /* Ignores 'priority'. */
- return search_exact_table(cls, flow_hash(target, 0), target);
- }
-
- assert(wildcards == (wildcards & OFPFW_ALL));
- table_idx = table_idx_from_wildcards(wildcards);
- hash = hash_fields(target, table_idx);
- HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node, hash,
- &cls->tables[table_idx]) {
- if (equal_fields(&bucket->fixed, target, table_idx)) {
- struct cls_rule *pos;
- LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
- if (pos->priority < priority) {
- return NULL;
- } else if (pos->priority == priority &&
- pos->wc.wildcards == wildcards &&
- flow_equal(target, &pos->flow)) {
- return pos;
- }
- }
- }
- }
- return NULL;
-}
+ const struct cls_rule *target)
+{
+ struct cls_rule *head, *rule;
+ struct cls_table *table;
-/* Ignores target->priority.
- *
- * 'callback' is allowed to delete the rule that is passed as its argument, but
- * it must not delete (or move) any other rules in 'cls' that are in the same
- * table as the argument rule. Two rules are in the same table if their
- * cls_rule structs have the same table_idx; as a special case, a rule with
- * wildcards and an exact-match rule will never be in the same table. */
-void
-classifier_for_each_match(const struct classifier *cls,
- const struct cls_rule *target,
- int include, cls_cb_func *callback, void *aux)
-{
- if (include & CLS_INC_WILD) {
- const struct hmap *table;
-
- for (table = &cls->tables[0]; table < &cls->tables[CLS_N_FIELDS];
- table++) {
- struct cls_bucket *bucket, *next_bucket;
-
- HMAP_FOR_EACH_SAFE (bucket, next_bucket,
- struct cls_bucket, hmap_node, table) {
- /* XXX there is a bit of room for optimization here based on
- * rejecting entire buckets on their fixed fields, but it will
- * only be worthwhile for big buckets (which we hope we won't
- * get anyway, but...) */
- struct cls_rule *prev_rule, *rule;
-
- /* We can't just use LIST_FOR_EACH_SAFE here because, if the
- * callback deletes the last rule in the bucket, then the
- * bucket itself will be destroyed. The bucket contains the
- * list head so that's a use-after-free error. */
- prev_rule = NULL;
- LIST_FOR_EACH (rule, struct cls_rule, node.list,
- &bucket->rules) {
- if (rules_match_1wild(rule, target, 0)) {
- if (prev_rule) {
- callback(prev_rule, aux);
- }
- prev_rule = rule;
- }
- }
- if (prev_rule) {
- callback(prev_rule, aux);
- }
- }
- }
+ table = find_table(cls, &target->wc);
+ if (!table) {
+ return NULL;
}
- if (include & CLS_INC_EXACT) {
- if (target->wc.wildcards) {
- struct cls_rule *rule, *next_rule;
-
- HMAP_FOR_EACH_SAFE (rule, next_rule, struct cls_rule, node.hmap,
- &cls->exact_table) {
- if (rules_match_1wild(rule, target, 0)) {
- callback(rule, aux);
- }
- }
- } else {
- /* Optimization: there can be at most one match in the exact
- * table. */
- size_t hash = flow_hash(&target->flow, 0);
- struct cls_rule *rule = search_exact_table(cls, hash,
- &target->flow);
- if (rule) {
- callback(rule, aux);
- }
+ head = find_equal(table, &target->flow, flow_hash(&target->flow, 0));
+ FOR_EACH_RULE_IN_LIST (rule, head) {
+ if (target->priority >= rule->priority) {
+ return target->priority == rule->priority ? rule : NULL;
}
}
+ return NULL;
}
-/* 'callback' is allowed to delete the rule that is passed as its argument, but
- * it must not delete (or move) any other rules in 'cls' that are in the same
- * table as the argument rule. Two rules are in the same table if their
- * cls_rule structs have the same table_idx; as a special case, a rule with
- * wildcards and an exact-match rule will never be in the same table. */
-void
-classifier_for_each(const struct classifier *cls, int include,
- void (*callback)(struct cls_rule *, void *aux),
- void *aux)
+/* Checks if 'target' would overlap any other rule in 'cls'. Two rules are
+ * considered to overlap if both rules have the same priority and a packet
+ * could match both. */
+bool
+classifier_rule_overlaps(const struct classifier *cls,
+ const struct cls_rule *target)
{
- if (include & CLS_INC_WILD) {
- const struct hmap *tbl;
+ struct cls_table *table;
- for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
- struct cls_bucket *bucket, *next_bucket;
+ HMAP_FOR_EACH (table, hmap_node, &cls->tables) {
+ struct flow_wildcards wc;
+ struct cls_rule *head;
- HMAP_FOR_EACH_SAFE (bucket, next_bucket,
- struct cls_bucket, hmap_node, tbl) {
- struct cls_rule *prev_rule, *rule;
+ flow_wildcards_combine(&wc, &target->wc, &table->wc);
+ HMAP_FOR_EACH (head, hmap_node, &table->rules) {
+ struct cls_rule *rule;
- /* We can't just use LIST_FOR_EACH_SAFE here because, if the
- * callback deletes the last rule in the bucket, then the
- * bucket itself will be destroyed. The bucket contains the
- * list head so that's a use-after-free error. */
- prev_rule = NULL;
- LIST_FOR_EACH (rule, struct cls_rule, node.list,
- &bucket->rules) {
- if (prev_rule) {
- callback(prev_rule, aux);
- }
- prev_rule = rule;
- }
- if (prev_rule) {
- callback(prev_rule, aux);
+ FOR_EACH_RULE_IN_LIST (rule, head) {
+ if (rule->priority == target->priority
+ && flow_equal_except(&target->flow, &rule->flow, &wc)) {
+ return true;
}
}
}
}
- if (include & CLS_INC_EXACT) {
- struct cls_rule *rule, *next_rule;
-
- HMAP_FOR_EACH_SAFE (rule, next_rule,
- struct cls_rule, node.hmap, &cls->exact_table) {
- callback(rule, aux);
- }
- }
+ return false;
}
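+
+/* Overlap sketch (priorities and values arbitrary): with rule 'a' matching
+ * only nw_src=10.0.0.0/8 and rule 'b' matching only tp_dst=80, both at
+ * priority 5, a packet from 10.0.0.1 to TCP port 80 could match either one,
+ * so:
+ *
+ *     classifier_insert(&cls, &a);
+ *     classifier_rule_overlaps(&cls, &b);      (returns true)
+ */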
\f
-static struct cls_bucket *create_bucket(struct hmap *, size_t hash,
- const flow_t *fixed);
-static struct cls_rule *bucket_insert(struct cls_bucket *, struct cls_rule *);
-
-static inline bool equal_bytes(const void *, const void *, size_t n);
-
-/* Returns a hash computed across the fields in 'flow' whose field indexes
- * (CLS_F_IDX_*) are less than 'table_idx'. (If 'table_idx' is
- * CLS_F_IDX_EXACT, hashes all the fields in 'flow'). */
-static uint32_t
-hash_fields(const flow_t *flow, int table_idx)
-{
- /* I just know I'm going to hell for writing code this way.
- *
- * GCC generates pretty good code here, with only a single taken
- * conditional jump per execution. Now the question is, would we be better
- * off marking this function ALWAYS_INLINE and writing a wrapper that
- * switches on the value of 'table_idx' to get rid of all the conditional
- * jumps entirely (except for one in the wrapper)? Honestly I really,
- * really hope that it doesn't matter in practice.
- *
- * We could do better by calculating hashes incrementally, instead of
- * starting over from the top each time. But that would be even uglier. */
- uint32_t a, b, c;
- uint32_t tmp[3];
- size_t n;
-
- a = b = c = 0xdeadbeef + table_idx;
- n = 0;
-
-#define CLS_FIELD(WILDCARDS, MEMBER, NAME) \
- if (table_idx == CLS_F_IDX_##NAME) { \
- /* Done. */ \
- memset((uint8_t *) tmp + n, 0, sizeof tmp - n); \
- goto finish; \
- } else { \
- const size_t size = sizeof flow->MEMBER; \
- const uint8_t *p1 = (const uint8_t *) &flow->MEMBER; \
- const size_t p1_size = MIN(sizeof tmp - n, size); \
- const uint8_t *p2 = p1 + p1_size; \
- const size_t p2_size = size - p1_size; \
- \
- /* Append to 'tmp' as much data as will fit. */ \
- memcpy((uint8_t *) tmp + n, p1, p1_size); \
- n += p1_size; \
- \
- /* If 'tmp' is full, mix. */ \
- if (n == sizeof tmp) { \
- a += tmp[0]; \
- b += tmp[1]; \
- c += tmp[2]; \
- HASH_MIX(a, b, c); \
- n = 0; \
- } \
- \
- /* Append to 'tmp' any data that didn't fit. */ \
- memcpy(tmp, p2, p2_size); \
- n += p2_size; \
- }
- CLS_FIELDS
-#undef CLS_FIELD
-
-finish:
- a += tmp[0];
- b += tmp[1];
- c += tmp[2];
- HASH_FINAL(a, b, c);
- return c;
-}
-
-/* Compares the fields in 'a' and 'b' whose field indexes (CLS_F_IDX_*) are
- * less than 'table_idx'. (If 'table_idx' is CLS_F_IDX_EXACT, compares all the
- * fields in 'a' and 'b').
- *
- * Returns true if all the compared fields are equal, false otherwise. */
-static bool
-equal_fields(const flow_t *a, const flow_t *b, int table_idx)
-{
- /* XXX The generated code could be better here. */
-#define CLS_FIELD(WILDCARDS, MEMBER, NAME) \
- if (table_idx == CLS_F_IDX_##NAME) { \
- return true; \
- } else if (!equal_bytes(&a->MEMBER, &b->MEMBER, sizeof a->MEMBER)) { \
- return false; \
- }
- CLS_FIELDS
-#undef CLS_FIELD
+/* Iteration. */
- return true;
-}
-
-static int
-table_idx_from_wildcards(uint32_t wildcards)
+static bool
+rule_matches(const struct cls_rule *rule, const struct cls_rule *target)
{
- if (!wildcards) {
- return CLS_F_IDX_EXACT;
- }
-#define CLS_FIELD(WILDCARDS, MEMBER, NAME) \
- if (wildcards & WILDCARDS) { \
- return CLS_F_IDX_##NAME; \
- }
- CLS_FIELDS
-#undef CLS_FIELD
- NOT_REACHED();
+ return (!target
+ || flow_equal_except(&rule->flow, &target->flow, &target->wc));
}
-/* Inserts 'rule' into 'table'. Returns the rule, if any, that was displaced
- * in favor of 'rule'. */
static struct cls_rule *
-table_insert(struct hmap *table, struct cls_rule *rule)
+search_table(const struct cls_table *table, const struct cls_rule *target)
{
- struct cls_bucket *bucket;
- size_t hash;
+ if (!target || !flow_wildcards_has_extra(&table->wc, &target->wc)) {
+ struct cls_rule *rule;
- hash = hash_fields(&rule->flow, rule->table_idx);
- bucket = find_bucket(table, hash, rule);
- if (!bucket) {
- bucket = create_bucket(table, hash, &rule->flow);
+ HMAP_FOR_EACH (rule, hmap_node, &table->rules) {
+ if (rule_matches(rule, target)) {
+ return rule;
+ }
+ }
}
-
- return bucket_insert(bucket, rule);
+ return NULL;
}
-/* Inserts 'rule' into 'bucket', given that 'field' is the first wildcarded
- * field in 'rule'.
+/* Initializes 'cursor' for iterating through 'cls' rules that exactly match
+ * 'target' or are more specific than 'target'. That is, a given 'rule'
+ * matches 'target' if, for every field:
*
- * Returns the rule, if any, that was displaced in favor of 'rule'. */
-static struct cls_rule *
-bucket_insert(struct cls_bucket *bucket, struct cls_rule *rule)
-{
- struct cls_rule *pos;
- LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
- if (pos->priority <= rule->priority) {
- if (pos->priority == rule->priority
- && pos->wc.wildcards == rule->wc.wildcards
- && rules_match_1wild(pos, rule, rule->table_idx))
- {
- list_replace(&rule->node.list, &pos->node.list);
- return pos;
- }
- break;
+ * - 'target' and 'rule' specify the same (non-wildcarded) value for the
+ * field, or
+ *
+ * - 'target' wildcards the field,
+ *
+ * but not if:
+ *
+ * - 'target' and 'rule' specify different values for the field, or
+ *
+ * - 'target' specifies a value for the field but 'rule' wildcards it.
+ *
+ * Equivalently, the truth table for whether a field matches is:
+ *
+ * rule
+ *
+ * wildcard exact
+ * +---------+---------+
+ * t wild | yes | yes |
+ * a card | | |
+ * r +---------+---------+
+ * g exact | no |if values|
+ * e | |are equal|
+ * t +---------+---------+
+ *
+ * This is the matching rule used by OpenFlow 1.0 non-strict OFPT_FLOW_MOD
+ * commands and by OpenFlow 1.0 aggregate and flow stats.
+ *
+ * Ignores target->priority.
+ *
+ * 'target' may be NULL to iterate over every rule in 'cls'. */
+void
+cls_cursor_init(struct cls_cursor *cursor, const struct classifier *cls,
+ const struct cls_rule *target)
+{
+ cursor->cls = cls;
+ cursor->target = target;
+}
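+
+/* A concrete instance of the matching rule described above (addresses
+ * arbitrary): a 'target' that fixes only nw_src=10.0.0.0/8 is matched by a
+ * rule that fixes nw_src=10.0.0.0/8 and tp_dst=80 (the rule is more
+ * specific), but not by a rule that wildcards nw_src entirely. */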
+
+/* Returns the first matching cls_rule in 'cursor''s iteration, or a null
+ * pointer if there are no matches. */
+struct cls_rule *
+cls_cursor_first(struct cls_cursor *cursor)
+{
+ struct cls_table *table;
+
+ HMAP_FOR_EACH (table, hmap_node, &cursor->cls->tables) {
+ struct cls_rule *rule = search_table(table, cursor->target);
+ if (rule) {
+ cursor->table = table;
+ return rule;
}
}
- list_insert(&pos->node.list, &rule->node.list);
+
return NULL;
}
-static struct cls_rule *
-insert_exact_rule(struct classifier *cls, struct cls_rule *rule)
+/* Returns the next matching cls_rule in 'cursor''s iteration, or a null
+ * pointer if there are no more matches. */
+struct cls_rule *
+cls_cursor_next(struct cls_cursor *cursor, struct cls_rule *rule)
{
- struct cls_rule *old_rule;
- size_t hash;
+ const struct cls_table *table;
+ struct cls_rule *next;
- hash = flow_hash(&rule->flow, 0);
- old_rule = search_exact_table(cls, hash, &rule->flow);
- if (old_rule) {
- hmap_remove(&cls->exact_table, &old_rule->node.hmap);
+ next = next_rule_in_list__(rule);
+ if (next->priority < rule->priority) {
+ return next;
}
- hmap_insert(&cls->exact_table, &rule->node.hmap, hash);
- return old_rule;
-}
-/* Returns the bucket in 'table' that has the given 'hash' and the same fields
- * as 'rule->flow' (up to 'rule->table_idx'), or a null pointer if no bucket
- * matches. */
-static struct cls_bucket *
-find_bucket(struct hmap *table, size_t hash, const struct cls_rule *rule)
-{
- struct cls_bucket *bucket;
- HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node, hash,
- table) {
- if (equal_fields(&bucket->fixed, &rule->flow, rule->table_idx)) {
- return bucket;
+ /* 'next' is the head of the list, that is, the rule that is included in
+ * the table's hmap. (This is important when the classifier contains rules
+ * that differ only in priority.) */
+ rule = next;
+ HMAP_FOR_EACH_CONTINUE (rule, hmap_node, &cursor->table->rules) {
+ if (rule_matches(rule, cursor->target)) {
+ return rule;
}
}
+
+ table = cursor->table;
+ HMAP_FOR_EACH_CONTINUE (table, hmap_node, &cursor->cls->tables) {
+ rule = search_table(table, cursor->target);
+ if (rule) {
+ cursor->table = table;
+ return rule;
+ }
+ }
+
return NULL;
}
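+
+/* Iteration sketch: visiting every rule in 'cls' that matches 'target' (pass
+ * a null target to visit every rule in the classifier):
+ *
+ *     struct cls_cursor cursor;
+ *     struct cls_rule *rule;
+ *
+ *     cls_cursor_init(&cursor, &cls, &target);
+ *     for (rule = cls_cursor_first(&cursor); rule;
+ *          rule = cls_cursor_next(&cursor, rule)) {
+ *         ...examine 'rule'...
+ *     }
+ */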
-
-/* Creates a bucket and inserts it in 'table' with the given 'hash' and 'fixed'
- * values. Returns the new bucket. */
-static struct cls_bucket *
-create_bucket(struct hmap *table, size_t hash, const flow_t *fixed)
+\f
+static struct cls_table *
+find_table(const struct classifier *cls, const struct flow_wildcards *wc)
{
- struct cls_bucket *bucket = xmalloc(sizeof *bucket);
- list_init(&bucket->rules);
- bucket->fixed = *fixed;
- hmap_insert(table, &bucket->hmap_node, hash);
- return bucket;
+ struct cls_table *table;
+
+ HMAP_FOR_EACH_IN_BUCKET (table, hmap_node, flow_wildcards_hash(wc, 0),
+ &cls->tables) {
+ if (flow_wildcards_equal(wc, &table->wc)) {
+ return table;
+ }
+ }
+ return NULL;
}
-/* Returns true if the 'n' bytes in 'a' and 'b' are equal, false otherwise. */
-static inline bool ALWAYS_INLINE
-equal_bytes(const void *a, const void *b, size_t n)
+static struct cls_table *
+insert_table(struct classifier *cls, const struct flow_wildcards *wc)
{
-#ifdef __i386__
- /* For some reason GCC generates stupid code for memcmp() of small
- * constant integer lengths. Help it out.
- *
- * This function is always inlined, and it is always called with 'n' as a
- * compile-time constant, so the switch statement gets optimized out and
- * this whole function just expands to an instruction or two. */
- switch (n) {
- case 1:
- return *(uint8_t *) a == *(uint8_t *) b;
-
- case 2:
- return *(uint16_t *) a == *(uint16_t *) b;
+ struct cls_table *table;
- case 4:
- return *(uint32_t *) a == *(uint32_t *) b;
+ table = xzalloc(sizeof *table);
+ hmap_init(&table->rules);
+ table->wc = *wc;
+ table->is_catchall = flow_wildcards_is_catchall(&table->wc);
+ hmap_insert(&cls->tables, &table->hmap_node, flow_wildcards_hash(wc, 0));
- case 6:
- return (*(uint32_t *) a == *(uint32_t *) b
- && ((uint16_t *) a)[2] == ((uint16_t *) b)[2]);
-
- default:
- abort();
- }
-#else
- /* I hope GCC is smarter on your platform. */
- return !memcmp(a, b, n);
-#endif
+ return table;
}
-/* Returns the 32-bit unsigned integer at 'p'. */
-static inline uint32_t
-read_uint32(const void *p)
+static void
+destroy_table(struct classifier *cls, struct cls_table *table)
{
- /* GCC optimizes this into a single machine instruction on x86. */
- uint32_t x;
- memcpy(&x, p, sizeof x);
- return x;
+ hmap_remove(&cls->tables, &table->hmap_node);
+ hmap_destroy(&table->rules);
+ free(table);
}
-/* Compares the specified field in 'a' and 'b'. Returns true if the fields are
- * equal, or if the ofp_match wildcard bits in 'wildcards' are set such that
- * non-equal values may be ignored. 'nw_src_mask' and 'nw_dst_mask' must be
- * those that would be set for 'wildcards' by cls_rule_set_masks().
- *
- * The compared field is the one with wildcard bit or bits 'field_wc', offset
- * 'rule_ofs' within cls_rule's "fields" member, and length 'len', in bytes. */
-static inline bool ALWAYS_INLINE
-field_matches(const flow_t *a_, const flow_t *b_,
- uint32_t wildcards, uint32_t nw_src_mask, uint32_t nw_dst_mask,
- uint32_t field_wc, int ofs, int len)
-{
- /* This function is always inlined, and it is always called with 'field_wc'
- * as a compile-time constant, so the "if" conditionals here generate no
- * code. */
- const void *a = (const uint8_t *) a_ + ofs;
- const void *b = (const uint8_t *) b_ + ofs;
- if (!(field_wc & (field_wc - 1))) {
- /* Handle all the single-bit wildcard cases. */
- return wildcards & field_wc || equal_bytes(a, b, len);
- } else if (field_wc == OFPFW_NW_SRC_MASK ||
- field_wc == OFPFW_NW_DST_MASK) {
- uint32_t a_ip = read_uint32(a);
- uint32_t b_ip = read_uint32(b);
- uint32_t mask = (field_wc == OFPFW_NW_SRC_MASK
- ? nw_src_mask : nw_dst_mask);
- return ((a_ip ^ b_ip) & mask) == 0;
+static struct cls_rule *
+find_match(const struct cls_table *table, const struct flow *flow)
+{
+ struct cls_rule *rule;
+
+ if (table->is_catchall) {
+ HMAP_FOR_EACH (rule, hmap_node, &table->rules) {
+ return rule;
+ }
} else {
- abort();
+ struct flow f;
+
+ f = *flow;
+ flow_zero_wildcards(&f, &table->wc);
+ HMAP_FOR_EACH_WITH_HASH (rule, hmap_node, flow_hash(&f, 0),
+ &table->rules) {
+ if (flow_equal(&f, &rule->flow)) {
+ return rule;
+ }
+ }
}
-}
-/* Returns true if 'a' and 'b' match, ignoring fields for which the wildcards
- * in 'wildcards' are set. 'nw_src_mask' and 'nw_dst_mask' must be those that
- * would be set for 'wildcards' by cls_rule_set_masks(). 'field_idx' is the
- * index of the first field to be compared; fields before 'field_idx' are
- * assumed to match. (Always returns true if 'field_idx' is CLS_N_FIELDS.) */
-static bool
-rules_match(const struct cls_rule *a, const struct cls_rule *b,
- uint32_t wildcards, uint32_t nw_src_mask, uint32_t nw_dst_mask,
- int field_idx)
-{
- /* This is related to Duff's device (see
- * http://en.wikipedia.org/wiki/Duff's_device). */
- switch (field_idx) {
-#define CLS_FIELD(WILDCARDS, MEMBER, NAME) \
- case CLS_F_IDX_##NAME: \
- if (!field_matches(&a->flow, &b->flow, \
- wildcards, nw_src_mask, nw_dst_mask, \
- WILDCARDS, offsetof(flow_t, MEMBER), \
- sizeof a->flow.MEMBER)) { \
- return false; \
- } \
- /* Fall though */
- CLS_FIELDS
-#undef CLS_FIELD
- }
- return true;
+ return NULL;
}
-/* Returns true if 'fixed' and 'wild' match. All fields in 'fixed' must have
- * fixed values; 'wild' may contain wildcards.
- *
- * 'field_idx' is the index of the first field to be compared; fields before
- * 'field_idx' are assumed to match. Always returns true if 'field_idx' is
- * CLS_N_FIELDS. */
-static bool
-rules_match_1wild(const struct cls_rule *fixed, const struct cls_rule *wild,
- int field_idx)
+static struct cls_rule *
+find_equal(struct cls_table *table, const struct flow *flow, uint32_t hash)
{
- return rules_match(fixed, wild, wild->wc.wildcards, wild->wc.nw_src_mask,
- wild->wc.nw_dst_mask, field_idx);
+ struct cls_rule *head;
+
+ HMAP_FOR_EACH_WITH_HASH (head, hmap_node, hash, &table->rules) {
+ if (flow_equal(&head->flow, flow)) {
+ return head;
+ }
+ }
+ return NULL;
}
-/* Searches 'bucket' for a rule that matches 'target'. Returns the
- * highest-priority match, if one is found, or a null pointer if there is no
- * match.
- *
- * 'field_idx' must be the index of the first wildcarded field in 'bucket'. */
static struct cls_rule *
-search_bucket(struct cls_bucket *bucket, int field_idx,
- const struct cls_rule *target)
+insert_rule(struct cls_table *table, struct cls_rule *new)
{
- struct cls_rule *pos;
+ struct cls_rule *head;
- if (!equal_fields(&bucket->fixed, &target->flow, field_idx)) {
+ new->hmap_node.hash = flow_hash(&new->flow, 0);
+
+ head = find_equal(table, &new->flow, new->hmap_node.hash);
+ if (!head) {
+ hmap_insert(&table->rules, &new->hmap_node, new->hmap_node.hash);
+ list_init(&new->list);
return NULL;
- }
+ } else {
+ /* Scan the list for the insertion point that will keep the list in
+ * order of decreasing priority. */
+ struct cls_rule *rule;
+ FOR_EACH_RULE_IN_LIST (rule, head) {
+ if (new->priority >= rule->priority) {
+ if (rule == head) {
+ /* 'new' is the new highest-priority flow in the list. */
+ hmap_replace(&table->rules,
+ &rule->hmap_node, &new->hmap_node);
+ }
- LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
- if (rules_match_1wild(target, pos, field_idx)) {
- return pos;
+ if (new->priority == rule->priority) {
+ list_replace(&new->list, &rule->list);
+ return rule;
+ } else {
+ list_insert(&rule->list, &new->list);
+ return NULL;
+ }
+ }
}
+
+ /* Insert 'new' at the end of the list. */
+ list_push_back(&head->list, &new->list);
+ return NULL;
}
- return NULL;
}
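+
+/* Layout sketch for the list maintained above: if three rules share identical
+ * flow and wildcards but have priorities 70, 50, and 20, the priority-70 rule
+ * is the head stored in the table's hmap and the other two hang off its
+ * 'list' member in decreasing priority order:
+ *
+ *     hmap: [priority 70] -> list: [priority 50] -> [priority 20]
+ *
+ * next_rule_in_list() below walks this list. */
+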
-/* Searches 'table' for a rule that matches 'target'. Returns the
- * highest-priority match, if one is found, or a null pointer if there is no
- * match.
- *
- * 'field_idx' must be the index of the first wildcarded field in 'table'. */
static struct cls_rule *
-search_table(const struct hmap *table, int field_idx,
- const struct cls_rule *target)
+next_rule_in_list__(struct cls_rule *rule)
{
- struct cls_bucket *bucket;
+ struct cls_rule *next = OBJECT_CONTAINING(rule->list.next, next, list);
+ return next;
+}
- switch (hmap_count(table)) {
- /* In these special cases there's no need to hash. */
- case 0:
- return NULL;
- case 1:
- bucket = CONTAINER_OF(hmap_first(table), struct cls_bucket, hmap_node);
- return search_bucket(bucket, field_idx, target);
- }
+static struct cls_rule *
+next_rule_in_list(struct cls_rule *rule)
+{
+ struct cls_rule *next = next_rule_in_list__(rule);
+ return next->priority < rule->priority ? next : NULL;
+}
- HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node,
- hash_fields(&target->flow, field_idx), table) {
- struct cls_rule *rule = search_bucket(bucket, field_idx, target);
- if (rule) {
- return rule;
+static bool
+ipv6_equal_except(const struct in6_addr *a, const struct in6_addr *b,
+ const struct in6_addr *mask)
+{
+ int i;
+
+#ifdef s6_addr32
+ for (i=0; i<4; i++) {
+ if ((a->s6_addr32[i] ^ b->s6_addr32[i]) & mask->s6_addr32[i]) {
+ return false;
}
}
- return NULL;
+#else
+ for (i=0; i<16; i++) {
+ if ((a->s6_addr[i] ^ b->s6_addr[i]) & mask->s6_addr[i]) {
+ return false;
+ }
+ }
+#endif
+
+ return true;
}
-static struct cls_rule *
-search_exact_table(const struct classifier *cls, size_t hash,
- const flow_t *target)
+
+static bool
+flow_equal_except(const struct flow *a, const struct flow *b,
+ const struct flow_wildcards *wildcards)
{
- struct cls_rule *rule;
+ const flow_wildcards_t wc = wildcards->wildcards;
+ int i;
- HMAP_FOR_EACH_WITH_HASH (rule, struct cls_rule, node.hmap,
- hash, &cls->exact_table) {
- if (flow_equal(&rule->flow, target)) {
- return rule;
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 10);
+
+ for (i = 0; i < FLOW_N_REGS; i++) {
+ if ((a->regs[i] ^ b->regs[i]) & wildcards->reg_masks[i]) {
+ return false;
}
}
- return NULL;
+
+ return (!((a->tun_id ^ b->tun_id) & wildcards->tun_id_mask)
+ && !((a->nw_src ^ b->nw_src) & wildcards->nw_src_mask)
+ && !((a->nw_dst ^ b->nw_dst) & wildcards->nw_dst_mask)
+ && (wc & FWW_IN_PORT || a->in_port == b->in_port)
+ && !((a->vlan_tci ^ b->vlan_tci) & wildcards->vlan_tci_mask)
+ && (wc & FWW_DL_TYPE || a->dl_type == b->dl_type)
+ && !((a->tp_src ^ b->tp_src) & wildcards->tp_src_mask)
+ && !((a->tp_dst ^ b->tp_dst) & wildcards->tp_dst_mask)
+ && (wc & FWW_DL_SRC || eth_addr_equals(a->dl_src, b->dl_src))
+ && (wc & FWW_DL_DST
+ || (!((a->dl_dst[0] ^ b->dl_dst[0]) & 0xfe)
+ && a->dl_dst[1] == b->dl_dst[1]
+ && a->dl_dst[2] == b->dl_dst[2]
+ && a->dl_dst[3] == b->dl_dst[3]
+ && a->dl_dst[4] == b->dl_dst[4]
+ && a->dl_dst[5] == b->dl_dst[5]))
+ && (wc & FWW_ETH_MCAST
+ || !((a->dl_dst[0] ^ b->dl_dst[0]) & 0x01))
+ && (wc & FWW_NW_PROTO || a->nw_proto == b->nw_proto)
+ && (wc & FWW_NW_TTL || a->nw_ttl == b->nw_ttl)
+ && (wc & FWW_NW_DSCP || !((a->nw_tos ^ b->nw_tos) & IP_DSCP_MASK))
+ && (wc & FWW_NW_ECN || !((a->nw_tos ^ b->nw_tos) & IP_ECN_MASK))
+ && !((a->nw_frag ^ b->nw_frag) & wildcards->nw_frag_mask)
+ && (wc & FWW_ARP_SHA || eth_addr_equals(a->arp_sha, b->arp_sha))
+ && (wc & FWW_ARP_THA || eth_addr_equals(a->arp_tha, b->arp_tha))
+ && (wc & FWW_IPV6_LABEL || a->ipv6_label == b->ipv6_label)
+ && ipv6_equal_except(&a->ipv6_src, &b->ipv6_src,
+ &wildcards->ipv6_src_mask)
+ && ipv6_equal_except(&a->ipv6_dst, &b->ipv6_dst,
+ &wildcards->ipv6_dst_mask)
+ && ipv6_equal_except(&a->nd_target, &b->nd_target,
+ &wildcards->nd_target_mask));
}