VLOG_DEFINE_THIS_MODULE(classifier);
struct trie_node;
+struct trie_ctx;
+
+/* Ports trie depends on both ports sharing the same ovs_be32. */
+#define TP_PORTS_OFS32 (offsetof(struct flow, tp_src) / 4)
+BUILD_ASSERT_DECL(TP_PORTS_OFS32 == offsetof(struct flow, tp_dst) / 4);
/* Prefix trie for a 'field' */
struct cls_trie {
+/* Cached per-subtable summary kept in the classifier's 'subtables_priority'
+ * vector (see cls_subtable_cache_push_back()); lets lookup iterate subtables
+ * without dereferencing each full 'struct cls_subtable' up front. */
struct cls_subtable_entry {
struct cls_subtable *subtable;
- uint32_t *mask_values;
tag_type tag;
unsigned int max_priority;
};
struct hmap_node hmap_node; /* Within struct cls_classifier 'subtables'
* hmap. */
struct hmap rules; /* Contains "struct cls_rule"s. */
- struct minimask mask; /* Wildcards for fields. */
int n_rules; /* Number of rules, including duplicates. */
unsigned int max_priority; /* Max priority of any rule in the subtable. */
unsigned int max_count; /* Count of max_priority rules. */
uint8_t index_ofs[CLS_MAX_INDICES]; /* u32 flow segment boundaries. */
struct hindex indices[CLS_MAX_INDICES]; /* Staged lookup indices. */
unsigned int trie_plen[CLS_MAX_TRIES]; /* Trie prefix length in 'mask'. */
+ int ports_mask_len;
+ struct trie_node *ports_trie; /* NULL if none. */
+ struct minimask mask; /* Wildcards for fields. */
+ /* 'mask' must be the last field. */
};
/* Associates a metadata value (that is, a value of the OpenFlow 1.1+ metadata
unsigned int priority; /* Larger numbers are higher priorities. */
struct cls_partition *partition;
struct list list; /* List of identical, lower-priority rules. */
- struct minimatch match; /* Matching rule. */
+ struct miniflow flow; /* Matching rule. Mask is in the subtable. */
+ /* 'flow' must be the last field. */
};
+/* Allocates a cls_match for 'rule' with the miniflow values stored inline:
+ * the allocation is sized down by the fixed 'inline_values' array and back up
+ * by the space actually needed for the 'count' u32s present in the rule's
+ * flow map.  This relies on 'flow' being the last member of struct cls_match
+ * (see the struct definition). */
static struct cls_match *
cls_match_alloc(struct cls_rule *rule)
{
- struct cls_match *cls_match = xmalloc(sizeof *cls_match);
+ int count = count_1bits(rule->match.flow.map);
+
+ struct cls_match *cls_match
+ = xmalloc(sizeof *cls_match - sizeof cls_match->flow.inline_values
+ + MINIFLOW_VALUES_SIZE(count));
cls_match->cls_rule = rule;
- minimatch_clone(&cls_match->match, &rule->match);
+ miniflow_clone_inline(&cls_match->flow, &rule->match.flow, count);
cls_match->priority = rule->priority;
rule->cls_match = cls_match;
return cls_match;
}
-struct trie_ctx;
static struct cls_subtable *find_subtable(const struct cls_classifier *,
const struct minimask *);
static struct cls_subtable *insert_subtable(struct cls_classifier *,
const struct mf_field *);
static unsigned int trie_lookup(const struct cls_trie *, const struct flow *,
unsigned int *checkbits);
-
+static unsigned int trie_lookup_value(const struct trie_node *,
+ const ovs_be32 value[],
+ unsigned int *checkbits);
static void trie_destroy(struct trie_node *);
static void trie_insert(struct cls_trie *, const struct cls_rule *, int mlen);
+static void trie_insert_prefix(struct trie_node **, const ovs_be32 *prefix,
+ int mlen);
static void trie_remove(struct cls_trie *, const struct cls_rule *, int mlen);
+static void trie_remove_prefix(struct trie_node **, const ovs_be32 *prefix,
+ int mlen);
static void mask_set_prefix_bits(struct flow_wildcards *, uint8_t be32ofs,
unsigned int nbits);
static bool mask_prefix_bits_set(const struct flow_wildcards *,
flow_hash_in_minimask(const struct flow *flow, const struct minimask *mask,
uint32_t basis)
{
+ const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
const uint32_t *flow_u32 = (const uint32_t *)flow;
- const uint32_t *p = mask->masks.values;
+ const uint32_t *p = mask_values;
uint32_t hash;
uint64_t map;
hash = mhash_add(hash, flow_u32[raw_ctz(map)] & *p++);
}
- return mhash_finish(hash, (p - mask->masks.values) * 4);
+ return mhash_finish(hash, (p - mask_values) * 4);
}
/* Returns a hash value for the bits of 'flow' where there are 1-bits in
miniflow_hash_in_minimask(const struct miniflow *flow,
const struct minimask *mask, uint32_t basis)
{
- const uint32_t *p = mask->masks.values;
+ const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
+ const uint32_t *p = mask_values;
uint32_t hash = basis;
uint32_t flow_u32;
hash = mhash_add(hash, flow_u32 & *p++);
}
- return mhash_finish(hash, (p - mask->masks.values) * 4);
+ return mhash_finish(hash, (p - mask_values) * 4);
}
/* Returns a hash value for the bits of range [start, end) in 'flow',
const struct minimask *mask,
uint8_t start, uint8_t end, uint32_t *basis)
{
+ const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
const uint32_t *flow_u32 = (const uint32_t *)flow;
unsigned int offset;
uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
&offset);
- const uint32_t *p = mask->masks.values + offset;
+ const uint32_t *p = mask_values + offset;
uint32_t hash = *basis;
for (; map; map = zero_rightmost_1bit(map)) {
}
*basis = hash; /* Allow continuation from the unfinished value. */
- return mhash_finish(hash, (p - mask->masks.values) * 4);
+ return mhash_finish(hash, (p - mask_values) * 4);
}
/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask. */
unsigned int offset;
uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
&offset);
- const uint32_t *p = mask->masks.values + offset;
+ const uint32_t *p = miniflow_get_u32_values(&mask->masks) + offset;
for (; map; map = zero_rightmost_1bit(map)) {
dst_u32[raw_ctz(map)] |= *p++;
static inline uint32_t
miniflow_hash(const struct miniflow *flow, uint32_t basis)
{
- const uint32_t *p = flow->values;
+ const uint32_t *values = miniflow_get_u32_values(flow);
+ const uint32_t *p = values;
uint32_t hash = basis;
uint64_t hash_map = 0;
uint64_t map;
hash = mhash_add(hash, hash_map);
hash = mhash_add(hash, hash_map >> 32);
- return mhash_finish(hash, p - flow->values);
+ return mhash_finish(hash, p - values);
}
/* Returns a hash value for 'mask', given 'basis'. */
n = count_1bits(miniflow_get_map_in_range(&match->mask.masks, start, end,
&offset));
- q = match->mask.masks.values + offset;
- p = match->flow.values + offset;
+ q = miniflow_get_u32_values(&match->mask.masks) + offset;
+ p = miniflow_get_u32_values(&match->flow) + offset;
for (i = 0; i < n; i++) {
hash = mhash_add(hash, p[i] & q[i]);
return partition;
}
+/* Returns 'match''s tp_src/tp_dst pair, masked by the rule's port mask, as a
+ * single ovs_be32.  Both ports share one 32-bit word in struct flow (see the
+ * TP_PORTS_OFS32 build assertion), so reading 32 bits at tp_src covers both. */
+static inline ovs_be32 minimatch_get_ports(const struct minimatch *match)
+{
+ /* Could optimize to use the same map if needed for fast path. */
+ return MINIFLOW_GET_BE32(&match->flow, tp_src)
+ & MINIFLOW_GET_BE32(&match->mask.masks, tp_src);
+}
+
/* Inserts 'rule' into 'cls'. Until 'rule' is removed from 'cls', the caller
* must not modify or free it.
*
trie_insert(&cls->tries[i], rule, subtable->trie_plen[i]);
}
}
+
+ /* Ports trie. */
+ if (subtable->ports_mask_len) {
+ /* We mask the value to be inserted to always have the wildcarded
+ * bits in known (zero) state, so we can include them in comparison
+ * and they will always match (== their original value does not
+ * matter). */
+ ovs_be32 masked_ports = minimatch_get_ports(&rule->match);
+
+ trie_insert_prefix(&subtable->ports_trie, &masked_ports,
+ subtable->ports_mask_len);
+ }
+
return NULL;
} else {
struct cls_rule *old_cls_rule = old_rule->cls_rule;
ovs_assert(cls_match);
subtable = find_subtable(cls, &rule->match.mask);
-
ovs_assert(subtable);
+ if (subtable->ports_mask_len) {
+ ovs_be32 masked_ports = minimatch_get_ports(&rule->match);
+
+ trie_remove_prefix(&subtable->ports_trie,
+ &masked_ports, subtable->ports_mask_len);
+ }
for (i = 0; i < cls->n_tries; i++) {
if (subtable->trie_plen[i]) {
trie_remove(&cls->tries[i], rule, subtable->trie_plen[i]);
lookahead_subtable(const struct cls_subtable_entry *subtables)
{
ovs_prefetch_range(subtables->subtable, sizeof *subtables->subtable);
- ovs_prefetch_range(subtables->mask_values, 1);
}
/* Finds and returns the highest-priority rule in 'cls' that matches 'flow'.
}
/* Returns true if 'target' satisifies 'match', that is, if each bit for which
- * 'match' specifies a particular value has the correct value in 'target'. */
+ * 'match' specifies a particular value has the correct value in 'target'.
+ *
+ * 'flow' and 'mask->masks' must have the same 'map' (the two value arrays
+ * are walked in lockstep)! */
static bool
-minimatch_matches_miniflow(const struct minimatch *match,
- const struct miniflow *target)
+miniflow_and_mask_matches_miniflow(const struct miniflow *flow,
+ const struct minimask *mask,
+ const struct miniflow *target)
{
- const uint32_t *flowp = (const uint32_t *)match->flow.values;
- const uint32_t *maskp = (const uint32_t *)match->mask.masks.values;
+ const uint32_t *flowp = miniflow_get_u32_values(flow);
+ const uint32_t *maskp = miniflow_get_u32_values(&mask->masks);
uint32_t target_u32;
- MINIFLOW_FOR_EACH_IN_MAP(target_u32, target, match->mask.masks.map) {
+ MINIFLOW_FOR_EACH_IN_MAP(target_u32, target, mask->masks.map) {
if ((*flowp++ ^ target_u32) & *maskp++) {
return false;
}
struct cls_match *rule;
HMAP_FOR_EACH_WITH_HASH (rule, hmap_node, hash, &subtable->rules) {
- if (minimatch_matches_miniflow(&rule->match, flow)) {
+ if (miniflow_and_mask_matches_miniflow(&rule->flow, &subtable->mask,
+ flow)) {
return rule;
}
}
}
if (rule->priority == target->priority
&& miniflow_equal_in_minimask(&target->match.flow,
- &rule->match.flow, &mask)) {
+ &rule->flow, &mask)) {
return true;
}
}
rule_matches(const struct cls_match *rule, const struct cls_rule *target)
{
return (!target
- || miniflow_equal_in_minimask(&rule->match.flow,
+ || miniflow_equal_in_minimask(&rule->flow,
&target->match.flow,
&target->match.mask));
}
struct flow_wildcards old, new;
uint8_t prev;
struct cls_subtable_entry elem;
+ int count = count_1bits(mask->masks.map);
- subtable = xzalloc(sizeof *subtable);
+ subtable = xzalloc(sizeof *subtable - sizeof mask->masks.inline_values
+ + MINIFLOW_VALUES_SIZE(count));
hmap_init(&subtable->rules);
- minimask_clone(&subtable->mask, mask);
+ miniflow_clone_inline(&subtable->mask.masks, &mask->masks, count);
/* Init indices for segmented lookup, if any. */
flow_wildcards_init_catchall(&new);
cls->tries[i].field);
}
+ /* Ports trie. */
+ subtable->ports_trie = NULL;
+ subtable->ports_mask_len
+ = 32 - ctz32(ntohl(MINIFLOW_GET_BE32(&mask->masks, tp_src)));
+
hmap_insert(&cls->subtables, &subtable->hmap_node, hash);
elem.subtable = subtable;
- elem.mask_values = subtable->mask.masks.values;
elem.tag = subtable->tag;
elem.max_priority = subtable->max_priority;
cls_subtable_cache_push_back(&cls->subtables_priority, elem);
}
}
+ trie_destroy(subtable->ports_trie);
+
for (i = 0; i < subtable->n_indices; i++) {
hindex_destroy(&subtable->indices[i]);
}
return false;
}
+/* Returns true if 'target' satisfies 'flow'/'mask', that is, if for each bit
+ * set in 'mask' the corresponding bit of 'flow' has the same value as the
+ * corresponding bit of 'target'.
+ *
+ * This function is equivalent to miniflow_equal_flow_in_minimask(flow,
+ * target, mask) but it is faster because of the invariant that
+ * flow->map and mask->masks.map are the same. */
+static inline bool
+miniflow_and_mask_matches_flow(const struct miniflow *flow,
+ const struct minimask *mask,
+ const struct flow *target)
+{
+ const uint32_t *flowp = miniflow_get_u32_values(flow);
+ const uint32_t *maskp = miniflow_get_u32_values(&mask->masks);
+ uint32_t target_u32;
+
+ FLOW_FOR_EACH_IN_MAP(target_u32, target, mask->masks.map) {
+ if ((*flowp++ ^ target_u32) & *maskp++) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
static inline struct cls_match *
find_match(const struct cls_subtable *subtable, const struct flow *flow,
uint32_t hash)
struct cls_match *rule;
HMAP_FOR_EACH_WITH_HASH (rule, hmap_node, hash, &subtable->rules) {
- if (minimatch_matches_flow(&rule->match, flow)) {
+ if (miniflow_and_mask_matches_flow(&rule->flow, &subtable->mask,
+ flow)) {
return rule;
}
}
* not match, then we know that we will never get a match, but we do
* not yet know how many wildcards we need to fold into 'wc' so we
* continue iterating through indices to find that out. (We won't
- * waste time calling minimatch_matches_flow() again because we've set
- * 'rule' nonnull.)
+ * waste time calling miniflow_and_mask_matches_flow() again because
+ * we've set 'rule' nonnull.)
*
* This check shows a measurable benefit with non-trivial flow tables.
*
* optimization. */
if (!inode->s && !rule) {
ASSIGN_CONTAINER(rule, inode - i, index_nodes);
- if (minimatch_matches_flow(&rule->match, flow)) {
+ if (miniflow_and_mask_matches_flow(&rule->flow, &subtable->mask,
+ flow)) {
goto out;
}
}
* but it didn't match. */
rule = NULL;
}
+ if (!rule && subtable->ports_mask_len) {
+ /* Ports are always part of the final range, if any.
+ * No match was found for the ports. Use the ports trie to figure out
+ * which ports bits to unwildcard. */
+ unsigned int mbits;
+ ovs_be32 value, mask;
+
+ mask = MINIFLOW_GET_BE32(&subtable->mask.masks, tp_src);
+ value = ((OVS_FORCE ovs_be32 *)flow)[TP_PORTS_OFS32] & mask;
+ trie_lookup_value(subtable->ports_trie, &value, &mbits);
+
+ ((OVS_FORCE ovs_be32 *)&wc->masks)[TP_PORTS_OFS32] |=
+ mask & htonl(~0 << (32 - mbits));
+
+ ofs.start = TP_PORTS_OFS32;
+ goto range_out;
+ }
out:
/* Must unwildcard all the fields, as they were looked at. */
flow_wildcards_fold_minimask(wc, &subtable->mask);
struct cls_match *head;
HMAP_FOR_EACH_WITH_HASH (head, hmap_node, hash, &subtable->rules) {
- if (miniflow_equal(&head->match.flow, flow)) {
+ if (miniflow_equal(&head->flow, flow)) {
return head;
}
}
+/* Returns a pointer to the u32 at be32 offset 'mf->flow_be32ofs' within
+ * 'match''s packed miniflow values; the count_1bits() over the lower map bits
+ * converts the flow offset into an index into the packed value array.
+ * NOTE(review): presumably 'mf' must be present in match->flow.map for the
+ * result to point at that field's value — confirm against callers. */
static const ovs_be32 *
minimatch_get_prefix(const struct minimatch *match, const struct mf_field *mf)
{
- return match->flow.values +
+ return miniflow_get_be32_values(&match->flow) +
count_1bits(match->flow.map & ((UINT64_C(1) << mf->flow_be32ofs) - 1));
}
+/* Inserts the value of 'trie->field' taken from 'rule' into the trie, using
+ * the first 'mlen' prefix bits.  Thin wrapper around trie_insert_prefix(). */
static void
trie_insert(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
- const ovs_be32 *prefix = minimatch_get_prefix(&rule->match, trie->field);
+ trie_insert_prefix(&trie->root,
+ minimatch_get_prefix(&rule->match, trie->field), mlen);
+}
+
+static void
+trie_insert_prefix(struct trie_node **edge, const ovs_be32 *prefix, int mlen)
+{
struct trie_node *node;
- struct trie_node **edge;
int ofs = 0;
/* Walk the tree. */
- for (edge = &trie->root;
- (node = *edge) != NULL;
+ for (; (node = *edge) != NULL;
edge = trie_next_edge(node, prefix, ofs)) {
unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);
ofs += eqbits;
+/* Removes the value of 'trie->field' taken from 'rule' from the trie.
+ * 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
+ * in 'rule'.  Thin wrapper around trie_remove_prefix(). */
static void
trie_remove(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
- const ovs_be32 *prefix = minimatch_get_prefix(&rule->match, trie->field);
+ trie_remove_prefix(&trie->root,
+ minimatch_get_prefix(&rule->match, trie->field), mlen);
+}
+
+/* 'mlen' must be the (non-zero) number of significant bits (the CIDR prefix
+ * length) in the mask that produced 'prefix'. */
+static void
+trie_remove_prefix(struct trie_node **root, const ovs_be32 *prefix, int mlen)
+{
struct trie_node *node;
struct trie_node **edges[sizeof(union mf_value) * 8];
int depth = 0, ofs = 0;
/* Walk the tree. */
- for (edges[depth] = &trie->root;
+ for (edges[0] = root;
(node = *edges[depth]) != NULL;
edges[++depth] = trie_next_edge(node, prefix, ofs)) {
unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);
+
if (eqbits < node->nbits) {
/* Mismatch, nothing to be removed. This should never happen, as
* only rules in the classifier are ever removed. */