struct cls_subtable_entry {
struct cls_subtable *subtable;
- const uint32_t *mask_values;
tag_type tag;
unsigned int max_priority;
};
struct hmap_node hmap_node; /* Within struct cls_classifier 'subtables'
* hmap. */
struct hmap rules; /* Contains "struct cls_rule"s. */
- struct minimask mask; /* Wildcards for fields. */
int n_rules; /* Number of rules, including duplicates. */
unsigned int max_priority; /* Max priority of any rule in the subtable. */
unsigned int max_count; /* Count of max_priority rules. */
uint8_t index_ofs[CLS_MAX_INDICES]; /* u32 flow segment boundaries. */
struct hindex indices[CLS_MAX_INDICES]; /* Staged lookup indices. */
unsigned int trie_plen[CLS_MAX_TRIES]; /* Trie prefix length in 'mask'. */
+ struct minimask mask; /* Wildcards for fields. */
+ /* 'mask' must be the last field. */
};
/* Associates a metadata value (that is, a value of the OpenFlow 1.1+ metadata
unsigned int priority; /* Larger numbers are higher priorities. */
struct cls_partition *partition;
struct list list; /* List of identical, lower-priority rules. */
- struct minimatch match; /* Matching rule. */
+ struct miniflow flow; /* Matching rule. Mask is in the subtable. */
+ /* 'flow' must be the last field. */
};
static struct cls_match *
cls_match_alloc(struct cls_rule *rule)
{
- struct cls_match *cls_match = xmalloc(sizeof *cls_match);
+ int count = count_1bits(rule->match.flow.map);
+
+ struct cls_match *cls_match
+ = xmalloc(sizeof *cls_match - sizeof cls_match->flow.inline_values
+ + MINIFLOW_VALUES_SIZE(count));
cls_match->cls_rule = rule;
- minimatch_clone(&cls_match->match, &rule->match);
+ miniflow_clone_inline(&cls_match->flow, &rule->match.flow, count);
cls_match->priority = rule->priority;
rule->cls_match = cls_match;
lookahead_subtable(const struct cls_subtable_entry *subtables)
{
ovs_prefetch_range(subtables->subtable, sizeof *subtables->subtable);
- ovs_prefetch_range(subtables->mask_values, 1);
}
/* Finds and returns the highest-priority rule in 'cls' that matches 'flow'.
}
/* Returns true if 'target' satisifies 'match', that is, if each bit for which
- * 'match' specifies a particular value has the correct value in 'target'. */
+ * 'match' specifies a particular value has the correct value in 'target'.
+ *
+ * 'flow' and 'mask' have the same mask! */
static bool
-minimatch_matches_miniflow(const struct minimatch *match,
- const struct miniflow *target)
+miniflow_and_mask_matches_miniflow(const struct miniflow *flow,
+ const struct minimask *mask,
+ const struct miniflow *target)
{
- const uint32_t *flowp = miniflow_get_u32_values(&match->flow);
- const uint32_t *maskp = miniflow_get_u32_values(&match->mask.masks);
+ const uint32_t *flowp = miniflow_get_u32_values(flow);
+ const uint32_t *maskp = miniflow_get_u32_values(&mask->masks);
uint32_t target_u32;
- MINIFLOW_FOR_EACH_IN_MAP(target_u32, target, match->mask.masks.map) {
+ MINIFLOW_FOR_EACH_IN_MAP(target_u32, target, mask->masks.map) {
if ((*flowp++ ^ target_u32) & *maskp++) {
return false;
}
struct cls_match *rule;
HMAP_FOR_EACH_WITH_HASH (rule, hmap_node, hash, &subtable->rules) {
- if (minimatch_matches_miniflow(&rule->match, flow)) {
+ if (miniflow_and_mask_matches_miniflow(&rule->flow, &subtable->mask,
+ flow)) {
return rule;
}
}
}
if (rule->priority == target->priority
&& miniflow_equal_in_minimask(&target->match.flow,
- &rule->match.flow, &mask)) {
+ &rule->flow, &mask)) {
return true;
}
}
rule_matches(const struct cls_match *rule, const struct cls_rule *target)
{
return (!target
- || miniflow_equal_in_minimask(&rule->match.flow,
+ || miniflow_equal_in_minimask(&rule->flow,
&target->match.flow,
&target->match.mask));
}
struct flow_wildcards old, new;
uint8_t prev;
struct cls_subtable_entry elem;
+ int count = count_1bits(mask->masks.map);
- subtable = xzalloc(sizeof *subtable);
+ subtable = xzalloc(sizeof *subtable - sizeof mask->masks.inline_values
+ + MINIFLOW_VALUES_SIZE(count));
hmap_init(&subtable->rules);
- minimask_clone(&subtable->mask, mask);
+ miniflow_clone_inline(&subtable->mask.masks, &mask->masks, count);
/* Init indices for segmented lookup, if any. */
flow_wildcards_init_catchall(&new);
hmap_insert(&cls->subtables, &subtable->hmap_node, hash);
elem.subtable = subtable;
- elem.mask_values = miniflow_get_values(&subtable->mask.masks);
elem.tag = subtable->tag;
elem.max_priority = subtable->max_priority;
cls_subtable_cache_push_back(&cls->subtables_priority, elem);
return false;
}
+/* Returns true if 'target' satisifies 'flow'/'mask', that is, if each bit
+ * for which 'flow', for which 'mask' has a bit set, specifies a particular
+ * value has the correct value in 'target'.
+ *
+ * This function is equivalent to miniflow_equal_flow_in_minimask(flow,
+ * target, mask) but it is faster because of the invariant that
+ * flow->map and mask->masks.map are the same. */
+static inline bool
+miniflow_and_mask_matches_flow(const struct miniflow *flow,
+ const struct minimask *mask,
+ const struct flow *target)
+{
+ const uint32_t *flowp = miniflow_get_u32_values(flow);
+ const uint32_t *maskp = miniflow_get_u32_values(&mask->masks);
+ uint32_t target_u32;
+
+ FLOW_FOR_EACH_IN_MAP(target_u32, target, mask->masks.map) {
+ if ((*flowp++ ^ target_u32) & *maskp++) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
static inline struct cls_match *
find_match(const struct cls_subtable *subtable, const struct flow *flow,
uint32_t hash)
struct cls_match *rule;
HMAP_FOR_EACH_WITH_HASH (rule, hmap_node, hash, &subtable->rules) {
- if (minimatch_matches_flow(&rule->match, flow)) {
+ if (miniflow_and_mask_matches_flow(&rule->flow, &subtable->mask,
+ flow)) {
return rule;
}
}
* not match, then we know that we will never get a match, but we do
* not yet know how many wildcards we need to fold into 'wc' so we
* continue iterating through indices to find that out. (We won't
- * waste time calling minimatch_matches_flow() again because we've set
- * 'rule' nonnull.)
+ * waste time calling miniflow_and_mask_matches_flow() again because
+ * we've set 'rule' nonnull.)
*
* This check shows a measurable benefit with non-trivial flow tables.
*
* optimization. */
if (!inode->s && !rule) {
ASSIGN_CONTAINER(rule, inode - i, index_nodes);
- if (minimatch_matches_flow(&rule->match, flow)) {
+ if (miniflow_and_mask_matches_flow(&rule->flow, &subtable->mask,
+ flow)) {
goto out;
}
}
struct cls_match *head;
HMAP_FOR_EACH_WITH_HASH (head, hmap_node, hash, &subtable->rules) {
- if (miniflow_equal(&head->match.flow, flow)) {
+ if (miniflow_equal(&head->flow, flow)) {
return head;
}
}
static uint32_t *
miniflow_alloc_values(struct miniflow *flow, int n)
{
- if (n <= sizeof flow->inline_values / sizeof(uint32_t)) {
+ int size = MINIFLOW_VALUES_SIZE(n);
+
+ if (size <= sizeof flow->inline_values) {
flow->values_inline = true;
return flow->inline_values;
} else {
COVERAGE_INC(miniflow_malloc);
flow->values_inline = false;
- flow->offline_values = xmalloc(n * sizeof(uint32_t));
+ flow->offline_values = xmalloc(size);
return flow->offline_values;
}
}
/* Initializes 'dst' as a copy of 'src'.  The caller must eventually free
 * 'dst' with miniflow_destroy(). */
void
miniflow_clone(struct miniflow *dst, const struct miniflow *src)
{
    int n = miniflow_n_values(src);

    dst->map = src->map;
    /* miniflow_alloc_values() chooses inline vs. malloc()'ed storage and
     * sets 'dst->values_inline', exactly as the previously open-coded
     * logic did, so reuse it instead of duplicating that logic here. */
    memcpy(miniflow_alloc_values(dst, n), miniflow_get_values(src),
           MINIFLOW_VALUES_SIZE(n));
}
+
+/* Initializes 'dst' as a copy of 'src'. The caller must have allocated
+ * 'dst' to have inline space all data in 'src'. */
+void
+miniflow_clone_inline(struct miniflow *dst, const struct miniflow *src,
+ size_t n_values)
+{
+ dst->values_inline = true;
+ dst->map = src->map;
+ memcpy(dst->inline_values, miniflow_get_values(src),
+ MINIFLOW_VALUES_SIZE(n_values));
}
/* Initializes 'dst' with the data in 'src', destroying 'src'.
- * The caller must eventually free 'dst' with miniflow_destroy(). */
+ * The caller must eventually free 'dst' with miniflow_destroy().
+ * 'dst' must be regularly sized miniflow, but 'src' can have
+ * larger than default inline values. */
void
miniflow_move(struct miniflow *dst, struct miniflow *src)
{
- *dst = *src;
+ int size = MINIFLOW_VALUES_SIZE(miniflow_n_values(src));
+
+ dst->map = src->map;
+ if (size <= sizeof dst->inline_values) {
+ dst->values_inline = true;
+ memcpy(dst->inline_values, miniflow_get_values(src), size);
+ miniflow_destroy(src);
+ } else if (src->values_inline) {
+ dst->values_inline = false;
+ COVERAGE_INC(miniflow_malloc);
+ dst->offline_values = xmalloc(size);
+ memcpy(dst->offline_values, src->inline_values, size);
+ } else {
+ dst->values_inline = false;
+ dst->offline_values = src->offline_values;
+ }
}
/* Frees any memory owned by 'flow'. Does not free the storage in which 'flow'
};
};
/* Number of bytes needed to store 'COUNT' miniflow u32 values. */
#define MINIFLOW_VALUES_SIZE(COUNT) ((COUNT) * sizeof(uint32_t))

static inline uint32_t *miniflow_values(struct miniflow *mf)
{
- return mf->values_inline ? mf->inline_values : mf->offline_values;
+ return OVS_LIKELY(mf->values_inline)
+ ? mf->inline_values : mf->offline_values;
}
static inline const uint32_t *miniflow_get_values(const struct miniflow *mf)
{
- return mf->values_inline ? mf->inline_values : mf->offline_values;
+ return OVS_LIKELY(mf->values_inline)
+ ? mf->inline_values : mf->offline_values;
}
static inline const uint32_t *miniflow_get_u32_values(const struct miniflow *mf)
void miniflow_init_with_minimask(struct miniflow *, const struct flow *,
const struct minimask *);
void miniflow_clone(struct miniflow *, const struct miniflow *);
+void miniflow_clone_inline(struct miniflow *, const struct miniflow *,
+ size_t n_values);
void miniflow_move(struct miniflow *dst, struct miniflow *);
void miniflow_destroy(struct miniflow *);
void miniflow_expand(const struct miniflow *, struct flow *);
/* If 'map' is nonzero, stores in '*value' the flow u32 value indexed by the
 * rightmost 1-bit in 'map' and returns true; otherwise returns false.
 * Helper for FLOW_FOR_EACH_IN_MAP.
 *
 * This is a predicate, so it returns 'bool' rather than 'uint32_t'; all
 * callers use the result only in a boolean context. */
static inline bool
flow_get_next_in_map(const struct flow *flow, uint64_t map, uint32_t *value)
{
    if (map) {
        *value = ((const uint32_t *)flow)[raw_ctz(map)];
        return true;
    }
    return false;
}
+
/* Iterates through all flow u32 values for which the corresponding bit in
 * 'MAP' is set, assigning each to 'VALUE' in turn.
 *
 * The iteration state is declared in the for-init clause (C99), so unlike
 * a block-scope declaration this macro may appear anywhere a statement
 * may, and may be used more than once in the same block without
 * redeclaring 'map_'. */
#define FLOW_FOR_EACH_IN_MAP(VALUE, FLOW, MAP)                          \
    for (uint64_t map_ = (MAP);                                         \
         flow_get_next_in_map((FLOW), map_, &(VALUE));                  \
         map_ = zero_rightmost_1bit(map_))
+
#define FLOW_U32_SIZE(FIELD) \
DIV_ROUND_UP(sizeof(((struct flow *)0)->FIELD), sizeof(uint32_t))
return rm1bit != 0;
}
-/* Iterate through all miniflow u32 values specified by the 'MAP'.
+/* Iterate through all miniflow u32 values specified by 'MAP'.
* This works as the first statement in a block.*/
#define MINIFLOW_FOR_EACH_IN_MAP(VALUE, FLOW, MAP) \
const uint32_t *fp_ = miniflow_get_u32_values(FLOW); \