+    int i;
+    struct cls_subtable *table = NULL;
+    struct cls_subtable_entry *iter;
+
+    CLS_SUBTABLE_CACHE_FOR_EACH (table, iter, &cls->subtables_priority) {
+        if (table == subtable) {
+            cls_subtable_cache_remove(&cls->subtables_priority, iter);
+            break;
+        }
+    }
+
+    trie_destroy(subtable->ports_trie);
+
+    for (i = 0; i < subtable->n_indices; i++) {
+        hindex_destroy(&subtable->indices[i]);
+    }
+    minimask_destroy(&subtable->mask);
+    hmap_remove(&cls->subtables, &subtable->hmap_node);
+    hmap_destroy(&subtable->rules);
+    free(subtable);
+}
+
+/* This function performs the following updates for 'subtable' in 'cls'
+ * following the addition of a new rule with priority 'new_priority' to
+ * 'subtable':
+ *
+ *    - Update 'subtable->max_priority' and 'subtable->max_count' if
+ *      necessary.
+ *
+ *    - Update 'subtable''s position in 'cls->subtables_priority' if
+ *      necessary.
+ *
+ * This function should only be called after adding a new rule, not after
+ * replacing a rule with an identical one or modifying a rule in-place. */
+static void
+update_subtables_after_insertion(struct cls_classifier *cls,
+                                 struct cls_subtable *subtable,
+                                 unsigned int new_priority)
+{
+    if (new_priority == subtable->max_priority) {
+        ++subtable->max_count;
+    } else if (new_priority > subtable->max_priority) {
+        struct cls_subtable *table;
+        struct cls_subtable_entry *iter, *subtable_iter = NULL;
+
+        subtable->max_priority = new_priority;
+        subtable->max_count = 1;
+
+        /* Possibly move 'subtable' earlier in the priority list.  If we
+         * break out of the loop, then 'subtable_iter' should be moved just
+         * before 'iter'.  If the loop terminates normally, then 'iter' will
+         * be the first list element and we'll move 'subtable' just before
+         * that (i.e. to the front of the list). */
+        CLS_SUBTABLE_CACHE_FOR_EACH_REVERSE (table, iter,
+                                             &cls->subtables_priority) {
+            if (table == subtable) {
+                subtable_iter = iter; /* Locate the subtable as we go. */
+                iter->max_priority = new_priority;
+            } else if (table->max_priority >= new_priority) {
+                ovs_assert(subtable_iter != NULL);
+                iter++;
+                break;
+            }
+        }
+
+        /* Move 'subtable' just before 'iter' (unless it's already there). */
+        if (iter != subtable_iter) {
+            cls_subtable_cache_splice(iter, subtable_iter, subtable_iter + 1);
+        }
+    }
+}
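+
+/* Illustrative sketch only: an insertion path would call the helper above
+ * only after linking in a genuinely new rule.  'insert_rule', 'head', and
+ * 'new_rule' below are assumptions for illustration, not names defined in
+ * this patch:
+ *
+ *     head = insert_rule(cls, subtable, new_rule);
+ *     if (!head) {
+ *         update_subtables_after_insertion(cls, subtable,
+ *                                          new_rule->priority);
+ *     }
+ */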
+
+/* This function performs the following updates for 'subtable' in 'cls'
+ * following the deletion of a rule with priority 'del_priority' from
+ * 'subtable':
+ *
+ *    - Update 'subtable->max_priority' and 'subtable->max_count' if
+ *      necessary.
+ *
+ *    - Update 'subtable''s position in 'cls->subtables_priority' if
+ *      necessary.
+ *
+ * This function should only be called after removing a rule, not after
+ * replacing a rule with an identical one or modifying a rule in-place. */
+static void
+update_subtables_after_removal(struct cls_classifier *cls,
+                               struct cls_subtable *subtable,
+                               unsigned int del_priority)
+{
+    if (del_priority == subtable->max_priority && --subtable->max_count == 0) {
+        struct cls_match *head;
+        struct cls_subtable *table;
+        struct cls_subtable_entry *iter, *subtable_iter = NULL;
+
+        subtable->max_priority = 0;
+        HMAP_FOR_EACH (head, hmap_node, &subtable->rules) {
+            if (head->priority > subtable->max_priority) {
+                subtable->max_priority = head->priority;
+                subtable->max_count = 1;
+            } else if (head->priority == subtable->max_priority) {
+                ++subtable->max_count;
+            }
+        }
+
+        /* Possibly move 'subtable' later in the priority list.  If we break
+         * out of the loop, then 'subtable' should be moved just before
+         * 'iter'.  If the loop terminates normally, then 'iter' will point
+         * just past the last list element and we'll move 'subtable' just
+         * before that (i.e. to the back of the list). */
+        CLS_SUBTABLE_CACHE_FOR_EACH (table, iter, &cls->subtables_priority) {
+            if (table == subtable) {
+                subtable_iter = iter; /* Locate the subtable as we go. */
+                iter->max_priority = subtable->max_priority;
+            } else if (table->max_priority <= subtable->max_priority) {
+                ovs_assert(subtable_iter != NULL);
+                break;
+            }
+        }
+
+        /* Move 'subtable' just before 'iter' (unless it's already there). */
+        if (iter != subtable_iter) {
+            cls_subtable_cache_splice(iter, subtable_iter, subtable_iter + 1);
+        }
+    }
+}
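+
+/* Illustrative sketch only: a removal path would call the helper above
+ * after unlinking a rule, unless the subtable became empty and is destroyed
+ * outright.  'n_rules', 'rule', and 'destroy_subtable' below are
+ * assumptions for illustration:
+ *
+ *     ... unlink 'rule' from 'subtable->rules' ...
+ *     if (--subtable->n_rules == 0) {
+ *         destroy_subtable(cls, subtable);
+ *     } else {
+ *         update_subtables_after_removal(cls, subtable, rule->priority);
+ *     }
+ */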
+
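+/* A range [start, end) of u32 offsets within a struct flow, used by
+ * check_tries() below to bound the trie checks to the fields of the
+ * current lookup stage. */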
+struct range {
+    uint8_t start;
+    uint8_t end;
+};
+
+/* Returns true if the rest of the subtable can be skipped based on the
+ * prefix trie lookup results. */
+static inline bool
+check_tries(struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries,
+            const unsigned int field_plen[CLS_MAX_TRIES],
+            const struct range ofs, const struct flow *flow,
+            struct flow_wildcards *wc)
+{
+    int j;
+
+    /* Check if we could avoid fully unwildcarding the next level of
+     * fields using the prefix tries.  The trie checks are done only as
+     * needed to avoid folding additional bits into the wildcards mask. */
+    for (j = 0; j < n_tries; j++) {
+        /* Is the trie field relevant for this subtable? */
+        if (field_plen[j]) {
+            struct trie_ctx *ctx = &trie_ctx[j];
+            uint8_t be32ofs = ctx->be32ofs;
+
+            /* Is the trie field within the current range of fields? */
+            if (be32ofs >= ofs.start && be32ofs < ofs.end) {
+                /* On-demand trie lookup. */
+                if (!ctx->lookup_done) {
+                    ctx->match_plen = trie_lookup(ctx->trie, flow,
+                                                  &ctx->maskbits);
+                    ctx->lookup_done = true;
+                }
+                /* The rest of the subtable can be skipped if the subtable's
+                 * prefix on the field is longer than what is known to match
+                 * based on the trie lookup. */
+                if (field_plen[j] > ctx->match_plen) {
+                    /* RFC: We want the trie lookup to never result in
+                     * unwildcarding any bits that would not be unwildcarded
+                     * otherwise.  Since the trie is shared by the whole
+                     * classifier, it is possible that the 'maskbits' contain
+                     * bits that are irrelevant for the partition of the
+                     * classifier relevant for the current flow. */
+
+                    /* Can skip if the field is already unwildcarded. */
+                    if (mask_prefix_bits_set(wc, be32ofs, ctx->maskbits)) {
+                        return true;
+                    }
+                    /* Check that the trie result will not unwildcard more
+                     * bits than this stage will. */
+                    if (ctx->maskbits <= field_plen[j]) {
+                        /* Unwildcard the bits and skip the rest. */
+                        mask_set_prefix_bits(wc, be32ofs, ctx->maskbits);
+                        /* Note: Prerequisite already unwildcarded, as the
+                         * only prerequisite of the supported trie lookup
+                         * fields is the ethertype, which is currently
+                         * always unwildcarded. */
+                        return true;
+                    }
+                }
+            }
+        }
+    }
+    return false;
+}
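+
+/* Illustrative sketch only: a staged lookup might call check_tries() once
+ * per stage, giving up on the whole subtable as soon as it returns true.
+ * The names 'index_ofs', 'trie_plen', and 'range_out' are assumptions for
+ * illustration, not names defined above:
+ *
+ *     ofs.start = 0;
+ *     for (i = 0; i < subtable->n_indices; i++) {
+ *         ofs.end = subtable->index_ofs[i];
+ *         if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs,
+ *                         flow, wc)) {
+ *             goto range_out;
+ *         }
+ *         ... look up the flow in the 'i'th staged index ...
+ *         ofs.start = ofs.end;
+ *     }
+ */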
+
+/* Returns true if 'target' satisfies 'flow'/'mask', that is, if for each
+ * bit that 'mask' has set, 'target' has the same value as 'flow' for that
+ * bit.
+ *
+ * This function is equivalent to miniflow_equal_flow_in_minimask(flow,
+ * target, mask), but it is faster because of the invariant that
+ * flow->map and mask->masks.map are the same. */
+static inline bool
+miniflow_and_mask_matches_flow(const struct miniflow *flow,
+                               const struct minimask *mask,
+                               const struct flow *target)
+{
+    const uint32_t *flowp = miniflow_get_u32_values(flow);
+    const uint32_t *maskp = miniflow_get_u32_values(&mask->masks);
+    uint32_t target_u32;
+
+    FLOW_FOR_EACH_IN_MAP(target_u32, target, mask->masks.map) {
+        if ((*flowp++ ^ target_u32) & *maskp++) {
+            return false;
+        }
+    }
+
+    return true;
+}
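+
+/* Illustrative sketch only: a lookup over one subtable might walk a hash
+ * bucket of 'subtable->rules' with this helper.  'rule' and 'hash' are
+ * assumptions for illustration:
+ *
+ *     HMAP_FOR_EACH_WITH_HASH (rule, hmap_node, hash, &subtable->rules) {
+ *         if (miniflow_and_mask_matches_flow(&rule->flow, &subtable->mask,
+ *                                            flow)) {
+ *             return rule;
+ *         }
+ *     }
+ */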