Merge "master" into "wdp".
diff --git a/lib/classifier.c b/lib/classifier.c
index 1aadfe5..f6f0b5a 100644
--- a/lib/classifier.c
+++ b/lib/classifier.c
@@ -176,8 +176,7 @@ classifier_destroy(struct classifier *cls)
         struct hmap *tbl;
 
         for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
-            HMAP_FOR_EACH_SAFE (bucket, next_bucket,
-                                struct cls_bucket, hmap_node, tbl) {
+            HMAP_FOR_EACH_SAFE (bucket, next_bucket, hmap_node, tbl) {
                 free(bucket);
             }
             hmap_destroy(tbl);
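
(Throughout this patch, the HMAP_FOR_EACH*, HMAP_FOR_EACH_WITH_HASH, and LIST_FOR_EACH iterators lose their explicit struct-type argument.  As a rough sketch of why the argument is no longer needed: a container_of-style helper can recover the element type from the iteration variable itself.  The OBJECT_CONTAINING definition below and its pairing with hmap_first()/hmap_next() are illustrative assumptions, not the actual macros in lib/hmap.h.)

    #include <stddef.h>                 /* offsetof */

    /* Illustrative only: turn a pointer to MEMBER back into a pointer to the
     * object containing it, taking the object's type from the iteration
     * variable OBJECT instead of an explicit "struct foo" argument. */
    #define OBJECT_CONTAINING(POINTER, OBJECT, MEMBER)                      \
        ((typeof(OBJECT)) (void *)                                          \
         ((char *) (POINTER) - offsetof(typeof(*(OBJECT)), MEMBER)))

    /* With that helper, callers only name the variable, the member, and the
     * map, e.g. HMAP_FOR_EACH (bucket, hmap_node, tbl) { ... }. */
    #define HMAP_FOR_EACH(NODE, MEMBER, HMAP)                               \
        for ((NODE) = OBJECT_CONTAINING(hmap_first(HMAP), NODE, MEMBER);    \
             &(NODE)->MEMBER != NULL;                                       \
             (NODE) = OBJECT_CONTAINING(hmap_next(HMAP, &(NODE)->MEMBER),   \
                                        NODE, MEMBER))
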
@@ -249,7 +248,7 @@ void
 classifier_insert_exact(struct classifier *cls, struct cls_rule *rule)
 {
     hmap_insert(&cls->exact_table, &rule->node.hmap,
-                flow_hash(&rule->flow, 0));
+                flow_hash_headers(&rule->flow, 0));
     cls->n_rules++;
 }
 
@@ -302,7 +301,7 @@ struct cls_rule *
 classifier_lookup_exact(const struct classifier *cls, const flow_t *flow)
 {
     return (!hmap_is_empty(&cls->exact_table)
-            ? search_exact_table(cls, flow_hash(flow, 0), flow)
+            ? search_exact_table(cls, flow_hash_headers(flow, 0), flow)
             : NULL);
 }
 
@@ -335,22 +334,22 @@ classifier_find_rule_exactly(const struct classifier *cls,
 
     if (!target->wildcards) {
         /* Ignores 'priority'. */
-        return search_exact_table(cls, flow_hash(target, 0), target);
+        return search_exact_table(cls, flow_hash_headers(target, 0), target);
     }
 
     assert(target->wildcards == (target->wildcards & OVSFW_ALL));
     table_idx = table_idx_from_wildcards(target->wildcards);
     hash = hash_fields(target, table_idx);
-    HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node, hash,
+    HMAP_FOR_EACH_WITH_HASH (bucket, hmap_node, hash,
                              &cls->tables[table_idx]) {
         if (equal_fields(&bucket->fixed, target, table_idx)) {
             struct cls_rule *pos;
-            LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
+            LIST_FOR_EACH (pos, node.list, &bucket->rules) {
                 if (pos->flow.priority < target->priority) {
                     return NULL;
                 } else if (pos->flow.priority == target->priority &&
                            pos->flow.wildcards == target->wildcards &&
-                           flow_equal(target, &pos->flow)) {
+                           flow_equal_headers(target, &pos->flow)) {
                     return pos;
                 }
             }
@@ -369,7 +368,7 @@ classifier_rule_overlaps(const struct classifier *cls, const flow_t *target)
     const struct hmap *tbl;
 
     if (!target->wildcards) {
-        return search_exact_table(cls, flow_hash(target, 0), target) ?
+        return search_exact_table(cls, flow_hash_headers(target, 0), target) ?
             true : false;
     }
 
@@ -378,13 +377,12 @@ classifier_rule_overlaps(const struct classifier *cls, const flow_t *target)
     for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
         struct cls_bucket *bucket;
 
-        HMAP_FOR_EACH (bucket, struct cls_bucket, hmap_node, tbl) {
+        HMAP_FOR_EACH (bucket, hmap_node, tbl) {
             struct cls_rule *rule;
 
-            LIST_FOR_EACH (rule, struct cls_rule, node.list,
-                           &bucket->rules) {
-                if (rule->flow.priority == target->priority 
-                        && rules_match_2wild(rule, &target_rule, 0)) {
+            LIST_FOR_EACH (rule, node.list, &bucket->rules) {
+                if (rule->flow.priority == target->priority
+                    && rules_match_2wild(rule, &target_rule, 0)) {
                     return true;
                 }
             }
@@ -400,8 +398,12 @@ classifier_rule_overlaps(const struct classifier *cls, const flow_t *target)
  * it must not delete (or move) any other rules in 'cls' that are in the same
  * table as the argument rule.  Two rules are in the same table if their
  * cls_rule structs have the same table_idx; as a special case, a rule with
- * wildcards and an exact-match rule will never be in the same table. */
-void
+ * wildcards and an exact-match rule will never be in the same table.
+ *
+ * If 'callback' returns nonzero then the iteration stops immediately and
+ * classifier_for_each_match() passes up the return value.  Otherwise,
+ * classifier_for_each_match() returns 0 after completing the iteration. */
+int
 classifier_for_each_match(const struct classifier *cls,
                           const flow_t *target_flow,
                           int include, cls_cb_func *callback, void *aux)
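
(A minimal caller-side sketch of the contract documented above: the callback returns 0 to continue and nonzero to stop, and classifier_for_each_match() hands the nonzero value back to its caller.  The find_ctx struct, find_cb(), and find_rule_with_priority() below are hypothetical, written only to exercise the interface; the exact type used for flow priorities is also an assumption.)

    #include "classifier.h"

    struct find_ctx {
        unsigned int priority;          /* priority we are looking for */
        struct cls_rule *found;         /* first matching rule, if any */
    };

    static int
    find_cb(struct cls_rule *rule, void *aux)
    {
        struct find_ctx *ctx = aux;
        if (rule->flow.priority == ctx->priority) {
            ctx->found = rule;
            return 1;                   /* nonzero: stop iterating now */
        }
        return 0;                       /* zero: keep going */
    }

    /* Returns the first rule in 'cls' that could match 'target' and has the
     * given priority, or NULL if the walk completes without finding one. */
    static struct cls_rule *
    find_rule_with_priority(const struct classifier *cls, const flow_t *target,
                            unsigned int priority)
    {
        struct find_ctx ctx = { priority, NULL };
        return classifier_for_each_match(cls, target,
                                         CLS_INC_WILD | CLS_INC_EXACT,
                                         find_cb, &ctx)
               ? ctx.found : NULL;
    }
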
@@ -416,8 +418,7 @@ classifier_for_each_match(const struct classifier *cls,
              table++) {
             struct cls_bucket *bucket, *next_bucket;
 
-            HMAP_FOR_EACH_SAFE (bucket, next_bucket,
-                                struct cls_bucket, hmap_node, table) {
+            HMAP_FOR_EACH_SAFE (bucket, next_bucket, hmap_node, table) {
                 /* XXX there is a bit of room for optimization here based on
                  * rejecting entire buckets on their fixed fields, but it will
                  * only be worthwhile for big buckets (which we hope we won't
@@ -429,17 +430,22 @@ classifier_for_each_match(const struct classifier *cls,
                  * bucket itself will be destroyed.  The bucket contains the
                  * list head so that's a use-after-free error. */
                 prev_rule = NULL;
-                LIST_FOR_EACH (rule, struct cls_rule, node.list,
-                               &bucket->rules) {
+                LIST_FOR_EACH (rule, node.list, &bucket->rules) {
                     if (rules_match_1wild(rule, &target, 0)) {
                         if (prev_rule) {
-                            callback(prev_rule, aux);
+                            int retval = callback(prev_rule, aux);
+                            if (retval) {
+                                return retval;
+                            }
                         }
                         prev_rule = rule;
                     }
                 }
                 if (prev_rule) {
-                    callback(prev_rule, aux);
+                    int retval = callback(prev_rule, aux);
+                    if (retval) {
+                        return retval;
+                    }
                 }
             }
         }
@@ -449,33 +455,45 @@ classifier_for_each_match(const struct classifier *cls,
         if (target.flow.wildcards) {
             struct cls_rule *rule, *next_rule;
 
-            HMAP_FOR_EACH_SAFE (rule, next_rule, struct cls_rule, node.hmap,
+            HMAP_FOR_EACH_SAFE (rule, next_rule, node.hmap,
                                 &cls->exact_table) {
                 if (rules_match_1wild(rule, &target, 0)) {
-                    callback(rule, aux);
+                    int retval = callback(rule, aux);
+                    if (retval) {
+                        return retval;
+                    }
                 }
             }
         } else {
             /* Optimization: there can be at most one match in the exact
              * table. */
-            size_t hash = flow_hash(&target.flow, 0);
+            size_t hash = flow_hash_headers(&target.flow, 0);
             struct cls_rule *rule = search_exact_table(cls, hash,
                                                        &target.flow);
             if (rule) {
-                callback(rule, aux);
+                int retval = callback(rule, aux);
+                if (retval) {
+                    return retval;
+                }
             }
         }
     }
+
+    return 0;
 }
 
 /* 'callback' is allowed to delete the rule that is passed as its argument, but
  * it must not delete (or move) any other rules in 'cls' that are in the same
  * table as the argument rule.  Two rules are in the same table if their
  * cls_rule structs have the same table_idx; as a special case, a rule with
- * wildcards and an exact-match rule will never be in the same table. */
-void
+ * wildcards and an exact-match rule will never be in the same table.
+ *
+ * If 'callback' returns nonzero then the iteration stops immediately and
+ * classifier_for_each() passes up the return value.  Otherwise,
+ * classifier_for_each() returns 0 after completing the iteration. */
+int
 classifier_for_each(const struct classifier *cls, int include,
-                    void (*callback)(struct cls_rule *, void *aux),
+                    int (*callback)(struct cls_rule *, void *aux),
                     void *aux)
 {
     if (include & CLS_INC_WILD) {
@@ -484,8 +502,7 @@ classifier_for_each(const struct classifier *cls, int include,
         for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
             struct cls_bucket *bucket, *next_bucket;
 
-            HMAP_FOR_EACH_SAFE (bucket, next_bucket,
-                                struct cls_bucket, hmap_node, tbl) {
+            HMAP_FOR_EACH_SAFE (bucket, next_bucket, hmap_node, tbl) {
                 struct cls_rule *prev_rule, *rule;
 
                 /* We can't just use LIST_FOR_EACH_SAFE here because, if the
@@ -493,15 +510,20 @@ classifier_for_each(const struct classifier *cls, int include,
                  * bucket itself will be destroyed.  The bucket contains the
                  * list head so that's a use-after-free error. */
                 prev_rule = NULL;
-                LIST_FOR_EACH (rule, struct cls_rule, node.list,
-                               &bucket->rules) {
+                LIST_FOR_EACH (rule, node.list, &bucket->rules) {
                     if (prev_rule) {
-                        callback(prev_rule, aux);
+                        int retval = callback(prev_rule, aux);
+                        if (retval) {
+                            return retval;
+                        }
                     }
                     prev_rule = rule;
                 }
                 if (prev_rule) {
-                    callback(prev_rule, aux);
+                    int retval = callback(prev_rule, aux);
+                    if (retval) {
+                        return retval;
+                    }
                 }
             }
         }
@@ -510,11 +532,15 @@ classifier_for_each(const struct classifier *cls, int include,
     if (include & CLS_INC_EXACT) {
         struct cls_rule *rule, *next_rule;
 
-        HMAP_FOR_EACH_SAFE (rule, next_rule,
-                            struct cls_rule, node.hmap, &cls->exact_table) {
-            callback(rule, aux);
+        HMAP_FOR_EACH_SAFE (rule, next_rule, node.hmap, &cls->exact_table) {
+            int retval = callback(rule, aux);
+            if (retval) {
+                return retval;
+            }
         }
     }
+
+    return 0;
 }
 \f
 static struct cls_bucket *create_bucket(struct hmap *, size_t hash,
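
(The same convention now applies to classifier_for_each(): whatever nonzero value the callback returns is passed straight through, which makes it easy to abort a walk on error.  struct rule_dump, rule_dump_add(), and dump_rule_cb() below are hypothetical helpers used only to illustrate propagating an errno value out of the iteration.)

    #include <errno.h>
    #include <stdbool.h>
    #include "classifier.h"

    struct rule_dump;                   /* hypothetical accumulator */
    bool rule_dump_add(struct rule_dump *, const struct cls_rule *);

    /* Hypothetical callback: record each rule, failing with ENOMEM if the
     * accumulator cannot grow.  A nonzero return aborts the walk. */
    static int
    dump_rule_cb(struct cls_rule *rule, void *aux)
    {
        struct rule_dump *dump = aux;
        return rule_dump_add(dump, rule) ? 0 : ENOMEM;
    }

    /* Returns 0 if every rule was visited, otherwise the error that
     * dump_rule_cb() reported when the walk stopped early. */
    static int
    dump_all_rules(const struct classifier *cls, struct rule_dump *dump)
    {
        return classifier_for_each(cls, CLS_INC_WILD | CLS_INC_EXACT,
                                   dump_rule_cb, dump);
    }
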
@@ -648,7 +674,7 @@ static struct cls_rule *
 bucket_insert(struct cls_bucket *bucket, struct cls_rule *rule)
 {
     struct cls_rule *pos;
-    LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
+    LIST_FOR_EACH (pos, node.list, &bucket->rules) {
         if (pos->flow.priority == rule->flow.priority) {
             if (pos->flow.wildcards == rule->flow.wildcards
                 && rules_match_1wild(pos, rule, rule->table_idx))
@@ -670,7 +696,7 @@ insert_exact_rule(struct classifier *cls, struct cls_rule *rule)
     struct cls_rule *old_rule;
     size_t hash;
 
-    hash = flow_hash(&rule->flow, 0);
+    hash = flow_hash_headers(&rule->flow, 0);
     old_rule = search_exact_table(cls, hash, &rule->flow);
     if (old_rule) {
         hmap_remove(&cls->exact_table, &old_rule->node.hmap);
@@ -686,8 +712,7 @@ static struct cls_bucket *
 find_bucket(struct hmap *table, size_t hash, const struct cls_rule *rule)
 {
     struct cls_bucket *bucket;
-    HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node, hash,
-                             table) {
+    HMAP_FOR_EACH_WITH_HASH (bucket, hmap_node, hash, table) {
         if (equal_fields(&bucket->fixed, &rule->flow, rule->table_idx)) {
             return bucket;
         }
@@ -835,10 +860,10 @@ static bool
 rules_match_2wild(const struct cls_rule *wild1, const struct cls_rule *wild2,
                   int field_idx)
 {
-    return rules_match(wild1, wild2, 
-                       wild1->flow.wildcards | wild2->flow.wildcards, 
+    return rules_match(wild1, wild2,
+                       wild1->flow.wildcards | wild2->flow.wildcards,
                        wild1->wc.nw_src_mask & wild2->wc.nw_src_mask,
-                       wild1->wc.nw_dst_mask & wild2->wc.nw_dst_mask, 
+                       wild1->wc.nw_dst_mask & wild2->wc.nw_dst_mask,
                        field_idx);
 }
 
@@ -857,7 +882,7 @@ search_bucket(struct cls_bucket *bucket, int field_idx,
         return NULL;
     }
 
-    LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
+    LIST_FOR_EACH (pos, node.list, &bucket->rules) {
         if (rules_match_1wild(target, pos, field_idx)) {
             return pos;
         }
@@ -885,7 +910,7 @@ search_table(const struct hmap *table, int field_idx,
         return search_bucket(bucket, field_idx, target);
     }
 
-    HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node,
+    HMAP_FOR_EACH_WITH_HASH (bucket, hmap_node,
                              hash_fields(&target->flow, field_idx), table) {
         struct cls_rule *rule = search_bucket(bucket, field_idx, target);
         if (rule) {
@@ -901,9 +926,8 @@ search_exact_table(const struct classifier *cls, size_t hash,
 {
     struct cls_rule *rule;
 
-    HMAP_FOR_EACH_WITH_HASH (rule, struct cls_rule, node.hmap,
-                             hash, &cls->exact_table) {
-        if (flow_equal(&rule->flow, target)) {
+    HMAP_FOR_EACH_WITH_HASH (rule, node.hmap, hash, &cls->exact_table) {
+        if (flow_equal_headers(&rule->flow, target)) {
             return rule;
         }
     }