/*
 * Copyright (c) 2009, 2010 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "classifier.h"
#include <assert.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "dynamic-string.h"
#include "flow.h"
#include "hash.h"
#include "util.h"

const struct cls_field cls_fields[CLS_N_FIELDS + 1] = {
#define CLS_FIELD(WILDCARDS, MEMBER, NAME)      \
    { offsetof(flow_t, MEMBER),                 \
      sizeof ((flow_t *)0)->MEMBER,             \
      WILDCARDS,                                \
      #NAME },
    CLS_FIELDS
#undef CLS_FIELD
    { sizeof(flow_t), 0, 0, "exact" },
};

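/* Illustrative sketch (not part of the original file): CLS_FIELDS is an
 * X-macro, declared in classifier.h, that invokes CLS_FIELD() once per field
 * in classification order.  Assuming a simplified two-field list such as
 *
 *     #define CLS_FIELDS                                 \
 *         CLS_FIELD(OFPFW_IN_PORT, in_port, IN_PORT)     \
 *         CLS_FIELD(OFPFW_DL_VLAN, dl_vlan, DL_VLAN)
 *
 * the array initializer above would expand to roughly:
 *
 *     { offsetof(flow_t, in_port), sizeof ((flow_t *)0)->in_port,
 *       OFPFW_IN_PORT, "IN_PORT" },
 *     { offsetof(flow_t, dl_vlan), sizeof ((flow_t *)0)->dl_vlan,
 *       OFPFW_DL_VLAN, "DL_VLAN" },
 *     { sizeof(flow_t), 0, 0, "exact" },
 *
 * so that cls_fields[i] describes field 'i' and the trailing element stands
 * for an exact-match rule. */
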
static uint32_t hash_fields(const flow_t *, int table_idx);
static bool equal_fields(const flow_t *, const flow_t *, int table_idx);

static int table_idx_from_wildcards(uint32_t wildcards);
static struct cls_rule *table_insert(struct hmap *, struct cls_rule *);
static struct cls_rule *insert_exact_rule(struct classifier *,
                                          struct cls_rule *);
static struct cls_bucket *find_bucket(struct hmap *, size_t hash,
                                      const struct cls_rule *);
static struct cls_rule *search_table(const struct hmap *table, int field_idx,
                                     const struct cls_rule *);
static struct cls_rule *search_exact_table(const struct classifier *,
                                           size_t hash, const flow_t *);
static bool rules_match_1wild(const struct cls_rule *fixed,
                              const struct cls_rule *wild, int field_idx);
static bool rules_match_2wild(const struct cls_rule *wild1,
                              const struct cls_rule *wild2, int field_idx);

/* Converts the flow in 'flow' into a cls_rule in 'rule'. */
void
cls_rule_from_flow(const flow_t *flow, struct cls_rule *rule)
{
    rule->flow = *flow;
    if (!rule->flow.wildcards && rule->flow.priority < UINT16_MAX) {
        rule->flow.priority = UINT16_MAX;
    }
    flow_wildcards_init(&rule->wc, rule->flow.wildcards);
    rule->table_idx = table_idx_from_wildcards(rule->flow.wildcards);
}

/* Converts the ofp_match in 'match' into a cls_rule in 'rule', with the given
 * 'priority'.  If 'tun_id_from_cookie' is set then the upper 32 bits of
 * 'cookie' are stored in the rule as the tunnel ID. */
void
cls_rule_from_match(const struct ofp_match *match, unsigned int priority,
                    bool tun_id_from_cookie, uint64_t cookie,
                    struct cls_rule *rule)
{
    flow_from_match(match, priority, tun_id_from_cookie, cookie, &rule->flow);
    if (!rule->flow.wildcards && rule->flow.priority < UINT16_MAX) {
        /* An exact-match rule always gets the maximum priority. */
        rule->flow.priority = UINT16_MAX;
    }
    flow_wildcards_init(&rule->wc, rule->flow.wildcards);
    rule->table_idx = table_idx_from_wildcards(rule->flow.wildcards);
}

/* Converts 'rule' to a string and returns the string.  The caller must free
 * the string (with free()). */
char *
cls_rule_to_string(const struct cls_rule *rule)
{
    struct ds s = DS_EMPTY_INITIALIZER;
    ds_put_format(&s, "wildcards=%x priority=%u ",
                  rule->flow.wildcards, rule->flow.priority);
    flow_format(&s, &rule->flow);
    return ds_cstr(&s);
}

/* Prints cls_rule 'rule', for debugging.
 *
 * (The output could be improved and expanded, but this was good enough to
 * debug the classifier.) */
void
cls_rule_print(const struct cls_rule *rule)
{
    printf("wildcards=%x priority=%u ",
           rule->flow.wildcards, rule->flow.priority);
    flow_print(stdout, &rule->flow);
}

/* Adjusts pointers around 'old', which must be in classifier 'cls', to
 * compensate for it having been moved in memory to 'new' (e.g. due to
 * realloc()).
 *
 * This function cannot be realized in all possible flow classifier
 * implementations, so we will probably have to change the interface if we
 * change the implementation.  Shouldn't be a big deal though. */
void
cls_rule_moved(struct classifier *cls, struct cls_rule *old,
               struct cls_rule *new)
{
    if (new->flow.wildcards) {
        list_moved(&new->node.list);
    } else {
        hmap_node_moved(&cls->exact_table,
                        &old->node.hmap, &new->node.hmap);
    }
}

/* Replaces 'old', which must be in classifier 'cls', by 'new' (e.g. due to
 * realloc()); that is, after calling this function 'new' will be in 'cls' in
 * place of 'old'.
 *
 * 'new' and 'old' must be exactly the same: wildcard the same fields, have the
 * same fixed values for non-wildcarded fields, and have the same priority.
 *
 * The caller takes ownership of 'old' and is thus responsible for freeing it,
 * etc., as necessary.
 *
 * This function cannot be realized in all possible flow classifier
 * implementations, so we will probably have to change the interface if we
 * change the implementation.  Shouldn't be a big deal though. */
void
cls_rule_replace(struct classifier *cls, const struct cls_rule *old,
                 struct cls_rule *new)
{
    assert(old->flow.wildcards == new->flow.wildcards);
    assert(old->flow.priority == new->flow.priority);

    if (new->flow.wildcards) {
        list_replace(&new->node.list, &old->node.list);
    } else {
        hmap_replace(&cls->exact_table, &old->node.hmap, &new->node.hmap);
    }
}

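/* Example (a sketch, not part of the original file): a caller that embeds
 * cls_rules in a growable array can use cls_rule_moved() after enlarging the
 * array.  'struct my_table' and 'my_table_grow' are hypothetical. */
#if 0
struct my_table {
    struct cls_rule *rules;     /* All of these are in the classifier. */
    size_t n_rules, allocated;
};

static void
my_table_grow(struct classifier *cls, struct my_table *t)
{
    struct cls_rule *old_rules = t->rules;
    size_t i;

    t->allocated = t->allocated ? 2 * t->allocated : 16;
    t->rules = xmalloc(t->allocated * sizeof *t->rules);
    memcpy(t->rules, old_rules, t->n_rules * sizeof *t->rules);
    for (i = 0; i < t->n_rules; i++) {
        /* Fix up the classifier's internal pointers into the old array. */
        cls_rule_moved(cls, &old_rules[i], &t->rules[i]);
    }
    free(old_rules);
}
#endif
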
/* Initializes 'cls' as a classifier that initially contains no classification
 * rules. */
void
classifier_init(struct classifier *cls)
{
    int i;

    cls->n_rules = 0;
    for (i = 0; i < ARRAY_SIZE(cls->tables); i++) {
        hmap_init(&cls->tables[i]);
    }
    hmap_init(&cls->exact_table);
}

/* Destroys 'cls'.  Rules within 'cls', if any, are not freed; this is the
 * caller's responsibility. */
void
classifier_destroy(struct classifier *cls)
{
    if (cls) {
        struct cls_bucket *bucket, *next_bucket;
        struct hmap *tbl;

        for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
            HMAP_FOR_EACH_SAFE (bucket, next_bucket,
                                struct cls_bucket, hmap_node, tbl) {
                free(bucket);
            }
            hmap_destroy(tbl);
        }
        hmap_destroy(&cls->exact_table);
    }
}

/* Returns true if 'cls' does not contain any classification rules, false
 * otherwise. */
bool
classifier_is_empty(const struct classifier *cls)
{
    return cls->n_rules == 0;
}

/* Returns the number of rules in 'classifier'. */
int
classifier_count(const struct classifier *cls)
{
    return cls->n_rules;
}

/* Returns the number of rules in 'classifier' that have no wildcards. */
int
classifier_count_exact(const struct classifier *cls)
{
    return hmap_count(&cls->exact_table);
}

/* Returns the number of rules in 'classifier' that have at least one
 * wildcarded field. */
int
classifier_count_wild(const struct classifier *cls)
{
    return classifier_count(cls) - classifier_count_exact(cls);
}

/* Inserts 'rule' into 'cls'.  Transfers ownership of 'rule' to 'cls'.
 *
 * If 'cls' already contains an identical rule (including wildcards, values of
 * fixed fields, and priority), replaces the old rule by 'rule' and returns the
 * rule that was replaced.  The caller takes ownership of the returned rule and
 * is thus responsible for freeing it, etc., as necessary.
 *
 * Returns NULL if 'cls' does not contain a rule with an identical key, after
 * inserting the new rule.  In this case, no rules are displaced by the new
 * rule, even rules that cannot have any effect because the new rule matches a
 * superset of their flows and has higher priority. */
struct cls_rule *
classifier_insert(struct classifier *cls, struct cls_rule *rule)
{
    struct cls_rule *old;

    assert((rule->flow.wildcards == 0)
           == (rule->table_idx == CLS_F_IDX_EXACT));
    old = (rule->flow.wildcards
           ? table_insert(&cls->tables[rule->table_idx], rule)
           : insert_exact_rule(cls, rule));
    if (!old) {
        cls->n_rules++;
    }
    return old;
}

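/* Example (a sketch, not part of the original file): because
 * classifier_insert() takes ownership of 'rule' and returns any displaced
 * rule, a typical caller heap-allocates rules and frees whatever comes back.
 * 'add_rule' is a hypothetical helper. */
#if 0
static void
add_rule(struct classifier *cls, const flow_t *flow)
{
    struct cls_rule *rule = xmalloc(sizeof *rule);
    struct cls_rule *displaced;

    cls_rule_from_flow(flow, rule);
    displaced = classifier_insert(cls, rule);
    free(displaced);            /* No-op when nothing was displaced. */
}
#endif
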
/* Inserts 'rule' into 'cls'.  Transfers ownership of 'rule' to 'cls'.
 *
 * 'rule' must be an exact-match rule (rule->flow.wildcards must be 0) and
 * 'cls' must not contain any rule with an identical key. */
void
classifier_insert_exact(struct classifier *cls, struct cls_rule *rule)
{
    hmap_insert(&cls->exact_table, &rule->node.hmap,
                flow_hash_headers(&rule->flow, 0));
    cls->n_rules++;
}

/* Removes 'rule' from 'cls'.  It is the caller's responsibility to free
 * 'rule', if this is desirable. */
void
classifier_remove(struct classifier *cls, struct cls_rule *rule)
{
    if (rule->flow.wildcards) {
        /* Remove 'rule' from bucket.  If that empties the bucket, remove the
         * bucket from its table. */
        struct hmap *table = &cls->tables[rule->table_idx];
        struct list *rules = list_remove(&rule->node.list);
        if (list_is_empty(rules)) {
            /* This code is a little tricky.  list_remove() returns the list
             * element just after the one removed.  Since the list is now
             * empty, this will be the address of the 'rules' member of the
             * bucket that was just emptied, so pointer arithmetic (via
             * CONTAINER_OF) can find that bucket. */
            struct cls_bucket *bucket;
            bucket = CONTAINER_OF(rules, struct cls_bucket, rules);
            hmap_remove(table, &bucket->hmap_node);
            free(bucket);
        }
    } else {
        /* Remove 'rule' from cls->exact_table. */
        hmap_remove(&cls->exact_table, &rule->node.hmap);
    }
    cls->n_rules--;
}

/* Finds and returns the highest-priority rule in 'cls' that matches 'flow'.
 * Returns a null pointer if no rules in 'cls' match 'flow'.  If multiple rules
 * of equal priority match 'flow', returns one arbitrarily.
 *
 * (When multiple rules of equal priority happen to fall into the same bucket,
 * rules added more recently take priority over rules added less recently, but
 * this is subject to change and should not be depended upon.) */
struct cls_rule *
classifier_lookup(const struct classifier *cls, const flow_t *flow)
{
    struct cls_rule *rule = classifier_lookup_exact(cls, flow);
    if (!rule) {
        rule = classifier_lookup_wild(cls, flow);
    }
    return rule;
}

struct cls_rule *
classifier_lookup_exact(const struct classifier *cls, const flow_t *flow)
{
    return (!hmap_is_empty(&cls->exact_table)
            ? search_exact_table(cls, flow_hash_headers(flow, 0), flow)
            : NULL);
}

312 struct cls_rule *best = NULL;
313 if (cls->n_rules > hmap_count(&cls->exact_table)) {
314 struct cls_rule target;
317 cls_rule_from_flow(flow, &target);
318 for (i = 0; i < CLS_N_FIELDS; i++) {
319 struct cls_rule *rule = search_table(&cls->tables[i], i, &target);
320 if (rule && (!best || rule->flow.priority > best->flow.priority)) {
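/* Example (a sketch, not part of the original file): classifier_lookup()
 * consults the exact-match table before the wildcard tables, so an exact-match
 * rule always beats any wildcarded rule for the same packet.  'handle_packet'
 * is a hypothetical caller. */
#if 0
static void
handle_packet(struct classifier *cls, const flow_t *flow)
{
    struct cls_rule *rule = classifier_lookup(cls, flow);
    if (rule) {
        cls_rule_print(rule);   /* Highest-priority match for 'flow'. */
    }
}
#endif
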
/* Finds and returns a rule in 'cls' with exactly the same priority and
 * wildcarded fields as 'target'.  Returns a null pointer if 'cls' contains no
 * such rule. */
struct cls_rule *
classifier_find_rule_exactly(const struct classifier *cls,
                             const flow_t *target)
{
    struct cls_bucket *bucket;
    int table_idx;
    uint32_t hash;

    if (!target->wildcards) {
        /* Ignores 'priority'. */
        return search_exact_table(cls, flow_hash_headers(target, 0), target);
    }

    assert(target->wildcards == (target->wildcards & OVSFW_ALL));
    table_idx = table_idx_from_wildcards(target->wildcards);
    hash = hash_fields(target, table_idx);
    HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node, hash,
                             &cls->tables[table_idx]) {
        if (equal_fields(&bucket->fixed, target, table_idx)) {
            struct cls_rule *pos;
            LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
                if (pos->flow.priority < target->priority) {
                    /* The bucket's rules are sorted by descending priority,
                     * so there can be no match beyond this point. */
                    return NULL;
                } else if (pos->flow.priority == target->priority &&
                           pos->flow.wildcards == target->wildcards &&
                           flow_equal_headers(target, &pos->flow)) {
                    return pos;
                }
            }
        }
    }
    return NULL;
}

/* Checks if the flow defined by 'target' overlaps with any other rule at the
 * same priority in the classifier.  Two rules are considered overlapping if a
 * packet could match both. */
bool
classifier_rule_overlaps(const struct classifier *cls, const flow_t *target)
{
    struct cls_rule target_rule;
    const struct hmap *tbl;

    if (!target->wildcards) {
        return search_exact_table(cls, flow_hash_headers(target, 0), target) ?
            true : false;
    }

    cls_rule_from_flow(target, &target_rule);

    for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
        struct cls_bucket *bucket;

        HMAP_FOR_EACH (bucket, struct cls_bucket, hmap_node, tbl) {
            struct cls_rule *rule;

            LIST_FOR_EACH (rule, struct cls_rule, node.list,
                           &bucket->rules) {
                if (rule->flow.priority == target->priority
                    && rules_match_2wild(rule, &target_rule, 0)) {
                    return true;
                }
            }
        }
    }

    return false;
}

/* Invokes 'callback' (with auxiliary data 'aux') for each rule in 'cls' that
 * matches 'target_flow', visiting wildcarded rules, exact-match rules, or
 * both according to the CLS_INC_* bits in 'include'.  Ignores
 * 'target_flow->priority'.
 *
 * 'callback' is allowed to delete the rule that is passed as its argument, but
 * it must not delete (or move) any other rules in 'cls' that are in the same
 * table as the argument rule.  Two rules are in the same table if their
 * cls_rule structs have the same table_idx; as a special case, a rule with
 * wildcards and an exact-match rule will never be in the same table.
 *
 * If 'callback' returns nonzero then the iteration stops immediately and
 * classifier_for_each_match() passes up the return value.  Otherwise,
 * classifier_for_each_match() returns 0 after completing the iteration. */
int
classifier_for_each_match(const struct classifier *cls,
                          const flow_t *target_flow,
                          int include, cls_cb_func *callback, void *aux)
{
    struct cls_rule target;

    cls_rule_from_flow(target_flow, &target);
    if (include & CLS_INC_WILD) {
        const struct hmap *table;

        for (table = &cls->tables[0]; table < &cls->tables[CLS_N_FIELDS];
             table++) {
            struct cls_bucket *bucket, *next_bucket;

            HMAP_FOR_EACH_SAFE (bucket, next_bucket,
                                struct cls_bucket, hmap_node, table) {
                /* XXX there is a bit of room for optimization here based on
                 * rejecting entire buckets on their fixed fields, but it will
                 * only be worthwhile for big buckets (which we hope we won't
                 * get anyway, but...) */
                struct cls_rule *prev_rule, *rule;

                /* We can't just use LIST_FOR_EACH_SAFE here because, if the
                 * callback deletes the last rule in the bucket, then the
                 * bucket itself will be destroyed.  The bucket contains the
                 * list head so that's a use-after-free error.  Instead, run
                 * the callback one rule behind the iterator. */
                prev_rule = NULL;
                LIST_FOR_EACH (rule, struct cls_rule, node.list,
                               &bucket->rules) {
                    if (rules_match_1wild(rule, &target, 0)) {
                        if (prev_rule) {
                            int retval = callback(prev_rule, aux);
                            if (retval) {
                                return retval;
                            }
                        }
                        prev_rule = rule;
                    }
                }
                if (prev_rule) {
                    int retval = callback(prev_rule, aux);
                    if (retval) {
                        return retval;
                    }
                }
            }
        }
    }

    if (include & CLS_INC_EXACT) {
        if (target.flow.wildcards) {
            struct cls_rule *rule, *next_rule;

            HMAP_FOR_EACH_SAFE (rule, next_rule, struct cls_rule, node.hmap,
                                &cls->exact_table) {
                if (rules_match_1wild(rule, &target, 0)) {
                    int retval = callback(rule, aux);
                    if (retval) {
                        return retval;
                    }
                }
            }
        } else {
            /* Optimization: there can be at most one match in the exact
             * table. */
            size_t hash = flow_hash_headers(&target.flow, 0);
            struct cls_rule *rule = search_exact_table(cls, hash,
                                                       &target.flow);
            if (rule) {
                int retval = callback(rule, aux);
                if (retval) {
                    return retval;
                }
            }
        }
    }
    return 0;
}

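/* Example (a sketch, not part of the original file): since the callback may
 * delete the rule passed to it, classifier_for_each_match() can implement
 * "delete all matching flows".  'delete_cb' and 'delete_matching' are
 * hypothetical. */
#if 0
static int
delete_cb(struct cls_rule *rule, void *cls_)
{
    struct classifier *cls = cls_;

    classifier_remove(cls, rule);
    free(rule);
    return 0;                   /* Nonzero would stop the iteration. */
}

static void
delete_matching(struct classifier *cls, const flow_t *target)
{
    classifier_for_each_match(cls, target, CLS_INC_WILD | CLS_INC_EXACT,
                              delete_cb, cls);
}
#endif
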
/* Invokes 'callback' (with auxiliary data 'aux') for each rule in 'cls',
 * visiting wildcarded rules, exact-match rules, or both according to the
 * CLS_INC_* bits in 'include'.
 *
 * 'callback' is allowed to delete the rule that is passed as its argument, but
 * it must not delete (or move) any other rules in 'cls' that are in the same
 * table as the argument rule.  Two rules are in the same table if their
 * cls_rule structs have the same table_idx; as a special case, a rule with
 * wildcards and an exact-match rule will never be in the same table.
 *
 * If 'callback' returns nonzero then the iteration stops immediately and
 * classifier_for_each() passes up the return value.  Otherwise,
 * classifier_for_each() returns 0 after completing the iteration. */
int
classifier_for_each(const struct classifier *cls, int include,
                    int (*callback)(struct cls_rule *, void *aux),
                    void *aux)
{
    if (include & CLS_INC_WILD) {
        const struct hmap *tbl;

        for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
            struct cls_bucket *bucket, *next_bucket;

            HMAP_FOR_EACH_SAFE (bucket, next_bucket,
                                struct cls_bucket, hmap_node, tbl) {
                struct cls_rule *prev_rule, *rule;

                /* We can't just use LIST_FOR_EACH_SAFE here because, if the
                 * callback deletes the last rule in the bucket, then the
                 * bucket itself will be destroyed.  The bucket contains the
                 * list head so that's a use-after-free error.  Instead, run
                 * the callback one rule behind the iterator. */
                prev_rule = NULL;
                LIST_FOR_EACH (rule, struct cls_rule, node.list,
                               &bucket->rules) {
                    if (prev_rule) {
                        int retval = callback(prev_rule, aux);
                        if (retval) {
                            return retval;
                        }
                    }
                    prev_rule = rule;
                }
                if (prev_rule) {
                    int retval = callback(prev_rule, aux);
                    if (retval) {
                        return retval;
                    }
                }
            }
        }
    }

    if (include & CLS_INC_EXACT) {
        struct cls_rule *rule, *next_rule;

        HMAP_FOR_EACH_SAFE (rule, next_rule,
                            struct cls_rule, node.hmap, &cls->exact_table) {
            int retval = callback(rule, aux);
            if (retval) {
                return retval;
            }
        }
    }
    return 0;
}

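/* Example (a sketch, not part of the original file): counting rules by
 * traversal.  Returning 0 from the callback visits every rule; a nonzero
 * return value would abort the walk.  'count_cb' and 'count_rules' are
 * hypothetical. */
#if 0
static int
count_cb(struct cls_rule *rule, void *n_)
{
    (*(size_t *) n_)++;
    return 0;
}

static size_t
count_rules(const struct classifier *cls)
{
    size_t n = 0;
    classifier_for_each(cls, CLS_INC_WILD | CLS_INC_EXACT, count_cb, &n);
    return n;
}
#endif
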
static struct cls_bucket *create_bucket(struct hmap *, size_t hash,
                                        const flow_t *fixed);
static struct cls_rule *bucket_insert(struct cls_bucket *, struct cls_rule *);

static inline bool equal_bytes(const void *, const void *, size_t n);

/* Returns a hash computed across the fields in 'flow' whose field indexes
 * (CLS_F_IDX_*) are less than 'table_idx'.  (If 'table_idx' is
 * CLS_F_IDX_EXACT, hashes all the fields in 'flow'.) */
static uint32_t
hash_fields(const flow_t *flow, int table_idx)
{
    /* I just know I'm going to hell for writing code this way.
     *
     * GCC generates pretty good code here, with only a single taken
     * conditional jump per execution.  Now the question is, would we be better
     * off marking this function ALWAYS_INLINE and writing a wrapper that
     * switches on the value of 'table_idx' to get rid of all the conditional
     * jumps entirely (except for one in the wrapper)?  Honestly I really,
     * really hope that it doesn't matter in practice.
     *
     * We could do better by calculating hashes incrementally, instead of
     * starting over from the top each time.  But that would be even uglier. */
    uint32_t a, b, c;
    uint32_t tmp[3];
    size_t n;

    a = b = c = 0xdeadbeef + table_idx;
    n = 0;

#define CLS_FIELD(WILDCARDS, MEMBER, NAME)                      \
    if (table_idx == CLS_F_IDX_##NAME) {                        \
        if (n) {                                                \
            memset((uint8_t *) tmp + n, 0, sizeof tmp - n);     \
        }                                                       \
        goto exit;                                              \
    } else {                                                    \
        const size_t size = sizeof flow->MEMBER;                \
        const uint8_t *p1 = (const uint8_t *) &flow->MEMBER;    \
        const size_t p1_size = MIN(sizeof tmp - n, size);       \
        const uint8_t *p2 = p1 + p1_size;                       \
        const size_t p2_size = size - p1_size;                  \
                                                                \
        /* Append to 'tmp' as much data as will fit. */         \
        memcpy((uint8_t *) tmp + n, p1, p1_size);               \
        n += p1_size;                                           \
                                                                \
        /* If 'tmp' is full, mix. */                            \
        if (n == sizeof tmp) {                                  \
            a += tmp[0];                                        \
            b += tmp[1];                                        \
            c += tmp[2];                                        \
            HASH_MIX(a, b, c);                                  \
            n = 0;                                              \
        }                                                       \
                                                                \
        /* Append to 'tmp' any data that didn't fit. */         \
        memcpy(tmp, p2, p2_size);                               \
        n += p2_size;                                           \
    }
    CLS_FIELDS
#undef CLS_FIELD

exit:
    if (n) {
        a += tmp[0];
        b += tmp[1];
        c += tmp[2];
    }
    HASH_FINAL(a, b, c);
    return c;
}

/* Compares the fields in 'a' and 'b' whose field indexes (CLS_F_IDX_*) are
 * less than 'table_idx'.  (If 'table_idx' is CLS_F_IDX_EXACT, compares all the
 * fields in 'a' and 'b'.)
 *
 * Returns true if all the compared fields are equal, false otherwise. */
static bool
equal_fields(const flow_t *a, const flow_t *b, int table_idx)
{
    /* XXX The generated code could be better here. */
#define CLS_FIELD(WILDCARDS, MEMBER, NAME)                                  \
    if (table_idx == CLS_F_IDX_##NAME) {                                    \
        return true;                                                        \
    } else if (!equal_bytes(&a->MEMBER, &b->MEMBER, sizeof a->MEMBER)) {    \
        return false;                                                       \
    }
    CLS_FIELDS
#undef CLS_FIELD

    return true;
}

/* Returns the index of the cls_fields[] entry that a rule with the given
 * 'wildcards' belongs under: the first wildcarded field, or CLS_F_IDX_EXACT
 * if no field is wildcarded. */
static int
table_idx_from_wildcards(uint32_t wildcards)
{
    if (!wildcards) {
        return CLS_F_IDX_EXACT;
    }
#define CLS_FIELD(WILDCARDS, MEMBER, NAME) \
    if (wildcards & WILDCARDS) {           \
        return CLS_F_IDX_##NAME;           \
    }
    CLS_FIELDS
#undef CLS_FIELD
    NOT_REACHED();
}

/* Inserts 'rule' into 'table'.  Returns the rule, if any, that was displaced
 * in favor of 'rule'. */
static struct cls_rule *
table_insert(struct hmap *table, struct cls_rule *rule)
{
    struct cls_bucket *bucket;
    size_t hash;

    hash = hash_fields(&rule->flow, rule->table_idx);
    bucket = find_bucket(table, hash, rule);
    if (!bucket) {
        bucket = create_bucket(table, hash, &rule->flow);
    }

    return bucket_insert(bucket, rule);
}

/* Inserts 'rule' into 'bucket', keeping the bucket's rules sorted by
 * descending priority.
 *
 * Returns the rule, if any, that was displaced in favor of 'rule'. */
static struct cls_rule *
bucket_insert(struct cls_bucket *bucket, struct cls_rule *rule)
{
    struct cls_rule *pos;
    LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
        if (pos->flow.priority == rule->flow.priority) {
            if (pos->flow.wildcards == rule->flow.wildcards
                && rules_match_1wild(pos, rule, rule->table_idx)) {
                /* 'rule' is identical to 'pos': replace it. */
                list_replace(&rule->node.list, &pos->node.list);
                return pos;
            }
        } else if (pos->flow.priority < rule->flow.priority) {
            break;
        }
    }
    list_insert(&pos->node.list, &rule->node.list);
    return NULL;
}

/* Inserts 'rule' into 'cls''s exact-match table.  Returns the rule, if any,
 * that was displaced in favor of 'rule'. */
static struct cls_rule *
insert_exact_rule(struct classifier *cls, struct cls_rule *rule)
{
    struct cls_rule *old_rule;
    size_t hash;

    hash = flow_hash_headers(&rule->flow, 0);
    old_rule = search_exact_table(cls, hash, &rule->flow);
    if (old_rule) {
        hmap_remove(&cls->exact_table, &old_rule->node.hmap);
    }
    hmap_insert(&cls->exact_table, &rule->node.hmap, hash);
    return old_rule;
}

/* Returns the bucket in 'table' that has the given 'hash' and the same fields
 * as 'rule->flow' (up to 'rule->table_idx'), or a null pointer if no bucket
 * exists. */
static struct cls_bucket *
find_bucket(struct hmap *table, size_t hash, const struct cls_rule *rule)
{
    struct cls_bucket *bucket;
    HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node, hash,
                             table) {
        if (equal_fields(&bucket->fixed, &rule->flow, rule->table_idx)) {
            return bucket;
        }
    }
    return NULL;
}

/* Creates a bucket and inserts it in 'table' with the given 'hash' and 'fixed'
 * values.  Returns the new bucket. */
static struct cls_bucket *
create_bucket(struct hmap *table, size_t hash, const flow_t *fixed)
{
    struct cls_bucket *bucket = xmalloc(sizeof *bucket);
    list_init(&bucket->rules);
    bucket->fixed = *fixed;
    hmap_insert(table, &bucket->hmap_node, hash);
    return bucket;
}

/* Returns true if the 'n' bytes in 'a' and 'b' are equal, false otherwise. */
static inline bool ALWAYS_INLINE
equal_bytes(const void *a, const void *b, size_t n)
{
#ifdef __i386__
    /* For some reason GCC generates stupid code for memcmp() of small
     * constant integer lengths.  Help it out.
     *
     * This function is always inlined, and it is always called with 'n' as a
     * compile-time constant, so the switch statement gets optimized out and
     * this whole function just expands to an instruction or two. */
    switch (n) {
    case 1:
        return *(uint8_t *) a == *(uint8_t *) b;

    case 2:
        return *(uint16_t *) a == *(uint16_t *) b;

    case 4:
        return *(uint32_t *) a == *(uint32_t *) b;

    case 6:
        /* 6 bytes: an Ethernet address. */
        return (*(uint32_t *) a == *(uint32_t *) b
                && ((uint16_t *) a)[2] == ((uint16_t *) b)[2]);

    default:
        abort();
    }
#else
    /* I hope GCC is smarter on your platform. */
    return !memcmp(a, b, n);
#endif
}

/* Returns the 32-bit unsigned integer at 'p'. */
static inline uint32_t
read_uint32(const void *p)
{
    /* GCC optimizes this into a single machine instruction on x86. */
    uint32_t x;
    memcpy(&x, p, sizeof x);
    return x;
}

/* Compares the specified field in 'a_' and 'b_'.  Returns true if the fields
 * are equal, or if the ofp_match wildcard bits in 'wildcards' are set such
 * that non-equal values may be ignored.  'nw_src_mask' and 'nw_dst_mask' must
 * be those that would be set for 'wildcards' by cls_rule_set_masks().
 *
 * The compared field is the one with wildcard bit or bits 'field_wc', offset
 * 'ofs' within the flow_t, and length 'len', in bytes. */
static inline bool ALWAYS_INLINE
field_matches(const flow_t *a_, const flow_t *b_,
              uint32_t wildcards, uint32_t nw_src_mask, uint32_t nw_dst_mask,
              uint32_t field_wc, int ofs, int len)
{
    /* This function is always inlined, and it is always called with 'field_wc'
     * as a compile-time constant, so the "if" conditionals here generate no
     * code. */
    const void *a = (const uint8_t *) a_ + ofs;
    const void *b = (const uint8_t *) b_ + ofs;
    if (!(field_wc & (field_wc - 1))) {
        /* Handle all the single-bit wildcard cases. */
        return wildcards & field_wc || equal_bytes(a, b, len);
    } else if (field_wc == OFPFW_NW_SRC_MASK ||
               field_wc == OFPFW_NW_DST_MASK) {
        uint32_t a_ip = read_uint32(a);
        uint32_t b_ip = read_uint32(b);
        uint32_t mask = (field_wc == OFPFW_NW_SRC_MASK
                         ? nw_src_mask : nw_dst_mask);
        return ((a_ip ^ b_ip) & mask) == 0;
    } else {
        NOT_REACHED();
    }
}

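/* Worked example (not part of the original file): in OpenFlow 1.0, the
 * OFPFW_NW_SRC_MASK bits of 'wildcards' encode "ignore the N low-order bits
 * of nw_src".  Assuming the masks are kept in network byte order to match the
 * flow's big-endian IP fields, N = 8 yields the /24 mask:
 *
 *     N    = (wildcards & OFPFW_NW_SRC_MASK) >> OFPFW_NW_SRC_SHIFT;
 *     mask = N < 32 ? htonl(UINT32_MAX << N) : 0;   (N=8 -> 0xffffff00)
 *
 * so two addresses that differ only in their low 8 bits satisfy
 * ((a_ip ^ b_ip) & mask) == 0. */
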
/* Returns true if 'a' and 'b' match, ignoring fields for which the wildcards
 * in 'wildcards' are set.  'nw_src_mask' and 'nw_dst_mask' must be those that
 * would be set for 'wildcards' by cls_rule_set_masks().  'field_idx' is the
 * index of the first field to be compared; fields before 'field_idx' are
 * assumed to match.  (Always returns true if 'field_idx' is CLS_N_FIELDS.) */
static bool
rules_match(const struct cls_rule *a, const struct cls_rule *b,
            uint32_t wildcards, uint32_t nw_src_mask, uint32_t nw_dst_mask,
            int field_idx)
{
    /* This is related to Duff's device (see
     * http://en.wikipedia.org/wiki/Duff's_device): jumping into the middle of
     * the switch skips the fields that are already known to match, and each
     * case falls through to the next. */
    switch (field_idx) {
#define CLS_FIELD(WILDCARDS, MEMBER, NAME)                          \
    case CLS_F_IDX_##NAME:                                          \
        if (!field_matches(&a->flow, &b->flow,                      \
                           wildcards, nw_src_mask, nw_dst_mask,     \
                           WILDCARDS, offsetof(flow_t, MEMBER),     \
                           sizeof a->flow.MEMBER)) {                \
            return false;                                           \
        }
        CLS_FIELDS
#undef CLS_FIELD
    }
    return true;
}

/* Returns true if 'fixed' and 'wild' match.  All fields in 'fixed' must have
 * fixed values; 'wild' may contain wildcards.
 *
 * 'field_idx' is the index of the first field to be compared; fields before
 * 'field_idx' are assumed to match.  Always returns true if 'field_idx' is
 * CLS_N_FIELDS. */
static bool
rules_match_1wild(const struct cls_rule *fixed, const struct cls_rule *wild,
                  int field_idx)
{
    return rules_match(fixed, wild, wild->flow.wildcards, wild->wc.nw_src_mask,
                       wild->wc.nw_dst_mask, field_idx);
}

/* Returns true if 'wild1' and 'wild2' match, that is, if their fields
 * are equal modulo wildcards in 'wild1' or 'wild2'.
 *
 * 'field_idx' is the index of the first field to be compared; fields before
 * 'field_idx' are assumed to match.  Always returns true if 'field_idx' is
 * CLS_N_FIELDS. */
static bool
rules_match_2wild(const struct cls_rule *wild1, const struct cls_rule *wild2,
                  int field_idx)
{
    return rules_match(wild1, wild2,
                       wild1->flow.wildcards | wild2->flow.wildcards,
                       wild1->wc.nw_src_mask & wild2->wc.nw_src_mask,
                       wild1->wc.nw_dst_mask & wild2->wc.nw_dst_mask,
                       field_idx);
}

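/* Example (not part of the original file): overlap is symmetric in the
 * wildcard bits.  A rule matching "nw_src=10.0.0.0/8, any nw_dst" and a rule
 * matching "any nw_src, nw_dst=192.168.1.1" overlap, because a packet with
 * nw_src in 10/8 and nw_dst 192.168.1.1 matches both; ORing the wildcard bits
 * and ANDing the CIDR masks implements exactly this test. */
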
/* Searches 'bucket' for a rule that matches 'target'.  Returns the
 * highest-priority match, if one is found, or a null pointer if there is no
 * match.
 *
 * 'field_idx' must be the index of the first wildcarded field in 'bucket'. */
static struct cls_rule *
search_bucket(struct cls_bucket *bucket, int field_idx,
              const struct cls_rule *target)
{
    struct cls_rule *pos;

    if (!equal_fields(&bucket->fixed, &target->flow, field_idx)) {
        return NULL;
    }

    /* The bucket's rules are sorted by descending priority, so the first
     * match is the highest-priority match. */
    LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
        if (rules_match_1wild(target, pos, field_idx)) {
            return pos;
        }
    }
    return NULL;
}

/* Searches 'table' for a rule that matches 'target'.  Returns the
 * highest-priority match, if one is found, or a null pointer if there is no
 * match.
 *
 * 'field_idx' must be the index of the first wildcarded field in 'table'. */
static struct cls_rule *
search_table(const struct hmap *table, int field_idx,
             const struct cls_rule *target)
{
    struct cls_bucket *bucket;

    switch (hmap_count(table)) {
        /* In these special cases there's no need to hash. */
    case 0:
        return NULL;
    case 1:
        bucket = CONTAINER_OF(hmap_first(table), struct cls_bucket, hmap_node);
        return search_bucket(bucket, field_idx, target);
    }

    HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node,
                             hash_fields(&target->flow, field_idx), table) {
        struct cls_rule *rule = search_bucket(bucket, field_idx, target);
        if (rule) {
            return rule;
        }
    }
    return NULL;
}

/* Returns the rule in 'cls''s exact-match table that matches 'target', or a
 * null pointer if there is none.  'hash' must be the value of
 * flow_hash_headers(target, 0). */
static struct cls_rule *
search_exact_table(const struct classifier *cls, size_t hash,
                   const flow_t *target)
{
    struct cls_rule *rule;

    HMAP_FOR_EACH_WITH_HASH (rule, struct cls_rule, node.hmap,
                             hash, &cls->exact_table) {
        if (flow_equal_headers(&rule->flow, target)) {
            return rule;
        }
    }
    return NULL;
}