2 * Copyright (c) 2009, 2010 Nicira Networks.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include "classifier.h"
21 #include <netinet/in.h>
22 #include "dynamic-string.h"
/* Descriptor table with one entry per classification field plus a trailing
 * "exact" sentinel spanning the whole struct flow.  Entries are generated by
 * expanding the CLS_FIELD X-macro over the field list (the list itself is
 * presumably pulled in via a CLS_FIELDS include elided from this excerpt —
 * TODO confirm). */
26 const struct cls_field cls_fields[CLS_N_FIELDS + 1] = {
27 #define CLS_FIELD(WILDCARDS, MEMBER, NAME) \
28 { offsetof(struct flow, MEMBER), \
29 sizeof ((struct flow *)0)->MEMBER, \
34 { sizeof(struct flow), 0, 0, "exact" },
/* Forward declarations for the internal helpers defined later in this file:
 * hashing/comparison over a field prefix, table selection by wildcard bits,
 * insertion into wildcard/exact tables, bucket lookup, and the rule-matching
 * primitives used by the public lookup functions. */
37 static uint32_t hash_fields(const struct flow *, int table_idx);
38 static bool equal_fields(const struct flow *, const struct flow *,
41 static int table_idx_from_wildcards(uint32_t wildcards);
42 static struct cls_rule *table_insert(struct hmap *, struct cls_rule *);
43 static struct cls_rule *insert_exact_rule(struct classifier *,
45 static struct cls_bucket *find_bucket(struct hmap *, size_t hash,
46 const struct cls_rule *);
47 static struct cls_rule *search_table(const struct hmap *table, int field_idx,
48 const struct cls_rule *);
49 static struct cls_rule *search_exact_table(const struct classifier *,
50 size_t hash, const struct flow *);
51 static bool rules_match_1wild(const struct cls_rule *fixed,
52 const struct cls_rule *wild, int field_idx);
53 static bool rules_match_2wild(const struct cls_rule *wild1,
54 const struct cls_rule *wild2, int field_idx);
56 /* Converts the flow in 'flow' into a cls_rule in 'rule', with the given
57 * 'wildcards' and 'priority'.*/
59 cls_rule_from_flow(const struct flow *flow, uint32_t wildcards,
60 unsigned int priority, struct cls_rule *rule)
63 flow_wildcards_init(&rule->wc, wildcards);
64 rule->priority = priority;
/* Cache which table this rule belongs in, derived from the wildcard bits as
 * canonicalized by flow_wildcards_init(). */
65 rule->table_idx = table_idx_from_wildcards(rule->wc.wildcards);
68 /* Converts the ofp_match in 'match' into a cls_rule in 'rule', with the given
69 * 'priority'. If 'tun_id_from_cookie' is set then the upper 32 bits of
70 * 'cookie' are stored in the rule as the tunnel ID. */
72 cls_rule_from_match(const struct ofp_match *match, unsigned int priority,
73 bool tun_id_from_cookie, uint64_t cookie,
74 struct cls_rule *rule)
77 flow_from_match(match, tun_id_from_cookie, cookie, &rule->flow, &wildcards);
78 flow_wildcards_init(&rule->wc, wildcards);
/* An exact-match rule (no wildcard bits) is forced to the maximum priority,
 * overriding the caller's 'priority'. */
79 rule->priority = rule->wc.wildcards ? priority : UINT16_MAX;
80 rule->table_idx = table_idx_from_wildcards(rule->wc.wildcards);
83 /* Converts 'rule' to a string and returns the string. The caller must free
84 * the string (with free()). */
86 cls_rule_to_string(const struct cls_rule *rule)
88 struct ds s = DS_EMPTY_INITIALIZER;
/* Emit the "wildcards=... priority=..." prefix, then the formatted flow. */
89 ds_put_format(&s, "wildcards=%x priority=%u ",
90 rule->wc.wildcards, rule->priority);
91 flow_format(&s, &rule->flow);
95 /* Prints cls_rule 'rule', for debugging.
97 * (The output could be improved and expanded, but this was good enough to
98 * debug the classifier.) */
/* Same "wildcards=... priority=..." prefix as cls_rule_to_string(), but
 * written directly to stdout. */
100 cls_rule_print(const struct cls_rule *rule)
102 printf("wildcards=%x priority=%u ", rule->wc.wildcards, rule->priority);
103 flow_print(stdout, &rule->flow);
107 /* Initializes 'cls' as a classifier that initially contains no classification
110 classifier_init(struct classifier *cls)
/* One hash table per classification field, plus a dedicated exact-match
 * table; all start out empty. */
115 for (i = 0; i < ARRAY_SIZE(cls->tables); i++) {
116 hmap_init(&cls->tables[i]);
118 hmap_init(&cls->exact_table);
121 /* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the
122 * caller's responsibility. */
124 classifier_destroy(struct classifier *cls)
127 struct cls_bucket *bucket, *next_bucket;
/* Walk every wildcard table, presumably freeing each bucket (the loop body
 * is elided from this excerpt — TODO confirm), then the tables themselves. */
130 for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
131 HMAP_FOR_EACH_SAFE (bucket, next_bucket, hmap_node, tbl) {
136 hmap_destroy(&cls->exact_table);
140 /* Returns true if 'cls' does not contain any classification rules, false
143 classifier_is_empty(const struct classifier *cls)
145 return cls->n_rules == 0;
148 /* Returns the number of rules in 'classifier'. */
/* Total across all tables, exact-match rules included (presumably returns
 * cls->n_rules; the body is elided from this excerpt). */
150 classifier_count(const struct classifier *cls)
155 /* Returns the number of rules in 'classifier' that have no wildcards. */
157 classifier_count_exact(const struct classifier *cls)
/* Exact-match rules live exclusively in cls->exact_table. */
159 return hmap_count(&cls->exact_table);
162 /* Inserts 'rule' into 'cls'. Transfers ownership of 'rule' to 'cls'.
164 * If 'cls' already contains an identical rule (including wildcards, values of
165 * fixed fields, and priority), replaces the old rule by 'rule' and returns the
166 * rule that was replaced. The caller takes ownership of the returned rule and
167 * is thus responsible for freeing it, etc., as necessary.
169 * Returns NULL if 'cls' does not contain a rule with an identical key, after
170 * inserting the new rule. In this case, no rules are displaced by the new
171 * rule, even rules that cannot have any effect because the new rule matches a
172 * superset of their flows and has higher priority. */
174 classifier_insert(struct classifier *cls, struct cls_rule *rule)
176 struct cls_rule *old;
/* Invariant: a rule has no wildcard bits exactly when it was assigned the
 * exact-match table index. */
177 assert((rule->wc.wildcards == 0) == (rule->table_idx == CLS_F_IDX_EXACT));
178 old = (rule->wc.wildcards
179 ? table_insert(&cls->tables[rule->table_idx], rule)
180 : insert_exact_rule(cls, rule));
187 /* Removes 'rule' from 'cls'. It is caller's responsibility to free 'rule', if
188 * this is desirable. */
190 classifier_remove(struct classifier *cls, struct cls_rule *rule)
/* Wildcarded rules live in per-table buckets; exact-match rules live in
 * cls->exact_table. */
192 if (rule->wc.wildcards) {
193 /* Remove 'rule' from bucket. If that empties the bucket, remove the
194 * bucket from its table. */
195 struct hmap *table = &cls->tables[rule->table_idx];
196 struct list *rules = list_remove(&rule->node.list);
197 if (list_is_empty(rules)) {
198 /* This code is a little tricky. list_remove() returns the list
199 * element just after the one removed. Since the list is now
200 * empty, this will be the address of the 'rules' member of the
201 * bucket that was just emptied, so pointer arithmetic (via
202 * CONTAINER_OF) can find that bucket. */
203 struct cls_bucket *bucket;
204 bucket = CONTAINER_OF(rules, struct cls_bucket, rules);
205 hmap_remove(table, &bucket->hmap_node);
209 /* Remove 'rule' from cls->exact_table. */
210 hmap_remove(&cls->exact_table, &rule->node.hmap);
/* Returns the exact-match rule in 'cls' whose flow equals 'flow', or NULL.
 * Skips the hash computation entirely when the exact table is empty. */
215 static struct cls_rule *
216 classifier_lookup_exact(const struct classifier *cls, const struct flow *flow)
218 return (!hmap_is_empty(&cls->exact_table)
219 ? search_exact_table(cls, flow_hash(flow, 0), flow)
/* Returns the highest-priority wildcarded rule in 'cls' matching 'flow', or
 * NULL if none matches. */
223 static struct cls_rule *
224 classifier_lookup_wild(const struct classifier *cls, const struct flow *flow)
226 struct cls_rule *best = NULL;
/* Only bother scanning the wildcard tables if at least one rule is not an
 * exact-match rule. */
227 if (cls->n_rules > hmap_count(&cls->exact_table)) {
228 struct cls_rule target;
/* Build a zero-wildcard, zero-priority probe rule from 'flow', then check
 * every per-field table, keeping the best priority seen. */
231 cls_rule_from_flow(flow, 0, 0, &target);
232 for (i = 0; i < CLS_N_FIELDS; i++) {
233 struct cls_rule *rule = search_table(&cls->tables[i], i, &target);
234 if (rule && (!best || rule->priority > best->priority)) {
242 /* Finds and returns the highest-priority rule in 'cls' that matches 'flow'.
243 * Returns a null pointer if no rules in 'cls' match 'flow'. If multiple rules
244 * of equal priority match 'flow', returns one arbitrarily.
246 * (When multiple rules of equal priority happen to fall into the same bucket,
247 * rules added more recently take priority over rules added less recently, but
248 * this is subject to change and should not be depended upon.) */
250 classifier_lookup(const struct classifier *cls, const struct flow *flow,
/* Exact-match rules are checked first; an exact hit wins outright since
 * exact rules carry maximum priority (see cls_rule_from_match()). */
253 if (include & CLS_INC_EXACT) {
254 struct cls_rule *rule = classifier_lookup_exact(cls, flow);
260 if (include & CLS_INC_WILD) {
261 return classifier_lookup_wild(cls, flow);
/* Finds a rule in 'cls' identical to 'target' (same wildcards, fixed fields,
 * and -- for wildcarded rules -- priority), or returns NULL. */
268 classifier_find_rule_exactly(const struct classifier *cls,
269 const struct cls_rule *target)
271 struct cls_bucket *bucket;
275 if (!target->wc.wildcards) {
276 /* Ignores 'target->priority'. */
277 return search_exact_table(cls, flow_hash(&target->flow, 0),
/* Only OpenFlow wildcard bits are expected here. */
281 assert(target->wc.wildcards == (target->wc.wildcards & OVSFW_ALL));
282 table_idx = table_idx_from_wildcards(target->wc.wildcards);
283 hash = hash_fields(&target->flow, table_idx);
284 HMAP_FOR_EACH_WITH_HASH (bucket, hmap_node, hash,
285 &cls->tables[table_idx]) {
286 if (equal_fields(&bucket->fixed, &target->flow, table_idx)) {
287 struct cls_rule *pos;
/* Bucket lists are kept in decreasing priority order (see
 * bucket_insert()), so the scan can stop as soon as 'pos' drops below
 * the target priority. */
288 LIST_FOR_EACH (pos, node.list, &bucket->rules) {
289 if (pos->priority < target->priority) {
291 } else if (pos->priority == target->priority &&
292 pos->wc.wildcards == target->wc.wildcards &&
293 flow_equal(&target->flow, &pos->flow)) {
302 /* Checks if 'target' would overlap any other rule in 'cls'. Two rules are
303 * considered to overlap if both rules have the same priority and a packet
304 * could match both. */
306 classifier_rule_overlaps(const struct classifier *cls,
307 const struct cls_rule *target)
309 const struct hmap *tbl;
/* An exact-match target can only overlap an identical exact-match flow. */
311 if (!target->wc.wildcards) {
312 return (search_exact_table(cls, flow_hash(&target->flow, 0),
313 &target->flow) != NULL);
/* Otherwise scan every bucket of every wildcard table, comparing modulo the
 * union of both rules' wildcards. */
316 for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
317 struct cls_bucket *bucket;
319 HMAP_FOR_EACH (bucket, hmap_node, tbl) {
320 struct cls_rule *rule;
322 LIST_FOR_EACH (rule, node.list, &bucket->rules) {
323 if (rule->priority == target->priority
324 && rules_match_2wild(rule, target, 0)) {
/* Invokes 'callback' (with 'aux') for each rule in 'cls' that matches
 * 'target', restricted to the rule kinds selected by 'include'. */
334 /* Ignores target->priority.
336 * 'callback' is allowed to delete the rule that is passed as its argument, but
337 * it must not delete (or move) any other rules in 'cls' that are in the same
338 * table as the argument rule. Two rules are in the same table if their
339 * cls_rule structs have the same table_idx; as a special case, a rule with
340 * wildcards and an exact-match rule will never be in the same table. */
342 classifier_for_each_match(const struct classifier *cls,
343 const struct cls_rule *target,
344 int include, cls_cb_func *callback, void *aux)
346 if (include & CLS_INC_WILD) {
347 const struct hmap *table;
349 for (table = &cls->tables[0]; table < &cls->tables[CLS_N_FIELDS];
351 struct cls_bucket *bucket, *next_bucket;
353 HMAP_FOR_EACH_SAFE (bucket, next_bucket, hmap_node, table) {
354 /* XXX there is a bit of room for optimization here based on
355 * rejecting entire buckets on their fixed fields, but it will
356 * only be worthwhile for big buckets (which we hope we won't
357 * get anyway, but...) */
358 struct cls_rule *prev_rule, *rule;
360 /* We can't just use LIST_FOR_EACH_SAFE here because, if the
361 * callback deletes the last rule in the bucket, then the
362 * bucket itself will be destroyed. The bucket contains the
363 * list head so that's a use-after-free error. */
/* 'prev_rule' trails one element behind the cursor, so the callback is
 * always handed a rule the loop has already moved past -- deleting it
 * cannot invalidate the iteration. */
365 LIST_FOR_EACH (rule, node.list, &bucket->rules) {
366 if (rules_match_1wild(rule, target, 0)) {
368 callback(prev_rule, aux);
374 callback(prev_rule, aux);
380 if (include & CLS_INC_EXACT) {
381 if (target->wc.wildcards) {
382 struct cls_rule *rule, *next_rule;
384 HMAP_FOR_EACH_SAFE (rule, next_rule, node.hmap,
386 if (rules_match_1wild(rule, target, 0)) {
391 /* Optimization: there can be at most one match in the exact
393 size_t hash = flow_hash(&target->flow, 0);
394 struct cls_rule *rule = search_exact_table(cls, hash,
/* Invokes 'callback' (with 'aux') for every rule in 'cls' of the kinds
 * selected by 'include', with no matching against a target. */
403 /* 'callback' is allowed to delete the rule that is passed as its argument, but
404 * it must not delete (or move) any other rules in 'cls' that are in the same
405 * table as the argument rule. Two rules are in the same table if their
406 * cls_rule structs have the same table_idx; as a special case, a rule with
407 * wildcards and an exact-match rule will never be in the same table.
409 * If 'include' is CLS_INC_EXACT then CLASSIFIER_FOR_EACH_EXACT_RULE(_SAFE) is
410 * probably easier to use. */
412 classifier_for_each(const struct classifier *cls, int include,
413 void (*callback)(struct cls_rule *, void *aux),
416 if (include & CLS_INC_WILD) {
417 const struct hmap *tbl;
419 for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
420 struct cls_bucket *bucket, *next_bucket;
422 HMAP_FOR_EACH_SAFE (bucket, next_bucket, hmap_node, tbl) {
423 struct cls_rule *prev_rule, *rule;
425 /* We can't just use LIST_FOR_EACH_SAFE here because, if the
426 * callback deletes the last rule in the bucket, then the
427 * bucket itself will be destroyed. The bucket contains the
428 * list head so that's a use-after-free error. */
/* Same trailing-cursor trick as classifier_for_each_match(): the callback
 * only ever receives the previous rule, which the loop no longer needs. */
430 LIST_FOR_EACH (rule, node.list, &bucket->rules) {
432 callback(prev_rule, aux);
437 callback(prev_rule, aux);
443 if (include & CLS_INC_EXACT) {
444 struct cls_rule *rule, *next_rule;
446 HMAP_FOR_EACH_SAFE (rule, next_rule, node.hmap, &cls->exact_table) {
/* Forward declarations for bucket management and the byte-comparison helper
 * used by the matching primitives below. */
452 static struct cls_bucket *create_bucket(struct hmap *, size_t hash,
453 const struct flow *fixed);
454 static struct cls_rule *bucket_insert(struct cls_bucket *, struct cls_rule *);
456 static inline bool equal_bytes(const void *, const void *, size_t n);
458 /* Returns a hash computed across the fields in 'flow' whose field indexes
459 * (CLS_F_IDX_*) are less than 'table_idx'. (If 'table_idx' is
460 * CLS_F_IDX_EXACT, hashes all the fields in 'flow'). */
462 hash_fields(const struct flow *flow, int table_idx)
464 /* I just know I'm going to hell for writing code this way.
466 * GCC generates pretty good code here, with only a single taken
467 * conditional jump per execution. Now the question is, would we be better
468 * off marking this function ALWAYS_INLINE and writing a wrapper that
469 * switches on the value of 'table_idx' to get rid of all the conditional
470 * jumps entirely (except for one in the wrapper)? Honestly I really,
471 * really hope that it doesn't matter in practice.
473 * We could do better by calculating hashes incrementally, instead of
474 * starting over from the top each time. But that would be even uglier. */
/* NOTE(review): the 0xdeadbeef-seeded a/b/c triple suggests Bob Jenkins'
 * lookup3-style mixing (the mix/final steps are elided here) -- confirm. */
479 a = b = c = 0xdeadbeef + table_idx;
/* Expand CLS_FIELD over each field: copy field bytes into 'tmp', mixing
 * whenever the staging buffer fills, until 'table_idx' is reached. */
482 #define CLS_FIELD(WILDCARDS, MEMBER, NAME) \
483 if (table_idx == CLS_F_IDX_##NAME) { \
485 memset((uint8_t *) tmp + n, 0, sizeof tmp - n); \
488 const size_t size = sizeof flow->MEMBER; \
489 const uint8_t *p1 = (const uint8_t *) &flow->MEMBER; \
490 const size_t p1_size = MIN(sizeof tmp - n, size); \
491 const uint8_t *p2 = p1 + p1_size; \
492 const size_t p2_size = size - p1_size; \
494 /* Append to 'tmp' as much data as will fit. */ \
495 memcpy((uint8_t *) tmp + n, p1, p1_size); \
498 /* If 'tmp' is full, mix. */ \
499 if (n == sizeof tmp) { \
507 /* Append to 'tmp' any data that didn't fit. */ \
508 memcpy(tmp, p2, p2_size); \
522 /* Compares the fields in 'a' and 'b' whose field indexes (CLS_F_IDX_*) are
523 * less than 'table_idx'. (If 'table_idx' is CLS_F_IDX_EXACT, compares all the
524 * fields in 'a' and 'b').
526 * Returns true if all the compared fields are equal, false otherwise. */
528 equal_fields(const struct flow *a, const struct flow *b, int table_idx)
530 /* XXX The generated code could be better here. */
/* X-macro expansion: stop once 'table_idx' is reached; fail fast on the
 * first field whose bytes differ. */
531 #define CLS_FIELD(WILDCARDS, MEMBER, NAME) \
532 if (table_idx == CLS_F_IDX_##NAME) { \
534 } else if (!equal_bytes(&a->MEMBER, &b->MEMBER, sizeof a->MEMBER)) { \
/* Maps 'wildcards' to a table index: CLS_F_IDX_EXACT when no (relevant)
 * wildcard bit is set, otherwise the index of the first field (in CLS_FIELD
 * expansion order) whose wildcard bit is present in 'wildcards'. */
544 table_idx_from_wildcards(uint32_t wildcards)
547 return CLS_F_IDX_EXACT;
549 #define CLS_FIELD(WILDCARDS, MEMBER, NAME) \
550 if (wildcards & WILDCARDS) { \
551 return CLS_F_IDX_##NAME; \
558 /* Inserts 'rule' into 'table'. Returns the rule, if any, that was displaced
559 * in favor of 'rule'. */
560 static struct cls_rule *
561 table_insert(struct hmap *table, struct cls_rule *rule)
563 struct cls_bucket *bucket;
/* Hash only the fields up to the rule's first wildcarded field; rules that
 * agree on that prefix share a bucket, created on demand. */
566 hash = hash_fields(&rule->flow, rule->table_idx);
567 bucket = find_bucket(table, hash, rule);
569 bucket = create_bucket(table, hash, &rule->flow);
572 return bucket_insert(bucket, rule);
575 /* Inserts 'rule' into 'bucket', given that 'field' is the first wildcarded
578 * Returns the rule, if any, that was displaced in favor of 'rule'. */
579 static struct cls_rule *
580 bucket_insert(struct cls_bucket *bucket, struct cls_rule *rule)
582 struct cls_rule *pos;
/* Keep the list sorted in decreasing priority order.  A rule identical to an
 * existing one (same priority and wildcards, matching fields) replaces it in
 * place; otherwise the new rule is inserted before the first lower-priority
 * entry, so equal-priority rules end up newest-first. */
583 LIST_FOR_EACH (pos, node.list, &bucket->rules) {
584 if (pos->priority == rule->priority) {
585 if (pos->wc.wildcards == rule->wc.wildcards
586 && rules_match_1wild(pos, rule, rule->table_idx))
588 list_replace(&rule->node.list, &pos->node.list);
591 } else if (pos->priority < rule->priority) {
595 list_insert(&pos->node.list, &rule->node.list);
/* Inserts exact-match 'rule' into cls->exact_table, displacing (and
 * returning) any existing rule with an identical flow. */
599 static struct cls_rule *
600 insert_exact_rule(struct classifier *cls, struct cls_rule *rule)
602 struct cls_rule *old_rule;
605 hash = flow_hash(&rule->flow, 0);
606 old_rule = search_exact_table(cls, hash, &rule->flow);
608 hmap_remove(&cls->exact_table, &old_rule->node.hmap);
610 hmap_insert(&cls->exact_table, &rule->node.hmap, hash);
614 /* Returns the bucket in 'table' that has the given 'hash' and the same fields
615 * as 'rule->flow' (up to 'rule->table_idx'), or a null pointer if no bucket
617 static struct cls_bucket *
618 find_bucket(struct hmap *table, size_t hash, const struct cls_rule *rule)
620 struct cls_bucket *bucket;
/* Hash collision candidates still need a full field-prefix comparison. */
621 HMAP_FOR_EACH_WITH_HASH (bucket, hmap_node, hash, table) {
622 if (equal_fields(&bucket->fixed, &rule->flow, rule->table_idx)) {
629 /* Creates a bucket and inserts it in 'table' with the given 'hash' and 'fixed'
630 * values. Returns the new bucket. */
631 static struct cls_bucket *
632 create_bucket(struct hmap *table, size_t hash, const struct flow *fixed)
/* The bucket stores a full copy of the fixed-field flow and starts with an
 * empty rule list. */
634 struct cls_bucket *bucket = xmalloc(sizeof *bucket);
635 list_init(&bucket->rules);
636 bucket->fixed = *fixed;
637 hmap_insert(table, &bucket->hmap_node, hash);
641 /* Returns true if the 'n' bytes in 'a' and 'b' are equal, false otherwise. */
642 static inline bool ALWAYS_INLINE
643 equal_bytes(const void *a, const void *b, size_t n)
646 /* For some reason GCC generates stupid code for memcmp() of small
647 * constant integer lengths. Help it out.
649 * This function is always inlined, and it is always called with 'n' as a
650 * compile-time constant, so the switch statement gets optimized out and
651 * this whole function just expands to an instruction or two. */
/* Specialized comparisons for 1-, 2-, 4-, and 6-byte fields (6 bytes covers
 * Ethernet addresses).  NOTE(review): the direct uint16_t/uint32_t loads
 * assume suitable alignment of the flow members -- confirm for non-x86. */
654 return *(uint8_t *) a == *(uint8_t *) b;
657 return *(uint16_t *) a == *(uint16_t *) b;
660 return *(uint32_t *) a == *(uint32_t *) b;
663 return (*(uint32_t *) a == *(uint32_t *) b
664 && ((uint16_t *) a)[2] == ((uint16_t *) b)[2]);
670 /* I hope GCC is smarter on your platform. */
671 return !memcmp(a, b, n);
675 /* Returns the 32-bit unsigned integer at 'p'. */
676 static inline uint32_t
677 read_uint32(const void *p)
679 /* GCC optimizes this into a single machine instruction on x86. */
/* memcpy avoids strict-aliasing and alignment pitfalls of a pointer cast. */
681 memcpy(&x, p, sizeof x);
685 /* Compares the specified field in 'a' and 'b'. Returns true if the fields are
686 * equal, or if the ofp_match wildcard bits in 'wildcards' are set such that
687 * non-equal values may be ignored. 'nw_src_mask' and 'nw_dst_mask' must be
688 * those that would be set for 'wildcards' by cls_rule_set_masks().
690 * The compared field is the one with wildcard bit or bits 'field_wc', offset
691 * 'rule_ofs' within cls_rule's "fields" member, and length 'len', in bytes. */
692 static inline bool ALWAYS_INLINE
693 field_matches(const struct flow *a_, const struct flow *b_,
694 uint32_t wildcards, uint32_t nw_src_mask, uint32_t nw_dst_mask,
695 uint32_t field_wc, int ofs, int len)
697 /* This function is always inlined, and it is always called with 'field_wc'
698 * as a compile-time constant, so the "if" conditionals here generate no
700 const void *a = (const uint8_t *) a_ + ofs;
701 const void *b = (const uint8_t *) b_ + ofs;
/* field_wc & (field_wc - 1) == 0 means at most one bit is set: a single-bit
 * wildcard either ignores the field outright or requires byte equality. */
702 if (!(field_wc & (field_wc - 1))) {
703 /* Handle all the single-bit wildcard cases. */
704 return wildcards & field_wc || equal_bytes(a, b, len);
705 } else if (field_wc == OFPFW_NW_SRC_MASK ||
706 field_wc == OFPFW_NW_DST_MASK) {
/* IP src/dst use multi-bit CIDR masks: compare only unmasked bits. */
707 uint32_t a_ip = read_uint32(a);
708 uint32_t b_ip = read_uint32(b);
709 uint32_t mask = (field_wc == OFPFW_NW_SRC_MASK
710 ? nw_src_mask : nw_dst_mask);
711 return ((a_ip ^ b_ip) & mask) == 0;
717 /* Returns true if 'a' and 'b' match, ignoring fields for which the wildcards
718 * in 'wildcards' are set. 'nw_src_mask' and 'nw_dst_mask' must be those that
719 * would be set for 'wildcards' by cls_rule_set_masks(). 'field_idx' is the
720 * index of the first field to be compared; fields before 'field_idx' are
721 * assumed to match. (Always returns true if 'field_idx' is CLS_N_FIELDS.) */
723 rules_match(const struct cls_rule *a, const struct cls_rule *b,
724 uint32_t wildcards, uint32_t nw_src_mask, uint32_t nw_dst_mask,
727 /* This is related to Duff's device (see
728 * http://en.wikipedia.org/wiki/Duff's_device). */
/* The CLS_FIELD expansion generates one case label per field inside a switch
 * on 'field_idx'; each case falls through to the next, so execution starts at
 * field 'field_idx' and checks every remaining field in order. */
730 #define CLS_FIELD(WILDCARDS, MEMBER, NAME) \
731 case CLS_F_IDX_##NAME: \
732 if (!field_matches(&a->flow, &b->flow, \
733 wildcards, nw_src_mask, nw_dst_mask, \
734 WILDCARDS, offsetof(struct flow, MEMBER), \
735 sizeof a->flow.MEMBER)) { \
745 /* Returns true if 'fixed' and 'wild' match. All fields in 'fixed' must have
746 * fixed values; 'wild' may contain wildcards.
748 * 'field_idx' is the index of the first field to be compared; fields before
749 * 'field_idx' are assumed to match. Always returns true if 'field_idx' is
/* Only 'wild''s wildcards and masks are consulted; 'fixed' is taken as a
 * fully-specified flow. */
752 rules_match_1wild(const struct cls_rule *fixed, const struct cls_rule *wild,
755 return rules_match(fixed, wild, wild->wc.wildcards, wild->wc.nw_src_mask,
756 wild->wc.nw_dst_mask, field_idx);
759 /* Returns true if 'wild1' and 'wild2' match, that is, if their fields
760 * are equal modulo wildcards in 'wild1' or 'wild2'.
762 * 'field_idx' is the index of the first field to be compared; fields before
763 * 'field_idx' are assumed to match. Always returns true if 'field_idx' is
766 rules_match_2wild(const struct cls_rule *wild1, const struct cls_rule *wild2,
/* A field is ignored if either rule wildcards it (bitwise OR); for CIDR
 * masks, only bits unmasked by both rules (bitwise AND) must agree. */
769 return rules_match(wild1, wild2,
770 wild1->wc.wildcards | wild2->wc.wildcards,
771 wild1->wc.nw_src_mask & wild2->wc.nw_src_mask,
772 wild1->wc.nw_dst_mask & wild2->wc.nw_dst_mask,
776 /* Searches 'bucket' for a rule that matches 'target'. Returns the
777 * highest-priority match, if one is found, or a null pointer if there is no
780 * 'field_idx' must be the index of the first wildcarded field in 'bucket'. */
781 static struct cls_rule *
782 search_bucket(struct cls_bucket *bucket, int field_idx,
783 const struct cls_rule *target)
785 struct cls_rule *pos;
/* Reject the whole bucket if its fixed-field prefix doesn't match. */
787 if (!equal_fields(&bucket->fixed, &target->flow, field_idx)) {
/* The list is kept in decreasing priority order (see bucket_insert()), so
 * the first matching rule is the highest-priority one. */
791 LIST_FOR_EACH (pos, node.list, &bucket->rules) {
792 if (rules_match_1wild(target, pos, field_idx)) {
799 /* Searches 'table' for a rule that matches 'target'. Returns the
800 * highest-priority match, if one is found, or a null pointer if there is no
803 * 'field_idx' must be the index of the first wildcarded field in 'table'. */
804 static struct cls_rule *
805 search_table(const struct hmap *table, int field_idx,
806 const struct cls_rule *target)
808 struct cls_bucket *bucket;
/* Fast paths keyed on bucket count: an empty table (presumably returns NULL;
 * line elided) and a single-bucket table need no hash computation. */
810 switch (hmap_count(table)) {
811 /* In these special cases there's no need to hash. */
815 bucket = CONTAINER_OF(hmap_first(table), struct cls_bucket, hmap_node);
816 return search_bucket(bucket, field_idx, target);
/* General case: hash the target's field prefix and probe candidates. */
819 HMAP_FOR_EACH_WITH_HASH (bucket, hmap_node,
820 hash_fields(&target->flow, field_idx), table) {
821 struct cls_rule *rule = search_bucket(bucket, field_idx, target);
/* Returns the rule in cls->exact_table whose flow equals 'target' (with
 * 'hash' being flow_hash of 'target'), or NULL if there is none. */
829 static struct cls_rule *
830 search_exact_table(const struct classifier *cls, size_t hash,
831 const struct flow *target)
833 struct cls_rule *rule;
/* Hash collisions are resolved with a full flow comparison. */
835 HMAP_FOR_EACH_WITH_HASH (rule, node.hmap, hash, &cls->exact_table) {
836 if (flow_equal(&rule->flow, target)) {