X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=tests%2Ftest-classifier.c;h=84f9367248e14334619f40defbdf887d0820b91f;hb=cabd4c43854275943792a8b1bb4c7b719e210259;hp=6c81cd60f090b0c23b411c942c0af51d2901e1db;hpb=5f55c39b21e69025045437ffbd3bb98fe6ce2e89;p=sliver-openvswitch.git

diff --git a/tests/test-classifier.c b/tests/test-classifier.c
index 6c81cd60f..84f936724 100644
--- a/tests/test-classifier.c
+++ b/tests/test-classifier.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2010 Nicira Networks.
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -26,17 +26,69 @@
  */
 #include
-#include
-#include "classifier.h"
 #include
 #include
+#include "byte-order.h"
+#include "command-line.h"
 #include "flow.h"
-#include
+#include "ofp-util.h"
 #include "packets.h"
-
+#include "random.h"
+#include "unaligned.h"
+#include "ovstest.h"
 #undef NDEBUG
 #include
+/* We need access to classifier internal definitions to be able to fully
+ * test them.  The alternative would be to expose them all in the classifier
+ * API. */
+#include "classifier.c"
+
+/* Fields in a rule. */
+#define CLS_FIELDS                          \
+    /*                  struct flow   all-caps */  \
+    /*                  member name   name     */  \
+    /*                  -----------   -------- */  \
+    CLS_FIELD(tunnel.tun_id, TUN_ID)        \
+    CLS_FIELD(metadata,      METADATA)      \
+    CLS_FIELD(nw_src,        NW_SRC)        \
+    CLS_FIELD(nw_dst,        NW_DST)        \
+    CLS_FIELD(in_port,       IN_PORT)       \
+    CLS_FIELD(vlan_tci,      VLAN_TCI)      \
+    CLS_FIELD(dl_type,       DL_TYPE)       \
+    CLS_FIELD(tp_src,        TP_SRC)        \
+    CLS_FIELD(tp_dst,        TP_DST)        \
+    CLS_FIELD(dl_src,        DL_SRC)        \
+    CLS_FIELD(dl_dst,        DL_DST)        \
+    CLS_FIELD(nw_proto,      NW_PROTO)      \
+    CLS_FIELD(nw_tos,        NW_DSCP)
+
+/* Field indexes.
+ *
+ * (These are also indexed into struct classifier's 'tables' array.) */
+enum {
+#define CLS_FIELD(MEMBER, NAME) CLS_F_IDX_##NAME,
+    CLS_FIELDS
+#undef CLS_FIELD
+    CLS_N_FIELDS
+};
+
+/* Field information. */
+struct cls_field {
+    int ofs;            /* Offset in struct flow. */
+    int len;            /* Length in bytes. */
+    const char *name;   /* Name (for debugging). */
+};
+
+static const struct cls_field cls_fields[CLS_N_FIELDS] = {
+#define CLS_FIELD(MEMBER, NAME)                 \
+    { offsetof(struct flow, MEMBER),            \
+      sizeof ((struct flow *)0)->MEMBER,        \
+      #NAME },
+    CLS_FIELDS
+#undef CLS_FIELD
+};
+
 struct test_rule {
     int aux;                    /* Auxiliary data. */
     struct cls_rule cls_rule;   /* Classifier rule data. */
@@ -48,6 +100,20 @@ test_rule_from_cls_rule(const struct cls_rule *rule)
     return rule ? CONTAINER_OF(rule, struct test_rule, cls_rule) : NULL;
 }

+static void
+test_rule_destroy(struct test_rule *rule)
+{
+    if (rule) {
+        cls_rule_destroy(&rule->cls_rule);
+        free(rule);
+    }
+}
+
+static struct test_rule *make_rule(int wc_fields, unsigned int priority,
+                                   int value_pat);
+static void free_rule(struct test_rule *);
+static struct test_rule *clone_rule(const struct test_rule *);
+
 /* Trivial (linear) classifier.
*/ struct tcls { size_t n_rules; @@ -70,25 +136,12 @@ tcls_destroy(struct tcls *tcls) size_t i; for (i = 0; i < tcls->n_rules; i++) { - free(tcls->rules[i]); + test_rule_destroy(tcls->rules[i]); } free(tcls->rules); } } -static int -tcls_count_exact(const struct tcls *tcls) -{ - int n_exact; - size_t i; - - n_exact = 0; - for (i = 0; i < tcls->n_rules; i++) { - n_exact += tcls->rules[i]->cls_rule.wc.wildcards == 0; - } - return n_exact; -} - static bool tcls_is_empty(const struct tcls *tcls) { @@ -100,18 +153,14 @@ tcls_insert(struct tcls *tcls, const struct test_rule *rule) { size_t i; - assert(rule->cls_rule.wc.wildcards || rule->cls_rule.priority == UINT_MAX); for (i = 0; i < tcls->n_rules; i++) { const struct cls_rule *pos = &tcls->rules[i]->cls_rule; - if (pos->priority == rule->cls_rule.priority - && pos->wc.wildcards == rule->cls_rule.wc.wildcards - && flow_equal(&pos->flow, &rule->cls_rule.flow)) { - /* Exact match. - * XXX flow_equal should ignore wildcarded fields */ - free(tcls->rules[i]); - tcls->rules[i] = xmemdup(rule, sizeof *rule); + if (cls_rule_equal(pos, &rule->cls_rule)) { + /* Exact match. */ + free_rule(tcls->rules[i]); + tcls->rules[i] = clone_rule(rule); return tcls->rules[i]; - } else if (pos->priority <= rule->cls_rule.priority) { + } else if (pos->priority < rule->cls_rule.priority) { break; } } @@ -124,7 +173,7 @@ tcls_insert(struct tcls *tcls, const struct test_rule *rule) memmove(&tcls->rules[i + 1], &tcls->rules[i], sizeof *tcls->rules * (tcls->n_rules - i)); } - tcls->rules[i] = xmemdup(rule, sizeof *rule); + tcls->rules[i] = clone_rule(rule); tcls->n_rules++; return tcls->rules[i]; } @@ -137,66 +186,87 @@ tcls_remove(struct tcls *cls, const struct test_rule *rule) for (i = 0; i < cls->n_rules; i++) { struct test_rule *pos = cls->rules[i]; if (pos == rule) { - free(pos); + test_rule_destroy(pos); + memmove(&cls->rules[i], &cls->rules[i + 1], sizeof *cls->rules * (cls->n_rules - i - 1)); + cls->n_rules--; return; } } - NOT_REACHED(); -} - -static uint32_t -read_uint32(const void *p) -{ - uint32_t x; - memcpy(&x, p, sizeof x); - return x; + OVS_NOT_REACHED(); } static bool -match(const struct cls_rule *wild, const flow_t *fixed) +match(const struct cls_rule *wild_, const struct flow *fixed) { + struct match wild; int f_idx; + minimatch_expand(&wild_->match, &wild); for (f_idx = 0; f_idx < CLS_N_FIELDS; f_idx++) { - const struct cls_field *f = &cls_fields[f_idx]; - void *wild_field = (char *) &wild->flow + f->ofs; - void *fixed_field = (char *) fixed + f->ofs; - - if ((wild->wc.wildcards & f->wildcards) == f->wildcards || - !memcmp(wild_field, fixed_field, f->len)) { - /* Definite match. 
*/ - continue; + bool eq; + + if (f_idx == CLS_F_IDX_NW_SRC) { + eq = !((fixed->nw_src ^ wild.flow.nw_src) + & wild.wc.masks.nw_src); + } else if (f_idx == CLS_F_IDX_NW_DST) { + eq = !((fixed->nw_dst ^ wild.flow.nw_dst) + & wild.wc.masks.nw_dst); + } else if (f_idx == CLS_F_IDX_TP_SRC) { + eq = !((fixed->tp_src ^ wild.flow.tp_src) + & wild.wc.masks.tp_src); + } else if (f_idx == CLS_F_IDX_TP_DST) { + eq = !((fixed->tp_dst ^ wild.flow.tp_dst) + & wild.wc.masks.tp_dst); + } else if (f_idx == CLS_F_IDX_DL_SRC) { + eq = eth_addr_equal_except(fixed->dl_src, wild.flow.dl_src, + wild.wc.masks.dl_src); + } else if (f_idx == CLS_F_IDX_DL_DST) { + eq = eth_addr_equal_except(fixed->dl_dst, wild.flow.dl_dst, + wild.wc.masks.dl_dst); + } else if (f_idx == CLS_F_IDX_VLAN_TCI) { + eq = !((fixed->vlan_tci ^ wild.flow.vlan_tci) + & wild.wc.masks.vlan_tci); + } else if (f_idx == CLS_F_IDX_TUN_ID) { + eq = !((fixed->tunnel.tun_id ^ wild.flow.tunnel.tun_id) + & wild.wc.masks.tunnel.tun_id); + } else if (f_idx == CLS_F_IDX_METADATA) { + eq = !((fixed->metadata ^ wild.flow.metadata) + & wild.wc.masks.metadata); + } else if (f_idx == CLS_F_IDX_NW_DSCP) { + eq = !((fixed->nw_tos ^ wild.flow.nw_tos) & + (wild.wc.masks.nw_tos & IP_DSCP_MASK)); + } else if (f_idx == CLS_F_IDX_NW_PROTO) { + eq = !((fixed->nw_proto ^ wild.flow.nw_proto) + & wild.wc.masks.nw_proto); + } else if (f_idx == CLS_F_IDX_DL_TYPE) { + eq = !((fixed->dl_type ^ wild.flow.dl_type) + & wild.wc.masks.dl_type); + } else if (f_idx == CLS_F_IDX_IN_PORT) { + eq = !((fixed->in_port.ofp_port + ^ wild.flow.in_port.ofp_port) + & wild.wc.masks.in_port.ofp_port); + } else { + OVS_NOT_REACHED(); } - if (wild->wc.wildcards & f->wildcards) { - uint32_t test = read_uint32(wild_field); - uint32_t ip = read_uint32(fixed_field); - int shift = (f_idx == CLS_F_IDX_NW_SRC - ? OFPFW_NW_SRC_SHIFT : OFPFW_NW_DST_SHIFT); - uint32_t mask = flow_nw_bits_to_mask(wild->wc.wildcards, shift); - if (!((test ^ ip) & mask)) { - continue; - } + if (!eq) { + return false; } - - return false; } return true; } static struct cls_rule * -tcls_lookup(const struct tcls *cls, const flow_t *flow, int include) +tcls_lookup(const struct tcls *cls, const struct flow *flow) { size_t i; for (i = 0; i < cls->n_rules; i++) { struct test_rule *pos = cls->rules[i]; - uint32_t wildcards = pos->cls_rule.wc.wildcards; - if (include & (wildcards ? CLS_INC_WILD : CLS_INC_EXACT) - && match(&pos->cls_rule, flow)) { + if (match(&pos->cls_rule, flow)) { return &pos->cls_rule; } } @@ -204,67 +274,66 @@ tcls_lookup(const struct tcls *cls, const flow_t *flow, int include) } static void -tcls_delete_matches(struct tcls *cls, - const struct cls_rule *target, - int include) +tcls_delete_matches(struct tcls *cls, const struct cls_rule *target) { size_t i; for (i = 0; i < cls->n_rules; ) { struct test_rule *pos = cls->rules[i]; - uint32_t wildcards = pos->cls_rule.wc.wildcards; - if (include & (wildcards ? 
CLS_INC_WILD : CLS_INC_EXACT) - && match(target, &pos->cls_rule.flow)) { - tcls_remove(cls, pos); - } else { - i++; + if (!minimask_has_extra(&pos->cls_rule.match.mask, + &target->match.mask)) { + struct flow flow; + + miniflow_expand(&pos->cls_rule.match.flow, &flow); + if (match(target, &flow)) { + tcls_remove(cls, pos); + continue; + } } + i++; } } -#ifdef WORDS_BIGENDIAN -#define T_HTONL(VALUE) ((uint32_t) (VALUE)) -#define T_HTONS(VALUE) ((uint32_t) (VALUE)) -#else -#define T_HTONL(VALUE) (((((uint32_t) (VALUE)) & 0x000000ff) << 24) | \ - ((((uint32_t) (VALUE)) & 0x0000ff00) << 8) | \ - ((((uint32_t) (VALUE)) & 0x00ff0000) >> 8) | \ - ((((uint32_t) (VALUE)) & 0xff000000) >> 24)) -#define T_HTONS(VALUE) (((((uint16_t) (VALUE)) & 0xff00) >> 8) | \ - ((((uint16_t) (VALUE)) & 0x00ff) << 8)) -#endif - -static uint32_t nw_src_values[] = { T_HTONL(0xc0a80001), - T_HTONL(0xc0a04455) }; -static uint32_t nw_dst_values[] = { T_HTONL(0xc0a80002), - T_HTONL(0xc0a04455) }; -static uint16_t in_port_values[] = { T_HTONS(1), T_HTONS(OFPP_LOCAL) }; -static uint16_t dl_vlan_values[] = { T_HTONS(101), T_HTONS(0) }; -static uint8_t dl_vlan_pcp_values[] = { 7, 0 }; -static uint16_t dl_type_values[] - = { T_HTONS(ETH_TYPE_IP), T_HTONS(ETH_TYPE_ARP) }; -static uint16_t tp_src_values[] = { T_HTONS(49362), T_HTONS(80) }; -static uint16_t tp_dst_values[] = { T_HTONS(6667), T_HTONS(22) }; +static ovs_be32 nw_src_values[] = { CONSTANT_HTONL(0xc0a80001), + CONSTANT_HTONL(0xc0a04455) }; +static ovs_be32 nw_dst_values[] = { CONSTANT_HTONL(0xc0a80002), + CONSTANT_HTONL(0xc0a04455) }; +static ovs_be64 tun_id_values[] = { + 0, + CONSTANT_HTONLL(UINT64_C(0xfedcba9876543210)) }; +static ovs_be64 metadata_values[] = { + 0, + CONSTANT_HTONLL(UINT64_C(0xfedcba9876543210)) }; +static ofp_port_t in_port_values[] = { OFP_PORT_C(1), OFPP_LOCAL }; +static ovs_be16 vlan_tci_values[] = { CONSTANT_HTONS(101), CONSTANT_HTONS(0) }; +static ovs_be16 dl_type_values[] + = { CONSTANT_HTONS(ETH_TYPE_IP), CONSTANT_HTONS(ETH_TYPE_ARP) }; +static ovs_be16 tp_src_values[] = { CONSTANT_HTONS(49362), + CONSTANT_HTONS(80) }; +static ovs_be16 tp_dst_values[] = { CONSTANT_HTONS(6667), CONSTANT_HTONS(22) }; static uint8_t dl_src_values[][6] = { { 0x00, 0x02, 0xe3, 0x0f, 0x80, 0xa4 }, { 0x5e, 0x33, 0x7f, 0x5f, 0x1e, 0x99 } }; static uint8_t dl_dst_values[][6] = { { 0x4a, 0x27, 0x71, 0xae, 0x64, 0xc1 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } }; -static uint8_t nw_proto_values[] = { IP_TYPE_TCP, IP_TYPE_ICMP }; -static uint8_t nw_tos_values[] = { 49, 0 }; +static uint8_t nw_proto_values[] = { IPPROTO_TCP, IPPROTO_ICMP }; +static uint8_t nw_dscp_values[] = { 48, 0 }; static void *values[CLS_N_FIELDS][2]; static void init_values(void) { + values[CLS_F_IDX_TUN_ID][0] = &tun_id_values[0]; + values[CLS_F_IDX_TUN_ID][1] = &tun_id_values[1]; + + values[CLS_F_IDX_METADATA][0] = &metadata_values[0]; + values[CLS_F_IDX_METADATA][1] = &metadata_values[1]; + values[CLS_F_IDX_IN_PORT][0] = &in_port_values[0]; values[CLS_F_IDX_IN_PORT][1] = &in_port_values[1]; - values[CLS_F_IDX_DL_VLAN][0] = &dl_vlan_values[0]; - values[CLS_F_IDX_DL_VLAN][1] = &dl_vlan_values[1]; - - values[CLS_F_IDX_DL_VLAN_PCP][0] = &dl_vlan_pcp_values[0]; - values[CLS_F_IDX_DL_VLAN_PCP][1] = &dl_vlan_pcp_values[1]; + values[CLS_F_IDX_VLAN_TCI][0] = &vlan_tci_values[0]; + values[CLS_F_IDX_VLAN_TCI][1] = &vlan_tci_values[1]; values[CLS_F_IDX_DL_SRC][0] = dl_src_values[0]; values[CLS_F_IDX_DL_SRC][1] = dl_src_values[1]; @@ -284,8 +353,8 @@ init_values(void) values[CLS_F_IDX_NW_PROTO][0] = 
&nw_proto_values[0]; values[CLS_F_IDX_NW_PROTO][1] = &nw_proto_values[1]; - values[CLS_F_IDX_NW_TOS][0] = &nw_tos_values[0]; - values[CLS_F_IDX_NW_TOS][1] = &nw_tos_values[1]; + values[CLS_F_IDX_NW_DSCP][0] = &nw_dscp_values[0]; + values[CLS_F_IDX_NW_DSCP][1] = &nw_dscp_values[1]; values[CLS_F_IDX_TP_SRC][0] = &tp_src_values[0]; values[CLS_F_IDX_TP_SRC][1] = &tp_src_values[1]; @@ -296,29 +365,30 @@ init_values(void) #define N_NW_SRC_VALUES ARRAY_SIZE(nw_src_values) #define N_NW_DST_VALUES ARRAY_SIZE(nw_dst_values) +#define N_TUN_ID_VALUES ARRAY_SIZE(tun_id_values) +#define N_METADATA_VALUES ARRAY_SIZE(metadata_values) #define N_IN_PORT_VALUES ARRAY_SIZE(in_port_values) -#define N_DL_VLAN_VALUES ARRAY_SIZE(dl_vlan_values) -#define N_DL_VLAN_PCP_VALUES ARRAY_SIZE(dl_vlan_pcp_values) +#define N_VLAN_TCI_VALUES ARRAY_SIZE(vlan_tci_values) #define N_DL_TYPE_VALUES ARRAY_SIZE(dl_type_values) #define N_TP_SRC_VALUES ARRAY_SIZE(tp_src_values) #define N_TP_DST_VALUES ARRAY_SIZE(tp_dst_values) #define N_DL_SRC_VALUES ARRAY_SIZE(dl_src_values) #define N_DL_DST_VALUES ARRAY_SIZE(dl_dst_values) #define N_NW_PROTO_VALUES ARRAY_SIZE(nw_proto_values) -#define N_NW_TOS_VALUES ARRAY_SIZE(nw_tos_values) +#define N_NW_DSCP_VALUES ARRAY_SIZE(nw_dscp_values) #define N_FLOW_VALUES (N_NW_SRC_VALUES * \ N_NW_DST_VALUES * \ + N_TUN_ID_VALUES * \ N_IN_PORT_VALUES * \ - N_DL_VLAN_VALUES * \ - N_DL_VLAN_PCP_VALUES * \ + N_VLAN_TCI_VALUES * \ N_DL_TYPE_VALUES * \ N_TP_SRC_VALUES * \ N_TP_DST_VALUES * \ N_DL_SRC_VALUES * \ N_DL_DST_VALUES * \ N_NW_PROTO_VALUES * \ - N_NW_TOS_VALUES) + N_NW_DSCP_VALUES) static unsigned int get_value(unsigned int *x, unsigned n_values) @@ -328,42 +398,30 @@ get_value(unsigned int *x, unsigned n_values) return rem; } -static struct cls_rule * -lookup_with_include_bits(const struct classifier *cls, - const flow_t *flow, int include) -{ - switch (include) { - case CLS_INC_WILD: - return classifier_lookup_wild(cls, flow); - case CLS_INC_EXACT: - return classifier_lookup_exact(cls, flow); - case CLS_INC_WILD | CLS_INC_EXACT: - return classifier_lookup(cls, flow); - default: - abort(); - } -} - static void compare_classifiers(struct classifier *cls, struct tcls *tcls) + OVS_REQ_RDLOCK(cls->rwlock) { + static const int confidence = 500; unsigned int i; assert(classifier_count(cls) == tcls->n_rules); - assert(classifier_count_exact(cls) == tcls_count_exact(tcls)); - for (i = 0; i < N_FLOW_VALUES; i++) { - struct cls_rule *cr0, *cr1; - flow_t flow; + for (i = 0; i < confidence; i++) { + struct cls_rule *cr0, *cr1, *cr2; + struct flow flow; + struct flow_wildcards wc; unsigned int x; - int include; - x = i; + flow_wildcards_init_catchall(&wc); + x = random_range(N_FLOW_VALUES); + memset(&flow, 0, sizeof flow); flow.nw_src = nw_src_values[get_value(&x, N_NW_SRC_VALUES)]; flow.nw_dst = nw_dst_values[get_value(&x, N_NW_DST_VALUES)]; - flow.in_port = in_port_values[get_value(&x, N_IN_PORT_VALUES)]; - flow.dl_vlan = dl_vlan_values[get_value(&x, N_DL_VLAN_VALUES)]; - flow.dl_vlan_pcp = dl_vlan_pcp_values[get_value(&x, - N_DL_VLAN_PCP_VALUES)]; + flow.tunnel.tun_id = tun_id_values[get_value(&x, N_TUN_ID_VALUES)]; + flow.metadata = metadata_values[get_value(&x, N_METADATA_VALUES)]; + flow.in_port.ofp_port = in_port_values[get_value(&x, + N_IN_PORT_VALUES)]; + flow.vlan_tci = vlan_tci_values[get_value(&x, N_VLAN_TCI_VALUES)]; flow.dl_type = dl_type_values[get_value(&x, N_DL_TYPE_VALUES)]; flow.tp_src = tp_src_values[get_value(&x, N_TP_SRC_VALUES)]; flow.tp_dst = tp_dst_values[get_value(&x, 
N_TP_DST_VALUES)]; @@ -372,73 +430,95 @@ compare_classifiers(struct classifier *cls, struct tcls *tcls) memcpy(flow.dl_dst, dl_dst_values[get_value(&x, N_DL_DST_VALUES)], ETH_ADDR_LEN); flow.nw_proto = nw_proto_values[get_value(&x, N_NW_PROTO_VALUES)]; - flow.nw_tos = nw_tos_values[get_value(&x, N_NW_TOS_VALUES)]; - memset(flow.reserved, 0, sizeof flow.reserved); - - for (include = 1; include <= 3; include++) { - cr0 = lookup_with_include_bits(cls, &flow, include); - cr1 = tcls_lookup(tcls, &flow, include); - assert((cr0 == NULL) == (cr1 == NULL)); - if (cr0 != NULL) { - const struct test_rule *tr0 = test_rule_from_cls_rule(cr0); - const struct test_rule *tr1 = test_rule_from_cls_rule(cr1); - - assert(flow_equal(&cr0->flow, &cr1->flow)); - assert(cr0->wc.wildcards == cr1->wc.wildcards); - assert(cr0->priority == cr1->priority); - /* Skip nw_src_mask and nw_dst_mask, because they are derived - * members whose values are used only for optimization. */ - assert(tr0->aux == tr1->aux); - } + flow.nw_tos = nw_dscp_values[get_value(&x, N_NW_DSCP_VALUES)]; + + cr0 = classifier_lookup(cls, &flow, &wc); + cr1 = tcls_lookup(tcls, &flow); + assert((cr0 == NULL) == (cr1 == NULL)); + if (cr0 != NULL) { + const struct test_rule *tr0 = test_rule_from_cls_rule(cr0); + const struct test_rule *tr1 = test_rule_from_cls_rule(cr1); + + assert(cls_rule_equal(cr0, cr1)); + assert(tr0->aux == tr1->aux); } + cr2 = classifier_lookup(cls, &flow, NULL); + assert(cr2 == cr0); } } -static void -free_rule(struct cls_rule *cls_rule, void *cls) -{ - classifier_remove(cls, cls_rule); - free(test_rule_from_cls_rule(cls_rule)); -} - static void destroy_classifier(struct classifier *cls) { - classifier_for_each(cls, CLS_INC_ALL, free_rule, cls); + struct test_rule *rule, *next_rule; + struct cls_cursor cursor; + + fat_rwlock_wrlock(&cls->rwlock); + cls_cursor_init(&cursor, cls, NULL); + CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cls_rule, &cursor) { + classifier_remove(cls, &rule->cls_rule); + free_rule(rule); + } + fat_rwlock_unlock(&cls->rwlock); classifier_destroy(cls); } static void -check_tables(const struct classifier *cls, - int n_tables, int n_buckets, int n_rules) +check_tables(const struct classifier *cls, int n_tables, int n_rules, + int n_dups) OVS_REQ_RDLOCK(cls->rwlock) { + const struct cls_subtable *table; + struct test_rule *test_rule; + struct cls_cursor cursor; int found_tables = 0; - int found_buckets = 0; int found_rules = 0; - int i; + int found_dups = 0; + int found_rules2 = 0; - BUILD_ASSERT(CLS_N_FIELDS == ARRAY_SIZE(cls->tables)); - for (i = 0; i < CLS_N_FIELDS; i++) { - const struct cls_bucket *bucket; - if (!hmap_is_empty(&cls->tables[i])) { - found_tables++; - } - HMAP_FOR_EACH (bucket, struct cls_bucket, hmap_node, &cls->tables[i]) { - found_buckets++; - assert(!list_is_empty(&bucket->rules)); - found_rules += list_size(&bucket->rules); - } - } + HMAP_FOR_EACH (table, hmap_node, &cls->cls->subtables) { + const struct cls_rule *head; + unsigned int max_priority = 0; + unsigned int max_count = 0; + + assert(!hmap_is_empty(&table->rules)); - if (!hmap_is_empty(&cls->exact_table)) { found_tables++; - found_buckets++; - found_rules += hmap_count(&cls->exact_table); + HMAP_FOR_EACH (head, hmap_node, &table->rules) { + unsigned int prev_priority = UINT_MAX; + const struct cls_rule *rule; + + if (head->priority > max_priority) { + max_priority = head->priority; + max_count = 1; + } else if (head->priority == max_priority) { + ++max_count; + } + + found_rules++; + LIST_FOR_EACH (rule, list, &head->list) { + 
assert(rule->priority < prev_priority); + assert(rule->priority <= table->max_priority); + + prev_priority = rule->priority; + found_rules++; + found_dups++; + assert(classifier_find_rule_exactly(cls, rule) == rule); + } + } + assert(table->max_priority == max_priority); + assert(table->max_count == max_count); } - assert(n_tables == -1 || found_tables == n_tables); + assert(found_tables == hmap_count(&cls->cls->subtables)); + assert(n_tables == -1 || n_tables == hmap_count(&cls->cls->subtables)); assert(n_rules == -1 || found_rules == n_rules); - assert(n_buckets == -1 || found_buckets == n_buckets); + assert(n_dups == -1 || found_dups == n_dups); + + cls_cursor_init(&cursor, cls, NULL); + CLS_CURSOR_FOR_EACH (test_rule, cls_rule, &cursor) { + found_rules2++; + } + assert(found_rules == found_rules2); } static struct test_rule * @@ -446,64 +526,126 @@ make_rule(int wc_fields, unsigned int priority, int value_pat) { const struct cls_field *f; struct test_rule *rule; - uint32_t wildcards; - flow_t flow; + struct match match; - wildcards = 0; - memset(&flow, 0, sizeof flow); + match_init_catchall(&match); for (f = &cls_fields[0]; f < &cls_fields[CLS_N_FIELDS]; f++) { int f_idx = f - cls_fields; - if (wc_fields & (1u << f_idx)) { - wildcards |= f->wildcards; + int value_idx = (value_pat & (1u << f_idx)) != 0; + memcpy((char *) &match.flow + f->ofs, + values[f_idx][value_idx], f->len); + + if (f_idx == CLS_F_IDX_NW_SRC) { + match.wc.masks.nw_src = OVS_BE32_MAX; + } else if (f_idx == CLS_F_IDX_NW_DST) { + match.wc.masks.nw_dst = OVS_BE32_MAX; + } else if (f_idx == CLS_F_IDX_TP_SRC) { + match.wc.masks.tp_src = OVS_BE16_MAX; + } else if (f_idx == CLS_F_IDX_TP_DST) { + match.wc.masks.tp_dst = OVS_BE16_MAX; + } else if (f_idx == CLS_F_IDX_DL_SRC) { + memset(match.wc.masks.dl_src, 0xff, ETH_ADDR_LEN); + } else if (f_idx == CLS_F_IDX_DL_DST) { + memset(match.wc.masks.dl_dst, 0xff, ETH_ADDR_LEN); + } else if (f_idx == CLS_F_IDX_VLAN_TCI) { + match.wc.masks.vlan_tci = OVS_BE16_MAX; + } else if (f_idx == CLS_F_IDX_TUN_ID) { + match.wc.masks.tunnel.tun_id = OVS_BE64_MAX; + } else if (f_idx == CLS_F_IDX_METADATA) { + match.wc.masks.metadata = OVS_BE64_MAX; + } else if (f_idx == CLS_F_IDX_NW_DSCP) { + match.wc.masks.nw_tos |= IP_DSCP_MASK; + } else if (f_idx == CLS_F_IDX_NW_PROTO) { + match.wc.masks.nw_proto = UINT8_MAX; + } else if (f_idx == CLS_F_IDX_DL_TYPE) { + match.wc.masks.dl_type = OVS_BE16_MAX; + } else if (f_idx == CLS_F_IDX_IN_PORT) { + match.wc.masks.in_port.ofp_port = u16_to_ofp(UINT16_MAX); } else { - int value_idx = (value_pat & (1u << f_idx)) != 0; - memcpy((char *) &flow + f->ofs, values[f_idx][value_idx], f->len); + OVS_NOT_REACHED(); } } rule = xzalloc(sizeof *rule); - cls_rule_from_flow(&rule->cls_rule, &flow, wildcards, - !wildcards ? UINT_MAX : priority); + cls_rule_init(&rule->cls_rule, &match, wc_fields ? 
priority : UINT_MAX); return rule; } +static struct test_rule * +clone_rule(const struct test_rule *src) +{ + struct test_rule *dst; + + dst = xmalloc(sizeof *dst); + dst->aux = src->aux; + cls_rule_clone(&dst->cls_rule, &src->cls_rule); + return dst; +} + +static void +free_rule(struct test_rule *rule) +{ + cls_rule_destroy(&rule->cls_rule); + free(rule); +} + static void shuffle(unsigned int *p, size_t n) { for (; n > 1; n--, p++) { - unsigned int *q = &p[rand() % n]; + unsigned int *q = &p[random_range(n)]; unsigned int tmp = *p; *p = *q; *q = tmp; } } + +static void +shuffle_u32s(uint32_t *p, size_t n) +{ + for (; n > 1; n--, p++) { + uint32_t *q = &p[random_range(n)]; + uint32_t tmp = *p; + *p = *q; + *q = tmp; + } +} +/* Classifier tests. */ + +static enum mf_field_id trie_fields[2] = { + MFF_IPV4_DST, MFF_IPV4_SRC +}; + /* Tests an empty classifier. */ static void -test_empty(void) +test_empty(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) { struct classifier cls; struct tcls tcls; - classifier_init(&cls); + classifier_init(&cls, flow_segment_u32s); + fat_rwlock_wrlock(&cls.rwlock); + classifier_set_prefix_fields(&cls, trie_fields, ARRAY_SIZE(trie_fields)); tcls_init(&tcls); assert(classifier_is_empty(&cls)); assert(tcls_is_empty(&tcls)); compare_classifiers(&cls, &tcls); + fat_rwlock_unlock(&cls.rwlock); classifier_destroy(&cls); tcls_destroy(&tcls); } /* Destroys a null classifier. */ static void -test_destroy_null(void) +test_destroy_null(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) { classifier_destroy(NULL); } /* Tests classification with one rule at a time. */ static void -test_single_rule(void) +test_single_rule(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) { unsigned int wc_fields; /* Hilarious. */ @@ -515,16 +657,15 @@ test_single_rule(void) rule = make_rule(wc_fields, hash_bytes(&wc_fields, sizeof wc_fields, 0), 0); - classifier_init(&cls); + classifier_init(&cls, flow_segment_u32s); + fat_rwlock_wrlock(&cls.rwlock); + classifier_set_prefix_fields(&cls, trie_fields, + ARRAY_SIZE(trie_fields)); tcls_init(&tcls); tcls_rule = tcls_insert(&tcls, rule); - if (wc_fields) { - assert(!classifier_insert(&cls, &rule->cls_rule)); - } else { - classifier_insert_exact(&cls, &rule->cls_rule); - } - check_tables(&cls, 1, 1, 1); + classifier_insert(&cls, &rule->cls_rule); + check_tables(&cls, 1, 1, 0); compare_classifiers(&cls, &tcls); classifier_remove(&cls, &rule->cls_rule); @@ -533,7 +674,8 @@ test_single_rule(void) assert(tcls_is_empty(&tcls)); compare_classifiers(&cls, &tcls); - free(rule); + free_rule(rule); + fat_rwlock_unlock(&cls.rwlock); classifier_destroy(&cls); tcls_destroy(&tcls); } @@ -541,7 +683,7 @@ test_single_rule(void) /* Tests replacing one rule by another. 
*/ static void -test_rule_replacement(void) +test_rule_replacement(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) { unsigned int wc_fields; @@ -556,438 +698,665 @@ test_rule_replacement(void) rule2->aux += 5; rule2->aux += 5; - classifier_init(&cls); + classifier_init(&cls, flow_segment_u32s); + fat_rwlock_wrlock(&cls.rwlock); + classifier_set_prefix_fields(&cls, trie_fields, + ARRAY_SIZE(trie_fields)); tcls_init(&tcls); tcls_insert(&tcls, rule1); - assert(!classifier_insert(&cls, &rule1->cls_rule)); - check_tables(&cls, 1, 1, 1); + classifier_insert(&cls, &rule1->cls_rule); + check_tables(&cls, 1, 1, 0); compare_classifiers(&cls, &tcls); tcls_destroy(&tcls); tcls_init(&tcls); tcls_insert(&tcls, rule2); assert(test_rule_from_cls_rule( - classifier_insert(&cls, &rule2->cls_rule)) == rule1); - free(rule1); - check_tables(&cls, 1, 1, 1); + classifier_replace(&cls, &rule2->cls_rule)) == rule1); + free_rule(rule1); + check_tables(&cls, 1, 1, 0); compare_classifiers(&cls, &tcls); tcls_destroy(&tcls); + fat_rwlock_unlock(&cls.rwlock); destroy_classifier(&cls); } } static int -table_mask(int table) +factorial(int n_items) { - return ((1u << CLS_N_FIELDS) - 1) & ~((1u << table) - 1); + int n, i; + + n = 1; + for (i = 2; i <= n_items; i++) { + n *= i; + } + return n; } -static int -random_wcf_in_table(int table, int seed) +static void +swap(int *a, int *b) { - int wc_fields = (1u << table) | hash_int(seed, 0); - return wc_fields & table_mask(table); + int tmp = *a; + *a = *b; + *b = tmp; } -/* Tests classification with two rules at a time that fall into the same - * bucket. */ static void -test_two_rules_in_one_bucket(void) +reverse(int *a, int n) { - int table, rel_pri, wcf_pat, value_pat; - - for (table = 0; table <= CLS_N_FIELDS; table++) { - for (rel_pri = -1; rel_pri <= +1; rel_pri++) { - for (wcf_pat = 0; wcf_pat < 4; wcf_pat++) { - int n_value_pats = table == CLS_N_FIELDS - 1 ? 1 : 2; - for (value_pat = 0; value_pat < n_value_pats; value_pat++) { - struct test_rule *rule1, *tcls_rule1; - struct test_rule *rule2, *tcls_rule2; - struct test_rule *displaced_rule; - struct classifier cls; - struct tcls tcls; - unsigned int pri1, pri2; - int wcf1, wcf2; - - if (table != CLS_F_IDX_EXACT) { - /* We can use identical priorities in this test because - * the classifier always chooses the rule added later - * for equal-priority rules that fall into the same - * bucket. */ - pri1 = table * 257 + 50; - pri2 = pri1 + rel_pri; - - wcf1 = (wcf_pat & 1 - ? random_wcf_in_table(table, pri1) - : 1u << table); - wcf2 = (wcf_pat & 2 - ? random_wcf_in_table(table, pri2) - : 1u << table); - if (value_pat) { - wcf1 &= ~(1u << (CLS_N_FIELDS - 1)); - wcf2 &= ~(1u << (CLS_N_FIELDS - 1)); - } - } else { - /* This classifier always puts exact-match rules at - * maximum priority. */ - pri1 = pri2 = UINT_MAX; - - /* No wildcard fields. 
*/ - wcf1 = wcf2 = 0; - } - - rule1 = make_rule(wcf1, pri1, 0); - rule2 = make_rule(wcf2, pri2, - value_pat << (CLS_N_FIELDS - 1)); - - classifier_init(&cls); - tcls_init(&tcls); - - tcls_rule1 = tcls_insert(&tcls, rule1); - tcls_rule2 = tcls_insert(&tcls, rule2); - assert(!classifier_insert(&cls, &rule1->cls_rule)); - displaced_rule = test_rule_from_cls_rule( - classifier_insert(&cls, &rule2->cls_rule)); - if (wcf1 != wcf2 || pri1 != pri2 || value_pat) { - assert(!displaced_rule); + int i; - check_tables(&cls, 1, 1, 2); - compare_classifiers(&cls, &tcls); + for (i = 0; i < n / 2; i++) { + int j = n - (i + 1); + swap(&a[i], &a[j]); + } +} - classifier_remove(&cls, &rule1->cls_rule); - tcls_remove(&tcls, tcls_rule1); - check_tables(&cls, 1, 1, 1); - compare_classifiers(&cls, &tcls); - } else { - assert(displaced_rule == rule1); - check_tables(&cls, 1, 1, 1); - compare_classifiers(&cls, &tcls); - } - free(rule1); +static bool +next_permutation(int *a, int n) +{ + int k; - classifier_remove(&cls, &rule2->cls_rule); - tcls_remove(&tcls, tcls_rule2); - compare_classifiers(&cls, &tcls); - free(rule2); + for (k = n - 2; k >= 0; k--) { + if (a[k] < a[k + 1]) { + int l; - destroy_classifier(&cls); - tcls_destroy(&tcls); + for (l = n - 1; ; l--) { + if (a[l] > a[k]) { + swap(&a[k], &a[l]); + reverse(a + (k + 1), n - (k + 1)); + return true; } } } } + return false; } -/* Tests classification with two rules at a time that fall into the same - * table but different buckets. */ +/* Tests classification with rules that have the same matching criteria. */ static void -test_two_rules_in_one_table(void) -{ - int table, rel_pri, wcf_pat; - - /* Skip tables 0 and CLS_F_IDX_EXACT because they have one bucket. */ - for (table = 1; table < CLS_N_FIELDS; table++) { - for (rel_pri = -1; rel_pri <= +1; rel_pri++) { - for (wcf_pat = 0; wcf_pat < 5; wcf_pat++) { - struct test_rule *rule1, *tcls_rule1; - struct test_rule *rule2, *tcls_rule2; - struct classifier cls; - struct tcls tcls; - unsigned int pri1, pri2; - int wcf1, wcf2; - int value_mask, value_pat1, value_pat2; - int i; - - /* We can use identical priorities in this test because the - * classifier always chooses the rule added later for - * equal-priority rules that fall into the same table. */ - pri1 = table * 257 + 50; - pri2 = pri1 + rel_pri; - - if (wcf_pat & 4) { - wcf1 = wcf2 = random_wcf_in_table(table, pri1); - } else { - wcf1 = (wcf_pat & 1 - ? random_wcf_in_table(table, pri1) - : 1u << table); - wcf2 = (wcf_pat & 2 - ? random_wcf_in_table(table, pri2) - : 1u << table); - } - - /* Generate value patterns that will put the two rules into - * different buckets. 
*/ - value_mask = ((1u << table) - 1); - value_pat1 = hash_int(pri1, 1) & value_mask; - i = 0; - do { - value_pat2 = (hash_int(pri2, i++) & value_mask); - } while (value_pat1 == value_pat2); - rule1 = make_rule(wcf1, pri1, value_pat1); - rule2 = make_rule(wcf2, pri2, value_pat2); - - classifier_init(&cls); - tcls_init(&tcls); - - tcls_rule1 = tcls_insert(&tcls, rule1); - tcls_rule2 = tcls_insert(&tcls, rule2); - assert(!classifier_insert(&cls, &rule1->cls_rule)); - assert(!classifier_insert(&cls, &rule2->cls_rule)); - check_tables(&cls, 1, 2, 2); - compare_classifiers(&cls, &tcls); +test_many_rules_in_one_list (int argc OVS_UNUSED, char *argv[] OVS_UNUSED) +{ + enum { N_RULES = 3 }; + int n_pris; - classifier_remove(&cls, &rule1->cls_rule); - tcls_remove(&tcls, tcls_rule1); - check_tables(&cls, 1, 1, 1); - compare_classifiers(&cls, &tcls); - free(rule1); + for (n_pris = N_RULES; n_pris >= 1; n_pris--) { + int ops[N_RULES * 2]; + int pris[N_RULES]; + int n_permutations; + int i; - classifier_remove(&cls, &rule2->cls_rule); - tcls_remove(&tcls, tcls_rule2); - compare_classifiers(&cls, &tcls); - free(rule2); + pris[0] = 0; + for (i = 1; i < N_RULES; i++) { + pris[i] = pris[i - 1] + (n_pris > i); + } - classifier_destroy(&cls); - tcls_destroy(&tcls); - } + for (i = 0; i < N_RULES * 2; i++) { + ops[i] = i / 2; } - } -} -/* Tests classification with two rules at a time that fall into different - * tables. */ -static void -test_two_rules_in_different_tables(void) -{ - int table1, table2, rel_pri, wcf_pat; - - for (table1 = 0; table1 < CLS_N_FIELDS; table1++) { - for (table2 = table1 + 1; table2 <= CLS_N_FIELDS; table2++) { - for (rel_pri = 0; rel_pri < 2; rel_pri++) { - for (wcf_pat = 0; wcf_pat < 4; wcf_pat++) { - struct test_rule *rule1, *tcls_rule1; - struct test_rule *rule2, *tcls_rule2; - struct classifier cls; - struct tcls tcls; - unsigned int pri1, pri2; - int wcf1, wcf2; - - /* We must use unique priorities in this test because the - * classifier makes the rule choice undefined for rules of - * equal priority that fall into different tables. (In - * practice, lower-numbered tables win.) */ - pri1 = table1 * 257 + 50; - pri2 = rel_pri ? pri1 - 1 : pri1 + 1; - - wcf1 = (wcf_pat & 1 - ? random_wcf_in_table(table1, pri1) - : 1u << table1); - wcf2 = (wcf_pat & 2 - ? 
random_wcf_in_table(table2, pri2) - : 1u << table2); - - if (table2 == CLS_F_IDX_EXACT) { - pri2 = UINT16_MAX; - wcf2 = 0; - } + n_permutations = 0; + do { + struct test_rule *rules[N_RULES]; + struct test_rule *tcls_rules[N_RULES]; + int pri_rules[N_RULES]; + struct classifier cls; + struct tcls tcls; - rule1 = make_rule(wcf1, pri1, 0); - rule2 = make_rule(wcf2, pri2, 0); + n_permutations++; - classifier_init(&cls); - tcls_init(&tcls); + for (i = 0; i < N_RULES; i++) { + rules[i] = make_rule(456, pris[i], 0); + tcls_rules[i] = NULL; + pri_rules[i] = -1; + } - tcls_rule1 = tcls_insert(&tcls, rule1); - tcls_rule2 = tcls_insert(&tcls, rule2); - assert(!classifier_insert(&cls, &rule1->cls_rule)); - assert(!classifier_insert(&cls, &rule2->cls_rule)); - check_tables(&cls, 2, 2, 2); - compare_classifiers(&cls, &tcls); + classifier_init(&cls, flow_segment_u32s); + fat_rwlock_wrlock(&cls.rwlock); + classifier_set_prefix_fields(&cls, trie_fields, + ARRAY_SIZE(trie_fields)); + tcls_init(&tcls); - classifier_remove(&cls, &rule1->cls_rule); - tcls_remove(&tcls, tcls_rule1); - check_tables(&cls, 1, 1, 1); - compare_classifiers(&cls, &tcls); - free(rule1); + for (i = 0; i < ARRAY_SIZE(ops); i++) { + int j = ops[i]; + int m, n; - classifier_remove(&cls, &rule2->cls_rule); - tcls_remove(&tcls, tcls_rule2); - compare_classifiers(&cls, &tcls); - free(rule2); + if (!tcls_rules[j]) { + struct test_rule *displaced_rule; - classifier_destroy(&cls); - tcls_destroy(&tcls); + tcls_rules[j] = tcls_insert(&tcls, rules[j]); + displaced_rule = test_rule_from_cls_rule( + classifier_replace(&cls, &rules[j]->cls_rule)); + if (pri_rules[pris[j]] >= 0) { + int k = pri_rules[pris[j]]; + assert(displaced_rule != NULL); + assert(displaced_rule != rules[j]); + assert(pris[j] == displaced_rule->cls_rule.priority); + tcls_rules[k] = NULL; + } else { + assert(displaced_rule == NULL); + } + pri_rules[pris[j]] = j; + } else { + classifier_remove(&cls, &rules[j]->cls_rule); + tcls_remove(&tcls, tcls_rules[j]); + tcls_rules[j] = NULL; + pri_rules[pris[j]] = -1; } + + n = 0; + for (m = 0; m < N_RULES; m++) { + n += tcls_rules[m] != NULL; + } + check_tables(&cls, n > 0, n, n - 1); + + compare_classifiers(&cls, &tcls); } - } + + fat_rwlock_unlock(&cls.rwlock); + classifier_destroy(&cls); + tcls_destroy(&tcls); + + for (i = 0; i < N_RULES; i++) { + free_rule(rules[i]); + } + } while (next_permutation(ops, ARRAY_SIZE(ops))); + assert(n_permutations == (factorial(N_RULES * 2) >> N_RULES)); } } -/* Tests classification with many rules at a time that fall into the same - * bucket but have unique priorities (and various wildcards). */ -static void -test_many_rules_in_one_bucket(void) +static int +count_ones(unsigned long int x) { - enum { MAX_RULES = 50 }; - int iteration, table; + int n = 0; - for (iteration = 0; iteration < 3; iteration++) { - for (table = 0; table <= CLS_N_FIELDS; table++) { - unsigned int priorities[MAX_RULES]; - struct classifier cls; - struct tcls tcls; - int i; - - srand(hash_int(table, iteration)); - for (i = 0; i < MAX_RULES; i++) { - priorities[i] = i * 129; - } - shuffle(priorities, ARRAY_SIZE(priorities)); + while (x) { + x = zero_rightmost_1bit(x); + n++; + } - classifier_init(&cls); - tcls_init(&tcls); + return n; +} - for (i = 0; i < MAX_RULES; i++) { - struct test_rule *rule; - unsigned int priority = priorities[i]; - int wcf; - - wcf = random_wcf_in_table(table, priority); - rule = make_rule(wcf, priority, - table == CLS_F_IDX_EXACT ? 
i : 1234); - tcls_insert(&tcls, rule); - assert(!classifier_insert(&cls, &rule->cls_rule)); - check_tables(&cls, 1, 1, i + 1); - compare_classifiers(&cls, &tcls); - } +static bool +array_contains(int *array, int n, int value) +{ + int i; - destroy_classifier(&cls); - tcls_destroy(&tcls); + for (i = 0; i < n; i++) { + if (array[i] == value) { + return true; } } + + return false; } -/* Tests classification with many rules at a time that fall into the same - * table but random buckets. */ +/* Tests classification with two rules at a time that fall into the same + * table but different lists. */ static void -test_many_rules_in_one_table(void) +test_many_rules_in_one_table(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) { - enum { MAX_RULES = 50 }; - int iteration, table; + int iteration; - for (iteration = 0; iteration < 3; iteration++) { - for (table = 0; table < CLS_N_FIELDS; table++) { - unsigned int priorities[MAX_RULES]; - struct classifier cls; - struct tcls tcls; - int i; + for (iteration = 0; iteration < 50; iteration++) { + enum { N_RULES = 20 }; + struct test_rule *rules[N_RULES]; + struct test_rule *tcls_rules[N_RULES]; + struct classifier cls; + struct tcls tcls; + int value_pats[N_RULES]; + int value_mask; + int wcf; + int i; - srand(hash_int(table, iteration)); - for (i = 0; i < MAX_RULES; i++) { - priorities[i] = i * 129; - } - shuffle(priorities, ARRAY_SIZE(priorities)); + do { + wcf = random_uint32() & ((1u << CLS_N_FIELDS) - 1); + value_mask = ~wcf & ((1u << CLS_N_FIELDS) - 1); + } while ((1 << count_ones(value_mask)) < N_RULES); - classifier_init(&cls); - tcls_init(&tcls); + classifier_init(&cls, flow_segment_u32s); + fat_rwlock_wrlock(&cls.rwlock); + classifier_set_prefix_fields(&cls, trie_fields, + ARRAY_SIZE(trie_fields)); + tcls_init(&tcls); - for (i = 0; i < MAX_RULES; i++) { - struct test_rule *rule; - unsigned int priority = priorities[i]; - int wcf; + for (i = 0; i < N_RULES; i++) { + unsigned int priority = random_uint32(); - wcf = random_wcf_in_table(table, priority); - rule = make_rule(wcf, priority, hash_int(priority, 1)); - tcls_insert(&tcls, rule); - assert(!classifier_insert(&cls, &rule->cls_rule)); - check_tables(&cls, 1, -1, i + 1); - compare_classifiers(&cls, &tcls); - } + do { + value_pats[i] = random_uint32() & value_mask; + } while (array_contains(value_pats, i, value_pats[i])); - destroy_classifier(&cls); - tcls_destroy(&tcls); + rules[i] = make_rule(wcf, priority, value_pats[i]); + tcls_rules[i] = tcls_insert(&tcls, rules[i]); + classifier_insert(&cls, &rules[i]->cls_rule); + + check_tables(&cls, 1, i + 1, 0); + compare_classifiers(&cls, &tcls); } + + for (i = 0; i < N_RULES; i++) { + tcls_remove(&tcls, tcls_rules[i]); + classifier_remove(&cls, &rules[i]->cls_rule); + free_rule(rules[i]); + + check_tables(&cls, i < N_RULES - 1, N_RULES - (i + 1), 0); + compare_classifiers(&cls, &tcls); + } + + fat_rwlock_unlock(&cls.rwlock); + classifier_destroy(&cls); + tcls_destroy(&tcls); } } -/* Tests classification with many rules at a time that fall into random buckets - * in random tables. */ +/* Tests classification with many rules at a time that fall into random lists + * in 'n' tables. 
*/ static void -test_many_rules_in_different_tables(void) +test_many_rules_in_n_tables(int n_tables) { enum { MAX_RULES = 50 }; + int wcfs[10]; int iteration; + int i; + + assert(n_tables < 10); + for (i = 0; i < n_tables; i++) { + do { + wcfs[i] = random_uint32() & ((1u << CLS_N_FIELDS) - 1); + } while (array_contains(wcfs, i, wcfs[i])); + } for (iteration = 0; iteration < 30; iteration++) { unsigned int priorities[MAX_RULES]; struct classifier cls; struct tcls tcls; - int i; - srand(iteration); + random_set_seed(iteration + 1); for (i = 0; i < MAX_RULES; i++) { priorities[i] = i * 129; } shuffle(priorities, ARRAY_SIZE(priorities)); - classifier_init(&cls); + classifier_init(&cls, flow_segment_u32s); + fat_rwlock_wrlock(&cls.rwlock); + classifier_set_prefix_fields(&cls, trie_fields, + ARRAY_SIZE(trie_fields)); tcls_init(&tcls); for (i = 0; i < MAX_RULES; i++) { struct test_rule *rule; unsigned int priority = priorities[i]; - int table = rand() % (CLS_N_FIELDS + 1); - int wcf = random_wcf_in_table(table, rand()); - int value_pat = rand() & ((1u << CLS_N_FIELDS) - 1); + int wcf = wcfs[random_range(n_tables)]; + int value_pat = random_uint32() & ((1u << CLS_N_FIELDS) - 1); rule = make_rule(wcf, priority, value_pat); tcls_insert(&tcls, rule); - assert(!classifier_insert(&cls, &rule->cls_rule)); - check_tables(&cls, -1, -1, i + 1); + classifier_insert(&cls, &rule->cls_rule); + check_tables(&cls, -1, i + 1, -1); compare_classifiers(&cls, &tcls); } while (!classifier_is_empty(&cls)) { - struct test_rule *rule = xmemdup(tcls.rules[rand() % tcls.n_rules], - sizeof(struct test_rule)); - int include = rand() % 2 ? CLS_INC_WILD : CLS_INC_EXACT; - include |= (rule->cls_rule.wc.wildcards - ? CLS_INC_WILD : CLS_INC_EXACT); - classifier_for_each_match(&cls, &rule->cls_rule, include, - free_rule, &cls); - tcls_delete_matches(&tcls, &rule->cls_rule, include); + struct test_rule *rule, *next_rule; + struct test_rule *target; + struct cls_cursor cursor; + + target = clone_rule(tcls.rules[random_range(tcls.n_rules)]); + + cls_cursor_init(&cursor, &cls, &target->cls_rule); + CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cls_rule, &cursor) { + classifier_remove(&cls, &rule->cls_rule); + free_rule(rule); + } + tcls_delete_matches(&tcls, &target->cls_rule); compare_classifiers(&cls, &tcls); - free(rule); + check_tables(&cls, -1, -1, -1); + free_rule(target); } - putchar('.'); - fflush(stdout); + fat_rwlock_unlock(&cls.rwlock); destroy_classifier(&cls); tcls_destroy(&tcls); } } + +static void +test_many_rules_in_two_tables(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) +{ + test_many_rules_in_n_tables(2); +} + +static void +test_many_rules_in_five_tables(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) +{ + test_many_rules_in_n_tables(5); +} +/* Miniflow tests. 
*/ + +static uint32_t +random_value(void) +{ + static const uint32_t values[] = + { 0xffffffff, 0xaaaaaaaa, 0x55555555, 0x80000000, + 0x00000001, 0xface0000, 0x00d00d1e, 0xdeadbeef }; + + return values[random_range(ARRAY_SIZE(values))]; +} + +static bool +choose(unsigned int n, unsigned int *idxp) +{ + if (*idxp < n) { + return true; + } else { + *idxp -= n; + return false; + } +} + +static bool +init_consecutive_values(int n_consecutive, struct flow *flow, + unsigned int *idxp) +{ + uint32_t *flow_u32 = (uint32_t *) flow; + + if (choose(FLOW_U32S - n_consecutive + 1, idxp)) { + int i; + + for (i = 0; i < n_consecutive; i++) { + flow_u32[*idxp + i] = random_value(); + } + return true; + } else { + return false; + } +} + +static bool +next_random_flow(struct flow *flow, unsigned int idx) +{ + uint32_t *flow_u32 = (uint32_t *) flow; + int i; + + memset(flow, 0, sizeof *flow); + + /* Empty flow. */ + if (choose(1, &idx)) { + return true; + } + + /* All flows with a small number of consecutive nonzero values. */ + for (i = 1; i <= 4; i++) { + if (init_consecutive_values(i, flow, &idx)) { + return true; + } + } + + /* All flows with a large number of consecutive nonzero values. */ + for (i = FLOW_U32S - 4; i <= FLOW_U32S; i++) { + if (init_consecutive_values(i, flow, &idx)) { + return true; + } + } + + /* All flows with exactly two nonconsecutive nonzero values. */ + if (choose((FLOW_U32S - 1) * (FLOW_U32S - 2) / 2, &idx)) { + int ofs1; + + for (ofs1 = 0; ofs1 < FLOW_U32S - 2; ofs1++) { + int ofs2; + + for (ofs2 = ofs1 + 2; ofs2 < FLOW_U32S; ofs2++) { + if (choose(1, &idx)) { + flow_u32[ofs1] = random_value(); + flow_u32[ofs2] = random_value(); + return true; + } + } + } + OVS_NOT_REACHED(); + } + + /* 16 randomly chosen flows with N >= 3 nonzero values. */ + if (choose(16 * (FLOW_U32S - 4), &idx)) { + int n = idx / 16 + 3; + int i; + + for (i = 0; i < n; i++) { + flow_u32[i] = random_value(); + } + shuffle_u32s(flow_u32, FLOW_U32S); + + return true; + } + + return false; +} + +static void +any_random_flow(struct flow *flow) +{ + static unsigned int max; + if (!max) { + while (next_random_flow(flow, max)) { + max++; + } + } + + next_random_flow(flow, random_range(max)); +} + +static void +toggle_masked_flow_bits(struct flow *flow, const struct flow_wildcards *mask) +{ + const uint32_t *mask_u32 = (const uint32_t *) &mask->masks; + uint32_t *flow_u32 = (uint32_t *) flow; + int i; + + for (i = 0; i < FLOW_U32S; i++) { + if (mask_u32[i] != 0) { + uint32_t bit; + + do { + bit = 1u << random_range(32); + } while (!(bit & mask_u32[i])); + flow_u32[i] ^= bit; + } + } +} + static void -run_test(void (*function)(void)) +wildcard_extra_bits(struct flow_wildcards *mask) { - function(); - putchar('.'); - fflush(stdout); + uint32_t *mask_u32 = (uint32_t *) &mask->masks; + int i; + + for (i = 0; i < FLOW_U32S; i++) { + if (mask_u32[i] != 0) { + uint32_t bit; + + do { + bit = 1u << random_range(32); + } while (!(bit & mask_u32[i])); + mask_u32[i] &= ~bit; + } + } } -int -main(void) +static void +test_miniflow(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) { + struct flow flow; + unsigned int idx; + + random_set_seed(0xb3faca38); + for (idx = 0; next_random_flow(&flow, idx); idx++) { + const uint32_t *flow_u32 = (const uint32_t *) &flow; + struct miniflow miniflow, miniflow2, miniflow3; + struct flow flow2, flow3; + struct flow_wildcards mask; + struct minimask minimask; + int i; + + /* Convert flow to miniflow. */ + miniflow_init(&miniflow, &flow); + + /* Check that the flow equals its miniflow. 
*/ + assert(miniflow_get_vid(&miniflow) == vlan_tci_to_vid(flow.vlan_tci)); + for (i = 0; i < FLOW_U32S; i++) { + assert(MINIFLOW_GET_TYPE(&miniflow, uint32_t, i * 4) + == flow_u32[i]); + } + + /* Check that the miniflow equals itself. */ + assert(miniflow_equal(&miniflow, &miniflow)); + + /* Convert miniflow back to flow and verify that it's the same. */ + miniflow_expand(&miniflow, &flow2); + assert(flow_equal(&flow, &flow2)); + + /* Check that copying a miniflow works properly. */ + miniflow_clone(&miniflow2, &miniflow); + assert(miniflow_equal(&miniflow, &miniflow2)); + assert(miniflow_hash(&miniflow, 0) == miniflow_hash(&miniflow2, 0)); + miniflow_expand(&miniflow2, &flow3); + assert(flow_equal(&flow, &flow3)); + + /* Check that masked matches work as expected for identical flows and + * miniflows. */ + do { + next_random_flow(&mask.masks, 1); + } while (flow_wildcards_is_catchall(&mask)); + minimask_init(&minimask, &mask); + assert(minimask_is_catchall(&minimask) + == flow_wildcards_is_catchall(&mask)); + assert(miniflow_equal_in_minimask(&miniflow, &miniflow2, &minimask)); + assert(miniflow_equal_flow_in_minimask(&miniflow, &flow2, &minimask)); + assert(miniflow_hash_in_minimask(&miniflow, &minimask, 0x12345678) == + flow_hash_in_minimask(&flow, &minimask, 0x12345678)); + + /* Check that masked matches work as expected for differing flows and + * miniflows. */ + toggle_masked_flow_bits(&flow2, &mask); + assert(!miniflow_equal_flow_in_minimask(&miniflow, &flow2, &minimask)); + miniflow_init(&miniflow3, &flow2); + assert(!miniflow_equal_in_minimask(&miniflow, &miniflow3, &minimask)); + + /* Clean up. */ + miniflow_destroy(&miniflow); + miniflow_destroy(&miniflow2); + miniflow_destroy(&miniflow3); + minimask_destroy(&minimask); + } +} + +static void +test_minimask_has_extra(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) +{ + struct flow_wildcards catchall; + struct minimask minicatchall; + struct flow flow; + unsigned int idx; + + flow_wildcards_init_catchall(&catchall); + minimask_init(&minicatchall, &catchall); + assert(minimask_is_catchall(&minicatchall)); + + random_set_seed(0x2ec7905b); + for (idx = 0; next_random_flow(&flow, idx); idx++) { + struct flow_wildcards mask; + struct minimask minimask; + + mask.masks = flow; + minimask_init(&minimask, &mask); + assert(!minimask_has_extra(&minimask, &minimask)); + assert(minimask_has_extra(&minicatchall, &minimask) + == !minimask_is_catchall(&minimask)); + if (!minimask_is_catchall(&minimask)) { + struct minimask minimask2; + + wildcard_extra_bits(&mask); + minimask_init(&minimask2, &mask); + assert(minimask_has_extra(&minimask2, &minimask)); + assert(!minimask_has_extra(&minimask, &minimask2)); + minimask_destroy(&minimask2); + } + + minimask_destroy(&minimask); + } + + minimask_destroy(&minicatchall); +} + +static void +test_minimask_combine(int argc OVS_UNUSED, char *argv[] OVS_UNUSED) +{ + struct flow_wildcards catchall; + struct minimask minicatchall; + struct flow flow; + unsigned int idx; + + flow_wildcards_init_catchall(&catchall); + minimask_init(&minicatchall, &catchall); + assert(minimask_is_catchall(&minicatchall)); + + random_set_seed(0x181bf0cd); + for (idx = 0; next_random_flow(&flow, idx); idx++) { + struct minimask minimask, minimask2, minicombined; + struct flow_wildcards mask, mask2, combined, combined2; + uint32_t storage[FLOW_U32S]; + struct flow flow2; + + mask.masks = flow; + minimask_init(&minimask, &mask); + + minimask_combine(&minicombined, &minimask, &minicatchall, storage); + 
assert(minimask_is_catchall(&minicombined));
+
+        any_random_flow(&flow2);
+        mask2.masks = flow2;
+        minimask_init(&minimask2, &mask2);
+
+        minimask_combine(&minicombined, &minimask, &minimask2, storage);
+        flow_wildcards_and(&combined, &mask, &mask2);
+        minimask_expand(&minicombined, &combined2);
+        assert(flow_wildcards_equal(&combined, &combined2));
+
+        minimask_destroy(&minimask);
+        minimask_destroy(&minimask2);
+    }
+
+    minimask_destroy(&minicatchall);
+}
+
+static const struct command commands[] = {
+    /* Classifier tests. */
+    {"empty", 0, 0, test_empty},
+    {"destroy-null", 0, 0, test_destroy_null},
+    {"single-rule", 0, 0, test_single_rule},
+    {"rule-replacement", 0, 0, test_rule_replacement},
+    {"many-rules-in-one-list", 0, 0, test_many_rules_in_one_list},
+    {"many-rules-in-one-table", 0, 0, test_many_rules_in_one_table},
+    {"many-rules-in-two-tables", 0, 0, test_many_rules_in_two_tables},
+    {"many-rules-in-five-tables", 0, 0, test_many_rules_in_five_tables},
+
+    /* Miniflow and minimask tests. */
+    {"miniflow", 0, 0, test_miniflow},
+    {"minimask_has_extra", 0, 0, test_minimask_has_extra},
+    {"minimask_combine", 0, 0, test_minimask_combine},
+
+    {NULL, 0, 0, NULL},
+};
+
+static void
+test_classifier_main(int argc, char *argv[])
+{
+    set_program_name(argv[0]);
     init_values();
-    run_test(test_empty);
-    run_test(test_destroy_null);
-    run_test(test_single_rule);
-    run_test(test_rule_replacement);
-    run_test(test_two_rules_in_one_bucket);
-    run_test(test_two_rules_in_one_table);
-    run_test(test_two_rules_in_different_tables);
-    run_test(test_many_rules_in_one_bucket);
-    run_test(test_many_rules_in_one_table);
-    run_test(test_many_rules_in_different_tables);
-    putchar('\n');
-    return 0;
+    run_command(argc - 1, argv + 1, commands);
 }
+
+OVSTEST_REGISTER("test-classifier", test_classifier_main);
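
A note for reviewers on the CLS_FIELDS construct added at the top of the new file: it is the X-macro idiom, in which one field list is expanded twice, first into an enum of field indexes and then into a metadata table. The standalone sketch below illustrates only the idiom; it is not code from this patch, and every name in it (struct pkt, FIELD_LIST, FIELD, field_info) is invented for the example.

/* x_macro_sketch.c: the X-macro pattern behind CLS_FIELDS, in isolation.
 * Hypothetical example code, not part of the patch. */
#include <stddef.h>
#include <stdio.h>

struct pkt {
    unsigned int src;
    unsigned int dst;
    unsigned short port;
};

/* The field list is written exactly once... */
#define FIELD_LIST                              \
    FIELD(src,  SRC)                            \
    FIELD(dst,  DST)                            \
    FIELD(port, PORT)

/* ...then expanded into an enum of field indexes... */
enum {
#define FIELD(MEMBER, NAME) F_IDX_##NAME,
    FIELD_LIST
#undef FIELD
    F_N_FIELDS
};

/* ...and again into a table describing each field. */
static const struct {
    size_t ofs;
    size_t len;
    const char *name;
} field_info[F_N_FIELDS] = {
#define FIELD(MEMBER, NAME) \
    { offsetof(struct pkt, MEMBER), sizeof ((struct pkt *) 0)->MEMBER, #NAME },
    FIELD_LIST
#undef FIELD
};

int
main(void)
{
    int i;

    for (i = 0; i < F_N_FIELDS; i++) {
        printf("%-4s ofs=%zu len=%zu\n",
               field_info[i].name, field_info[i].ofs, field_info[i].len);
    }
    return 0;
}

Keeping the list in a single macro is what lets the patch swap in the new tun_id, metadata, and vlan_tci fields by editing one place instead of several parallel definitions.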
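
More broadly, the file's test strategy is differential testing: every insert, replace, and remove is applied both to the real classifier and to the trivially correct linear tcls, and compare_classifiers() then cross-checks lookups of randomly chosen flows against that oracle. The toy sketch below shows the same pattern under stated assumptions; the bitmap "set" and the bool-array oracle are invented stand-ins for the classifier and tcls, not code from the patch.

/* differential_sketch.c: cross-checking an optimized structure against a
 * trivially correct oracle, in the style of compare_classifiers().
 * Hypothetical example code, not part of the patch. */
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

#define UNIVERSE 64                      /* Small keyspace so keys collide. */

static unsigned long long fast_bits;     /* "Clever" structure under test. */
static bool slow_present[UNIVERSE];      /* Obviously correct oracle. */

static void
set_membership(int key, bool present)
{
    /* Apply each mutation to both implementations, the way the test applies
     * each insert/remove to both the classifier and the tcls. */
    if (present) {
        fast_bits |= 1ULL << key;
    } else {
        fast_bits &= ~(1ULL << key);
    }
    slow_present[key] = present;
}

static void
compare_sets(void)
{
    /* Cross-check every key, the way compare_classifiers() cross-checks
     * lookups of randomly generated flows. */
    int key;

    for (key = 0; key < UNIVERSE; key++) {
        assert(((fast_bits >> key) & 1) == (slow_present[key] ? 1 : 0));
    }
}

int
main(void)
{
    int i;

    srand(0x12345678);
    for (i = 0; i < 10000; i++) {
        set_membership(rand() % UNIVERSE, rand() % 2);
        compare_sets();
    }
    return 0;
}

The oracle can afford to be slow because it only has to be obviously correct; any disagreement points at a bug in the optimized structure, which is how this test keeps coverage while exercising the classifier internals it now pulls in via classifier.c.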