X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=lib%2Fclassifier.c;h=8ab1f9c3d900e7ce65ce76f01f7cc9bfde82af47;hb=3d91d9094dcf49c210bd4ebae4bd1e0cea9defce;hp=18958a8f73fccafafe18e78df4ee4476c664ecf3;hpb=73f3356323a0f94ffb5b58af4e0312e715b7a3e2;p=sliver-openvswitch.git diff --git a/lib/classifier.c b/lib/classifier.c index 18958a8f7..8ab1f9c3d 100644 --- a/lib/classifier.c +++ b/lib/classifier.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2010, 2011, 2012 Nicira Networks. + * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,7 +16,6 @@ #include #include "classifier.h" -#include #include #include #include "byte-order.h" @@ -25,23 +24,35 @@ #include "hash.h" #include "odp-util.h" #include "ofp-util.h" +#include "ovs-thread.h" #include "packets.h" - -static struct cls_table *find_table(const struct classifier *, - const struct flow_wildcards *); -static struct cls_table *insert_table(struct classifier *, - const struct flow_wildcards *); - -static void destroy_table(struct classifier *, struct cls_table *); - -static struct cls_rule *find_match(const struct cls_table *, - const struct flow *); -static struct cls_rule *find_equal(struct cls_table *, const struct flow *, - uint32_t hash); -static struct cls_rule *insert_rule(struct cls_table *, struct cls_rule *); - -static bool flow_equal_except(const struct flow *, const struct flow *, - const struct flow_wildcards *); +#include "vlog.h" + +VLOG_DEFINE_THIS_MODULE(classifier); + +struct trie_ctx; +static struct cls_subtable *find_subtable(const struct classifier *, + const struct minimask *); +static struct cls_subtable *insert_subtable(struct classifier *, + const struct minimask *); + +static void destroy_subtable(struct classifier *, struct cls_subtable *); + +static void update_subtables_after_insertion(struct classifier *, + struct cls_subtable *, + unsigned int new_priority); +static void update_subtables_after_removal(struct classifier *, + struct cls_subtable *, + unsigned int del_priority); + +static struct cls_rule *find_match_wc(const struct cls_subtable *, + const struct flow *, struct trie_ctx *, + unsigned int n_tries, + struct flow_wildcards *); +static struct cls_rule *find_equal(struct cls_subtable *, + const struct miniflow *, uint32_t hash); +static struct cls_rule *insert_rule(struct classifier *, + struct cls_subtable *, struct cls_rule *); /* Iterates RULE over HEAD and all of the cls_rules on HEAD->list. */ #define FOR_EACH_RULE_IN_LIST(RULE, HEAD) \ @@ -54,700 +65,464 @@ static bool flow_equal_except(const struct flow *, const struct flow *, static struct cls_rule *next_rule_in_list__(struct cls_rule *); static struct cls_rule *next_rule_in_list(struct cls_rule *); -/* Converts the flow in 'flow' into a cls_rule in 'rule', with the given - * 'wildcards' and 'priority'. 
*/ -void -cls_rule_init(const struct flow *flow, const struct flow_wildcards *wildcards, - unsigned int priority, struct cls_rule *rule) -{ - rule->flow = *flow; - rule->wc = *wildcards; - rule->priority = priority; - cls_rule_zero_wildcarded_fields(rule); -} +static unsigned int minimask_get_prefix_len(const struct minimask *, + const struct mf_field *); +static void trie_init(struct classifier *, int trie_idx, + const struct mf_field *); +static unsigned int trie_lookup(const struct cls_trie *, const struct flow *, + unsigned int *checkbits); + +static void trie_destroy(struct trie_node *); +static void trie_insert(struct cls_trie *, const struct cls_rule *, int mlen); +static void trie_remove(struct cls_trie *, const struct cls_rule *, int mlen); +static void mask_set_prefix_bits(struct flow_wildcards *, uint8_t be32ofs, + unsigned int nbits); +static bool mask_prefix_bits_set(const struct flow_wildcards *, + uint8_t be32ofs, unsigned int nbits); + +/* flow/miniflow/minimask/minimatch utilities. + * These are only used by the classifier, so place them here to allow + * for better optimization. */ -/* Converts the flow in 'flow' into an exact-match cls_rule in 'rule', with the - * given 'priority'. (For OpenFlow 1.0, exact-match rule are always highest - * priority, so 'priority' should be at least 65535.) */ -void -cls_rule_init_exact(const struct flow *flow, - unsigned int priority, struct cls_rule *rule) +static inline uint64_t +miniflow_get_map_in_range(const struct miniflow *miniflow, + uint8_t start, uint8_t end, unsigned int *offset) { - rule->flow = *flow; - rule->flow.skb_priority = 0; - flow_wildcards_init_exact(&rule->wc); - rule->priority = priority; -} + uint64_t map = miniflow->map; + *offset = 0; -/* Initializes 'rule' as a "catch-all" rule that matches every packet, with - * priority 'priority'. */ -void -cls_rule_init_catchall(struct cls_rule *rule, unsigned int priority) -{ - memset(&rule->flow, 0, sizeof rule->flow); - flow_wildcards_init_catchall(&rule->wc); - rule->priority = priority; + if (start > 0) { + uint64_t msk = (UINT64_C(1) << start) - 1; /* 'start' LSBs set */ + *offset = count_1bits(map & msk); + map &= ~msk; + } + if (end < FLOW_U32S) { + uint64_t msk = (UINT64_C(1) << end) - 1; /* 'end' LSBs set */ + map &= msk; + } + return map; } -/* For each bit or field wildcarded in 'rule', sets the corresponding bit or - * field in 'flow' to all-0-bits. It is important to maintain this invariant - * in a clr_rule that might be inserted into a classifier. +/* Returns a hash value for the bits of 'flow' where there are 1-bits in + * 'mask', given 'basis'. * - * It is never necessary to call this function directly for a cls_rule that is - * initialized or modified only by cls_rule_*() functions. It is useful to - * restore the invariant in a cls_rule whose 'wc' member is modified by hand. 
- */ -void -cls_rule_zero_wildcarded_fields(struct cls_rule *rule) -{ - flow_zero_wildcards(&rule->flow, &rule->wc); -} - -void -cls_rule_set_reg(struct cls_rule *rule, unsigned int reg_idx, uint32_t value) -{ - cls_rule_set_reg_masked(rule, reg_idx, value, UINT32_MAX); -} - -void -cls_rule_set_reg_masked(struct cls_rule *rule, unsigned int reg_idx, - uint32_t value, uint32_t mask) -{ - assert(reg_idx < FLOW_N_REGS); - flow_wildcards_set_reg_mask(&rule->wc, reg_idx, mask); - rule->flow.regs[reg_idx] = value & mask; -} - -void -cls_rule_set_tun_id(struct cls_rule *rule, ovs_be64 tun_id) -{ - cls_rule_set_tun_id_masked(rule, tun_id, htonll(UINT64_MAX)); -} - -void -cls_rule_set_tun_id_masked(struct cls_rule *rule, - ovs_be64 tun_id, ovs_be64 mask) -{ - rule->wc.tun_id_mask = mask; - rule->flow.tun_id = tun_id & mask; -} - -void -cls_rule_set_in_port(struct cls_rule *rule, uint16_t ofp_port) -{ - rule->wc.wildcards &= ~FWW_IN_PORT; - rule->flow.in_port = ofp_port; -} - -void -cls_rule_set_dl_type(struct cls_rule *rule, ovs_be16 dl_type) -{ - rule->wc.wildcards &= ~FWW_DL_TYPE; - rule->flow.dl_type = dl_type; -} - -void -cls_rule_set_dl_src(struct cls_rule *rule, const uint8_t dl_src[ETH_ADDR_LEN]) -{ - rule->wc.wildcards &= ~FWW_DL_SRC; - memcpy(rule->flow.dl_src, dl_src, ETH_ADDR_LEN); -} + * The hash values returned by this function are the same as those returned by + * miniflow_hash_in_minimask(), only the form of the arguments differ. */ +static inline uint32_t +flow_hash_in_minimask(const struct flow *flow, const struct minimask *mask, + uint32_t basis) +{ + const uint32_t *flow_u32 = (const uint32_t *)flow; + const uint32_t *p = mask->masks.values; + uint32_t hash; + uint64_t map; + + hash = basis; + for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) { + hash = mhash_add(hash, flow_u32[raw_ctz(map)] & *p++); + } -/* Modifies 'rule' so that the Ethernet address must match 'dl_dst' exactly. */ -void -cls_rule_set_dl_dst(struct cls_rule *rule, const uint8_t dl_dst[ETH_ADDR_LEN]) -{ - rule->wc.wildcards &= ~(FWW_DL_DST | FWW_ETH_MCAST); - memcpy(rule->flow.dl_dst, dl_dst, ETH_ADDR_LEN); + return mhash_finish(hash, (p - mask->masks.values) * 4); } -/* Modifies 'rule' so that the Ethernet address must match 'dl_dst' after each - * byte is ANDed with the appropriate byte in 'mask'. +/* Returns a hash value for the bits of 'flow' where there are 1-bits in + * 'mask', given 'basis'. * - * This function will assert-fail if 'mask' is invalid. Only 'mask' values - * accepted by flow_wildcards_is_dl_dst_mask_valid() are allowed. */ -void -cls_rule_set_dl_dst_masked(struct cls_rule *rule, - const uint8_t dl_dst[ETH_ADDR_LEN], - const uint8_t mask[ETH_ADDR_LEN]) -{ - flow_wildcards_t *wc = &rule->wc.wildcards; - size_t i; - - *wc = flow_wildcards_set_dl_dst_mask(*wc, mask); - for (i = 0; i < ETH_ADDR_LEN; i++) { - rule->flow.dl_dst[i] = dl_dst[i] & mask[i]; + * The hash values returned by this function are the same as those returned by + * flow_hash_in_minimask(), only the form of the arguments differ. 
*/ +static inline uint32_t +miniflow_hash_in_minimask(const struct miniflow *flow, + const struct minimask *mask, uint32_t basis) +{ + const uint32_t *p = mask->masks.values; + uint32_t hash = basis; + uint32_t flow_u32; + + MINIFLOW_FOR_EACH_IN_MAP(flow_u32, flow, mask->masks.map) { + hash = mhash_add(hash, flow_u32 & *p++); } -} -void -cls_rule_set_dl_tci(struct cls_rule *rule, ovs_be16 tci) -{ - cls_rule_set_dl_tci_masked(rule, tci, htons(0xffff)); + return mhash_finish(hash, (p - mask->masks.values) * 4); } -void -cls_rule_set_dl_tci_masked(struct cls_rule *rule, ovs_be16 tci, ovs_be16 mask) -{ - rule->flow.vlan_tci = tci & mask; - rule->wc.vlan_tci_mask = mask; -} - -/* Modifies 'rule' so that the VLAN VID is wildcarded. If the PCP is already - * wildcarded, then 'rule' will match a packet regardless of whether it has an - * 802.1Q header or not. */ -void -cls_rule_set_any_vid(struct cls_rule *rule) -{ - if (rule->wc.vlan_tci_mask & htons(VLAN_PCP_MASK)) { - rule->wc.vlan_tci_mask &= ~htons(VLAN_VID_MASK); - rule->flow.vlan_tci &= ~htons(VLAN_VID_MASK); - } else { - cls_rule_set_dl_tci_masked(rule, htons(0), htons(0)); - } -} - -/* Modifies 'rule' depending on 'dl_vlan': +/* Returns a hash value for the bits of range [start, end) in 'flow', + * where there are 1-bits in 'mask', given 'hash'. * - * - If 'dl_vlan' is htons(OFP_VLAN_NONE), makes 'rule' match only packets - * without an 802.1Q header. - * - * - Otherwise, makes 'rule' match only packets with an 802.1Q header whose - * VID equals the low 12 bits of 'dl_vlan'. - */ -void -cls_rule_set_dl_vlan(struct cls_rule *rule, ovs_be16 dl_vlan) -{ - flow_set_vlan_vid(&rule->flow, dl_vlan); - if (dl_vlan == htons(OFP_VLAN_NONE)) { - rule->wc.vlan_tci_mask = htons(UINT16_MAX); - } else { - rule->wc.vlan_tci_mask |= htons(VLAN_VID_MASK | VLAN_CFI); + * The hash values returned by this function are the same as those returned by + * minimatch_hash_range(), only the form of the arguments differ. */ +static inline uint32_t +flow_hash_in_minimask_range(const struct flow *flow, + const struct minimask *mask, + uint8_t start, uint8_t end, uint32_t *basis) +{ + const uint32_t *flow_u32 = (const uint32_t *)flow; + unsigned int offset; + uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end, + &offset); + const uint32_t *p = mask->masks.values + offset; + uint32_t hash = *basis; + + for (; map; map = zero_rightmost_1bit(map)) { + hash = mhash_add(hash, flow_u32[raw_ctz(map)] & *p++); } -} -/* Modifies 'rule' so that the VLAN PCP is wildcarded. If the VID is already - * wildcarded, then 'rule' will match a packet regardless of whether it has an - * 802.1Q header or not. */ -void -cls_rule_set_any_pcp(struct cls_rule *rule) -{ - if (rule->wc.vlan_tci_mask & htons(VLAN_VID_MASK)) { - rule->wc.vlan_tci_mask &= ~htons(VLAN_PCP_MASK); - rule->flow.vlan_tci &= ~htons(VLAN_PCP_MASK); - } else { - cls_rule_set_dl_tci_masked(rule, htons(0), htons(0)); - } + *basis = hash; /* Allow continuation from the unfinished value. */ + return mhash_finish(hash, (p - mask->masks.values) * 4); } -/* Modifies 'rule' so that it matches only packets with an 802.1Q header whose - * PCP equals the low 3 bits of 'dl_vlan_pcp'. */ -void -cls_rule_set_dl_vlan_pcp(struct cls_rule *rule, uint8_t dl_vlan_pcp) +/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask. 
*/ +static inline void +flow_wildcards_fold_minimask(struct flow_wildcards *wc, + const struct minimask *mask) { - flow_set_vlan_pcp(&rule->flow, dl_vlan_pcp); - rule->wc.vlan_tci_mask |= htons(VLAN_CFI | VLAN_PCP_MASK); + flow_union_with_miniflow(&wc->masks, &mask->masks); } -void -cls_rule_set_tp_src(struct cls_rule *rule, ovs_be16 tp_src) +/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask + * in range [start, end). */ +static inline void +flow_wildcards_fold_minimask_range(struct flow_wildcards *wc, + const struct minimask *mask, + uint8_t start, uint8_t end) { - cls_rule_set_tp_src_masked(rule, tp_src, htons(UINT16_MAX)); -} + uint32_t *dst_u32 = (uint32_t *)&wc->masks; + unsigned int offset; + uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end, + &offset); + const uint32_t *p = mask->masks.values + offset; -void -cls_rule_set_tp_src_masked(struct cls_rule *rule, ovs_be16 port, ovs_be16 mask) -{ - rule->flow.tp_src = port & mask; - rule->wc.tp_src_mask = mask; + for (; map; map = zero_rightmost_1bit(map)) { + dst_u32[raw_ctz(map)] |= *p++; + } } -void -cls_rule_set_tp_dst(struct cls_rule *rule, ovs_be16 tp_dst) +/* Returns a hash value for 'flow', given 'basis'. */ +static inline uint32_t +miniflow_hash(const struct miniflow *flow, uint32_t basis) { - cls_rule_set_tp_dst_masked(rule, tp_dst, htons(UINT16_MAX)); -} + const uint32_t *p = flow->values; + uint32_t hash = basis; + uint64_t hash_map = 0; + uint64_t map; -void -cls_rule_set_tp_dst_masked(struct cls_rule *rule, ovs_be16 port, ovs_be16 mask) -{ - rule->flow.tp_dst = port & mask; - rule->wc.tp_dst_mask = mask; -} + for (map = flow->map; map; map = zero_rightmost_1bit(map)) { + if (*p) { + hash = mhash_add(hash, *p); + hash_map |= rightmost_1bit(map); + } + p++; + } + hash = mhash_add(hash, hash_map); + hash = mhash_add(hash, hash_map >> 32); -void -cls_rule_set_nw_proto(struct cls_rule *rule, uint8_t nw_proto) -{ - rule->wc.wildcards &= ~FWW_NW_PROTO; - rule->flow.nw_proto = nw_proto; + return mhash_finish(hash, p - flow->values); } -void -cls_rule_set_nw_src(struct cls_rule *rule, ovs_be32 nw_src) +/* Returns a hash value for 'mask', given 'basis'. */ +static inline uint32_t +minimask_hash(const struct minimask *mask, uint32_t basis) { - rule->flow.nw_src = nw_src; - rule->wc.nw_src_mask = htonl(UINT32_MAX); + return miniflow_hash(&mask->masks, basis); } -void -cls_rule_set_nw_src_masked(struct cls_rule *rule, - ovs_be32 nw_src, ovs_be32 mask) +/* Returns a hash value for 'match', given 'basis'. */ +static inline uint32_t +minimatch_hash(const struct minimatch *match, uint32_t basis) { - rule->flow.nw_src = nw_src & mask; - rule->wc.nw_src_mask = mask; + return miniflow_hash(&match->flow, minimask_hash(&match->mask, basis)); } -void -cls_rule_set_nw_dst(struct cls_rule *rule, ovs_be32 nw_dst) -{ - rule->flow.nw_dst = nw_dst; - rule->wc.nw_dst_mask = htonl(UINT32_MAX); +/* Returns a hash value for the bits of range [start, end) in 'minimatch', + * given 'basis'. + * + * The hash values returned by this function are the same as those returned by + * flow_hash_in_minimask_range(), only the form of the arguments differ. 
*/ +static inline uint32_t +minimatch_hash_range(const struct minimatch *match, uint8_t start, uint8_t end, + uint32_t *basis) +{ + unsigned int offset; + const uint32_t *p, *q; + uint32_t hash = *basis; + int n, i; + + n = count_1bits(miniflow_get_map_in_range(&match->mask.masks, start, end, + &offset)); + q = match->mask.masks.values + offset; + p = match->flow.values + offset; + + for (i = 0; i < n; i++) { + hash = mhash_add(hash, p[i] & q[i]); + } + *basis = hash; /* Allow continuation from the unfinished value. */ + return mhash_finish(hash, (offset + n) * 4); } -void -cls_rule_set_nw_dst_masked(struct cls_rule *rule, ovs_be32 ip, ovs_be32 mask) -{ - rule->flow.nw_dst = ip & mask; - rule->wc.nw_dst_mask = mask; -} + +/* cls_rule. */ +/* Initializes 'rule' to match packets specified by 'match' at the given + * 'priority'. 'match' must satisfy the invariant described in the comment at + * the definition of struct match. + * + * The caller must eventually destroy 'rule' with cls_rule_destroy(). + * + * (OpenFlow uses priorities between 0 and UINT16_MAX, inclusive, but + * internally Open vSwitch supports a wider range.) */ void -cls_rule_set_nw_dscp(struct cls_rule *rule, uint8_t nw_dscp) +cls_rule_init(struct cls_rule *rule, + const struct match *match, unsigned int priority) { - rule->wc.wildcards &= ~FWW_NW_DSCP; - rule->flow.nw_tos &= ~IP_DSCP_MASK; - rule->flow.nw_tos |= nw_dscp & IP_DSCP_MASK; + minimatch_init(&rule->match, match); + rule->priority = priority; } +/* Same as cls_rule_init() for initialization from a "struct minimatch". */ void -cls_rule_set_nw_ecn(struct cls_rule *rule, uint8_t nw_ecn) +cls_rule_init_from_minimatch(struct cls_rule *rule, + const struct minimatch *match, + unsigned int priority) { - rule->wc.wildcards &= ~FWW_NW_ECN; - rule->flow.nw_tos &= ~IP_ECN_MASK; - rule->flow.nw_tos |= nw_ecn & IP_ECN_MASK; + minimatch_clone(&rule->match, match); + rule->priority = priority; } +/* Initializes 'dst' as a copy of 'src'. + * + * The caller must eventually destroy 'dst' with cls_rule_destroy(). */ void -cls_rule_set_nw_ttl(struct cls_rule *rule, uint8_t nw_ttl) +cls_rule_clone(struct cls_rule *dst, const struct cls_rule *src) { - rule->wc.wildcards &= ~FWW_NW_TTL; - rule->flow.nw_ttl = nw_ttl; + minimatch_clone(&dst->match, &src->match); + dst->priority = src->priority; } +/* Initializes 'dst' with the data in 'src', destroying 'src'. + * + * The caller must eventually destroy 'dst' with cls_rule_destroy(). */ void -cls_rule_set_nw_frag(struct cls_rule *rule, uint8_t nw_frag) +cls_rule_move(struct cls_rule *dst, struct cls_rule *src) { - rule->wc.nw_frag_mask |= FLOW_NW_FRAG_MASK; - rule->flow.nw_frag = nw_frag; + minimatch_move(&dst->match, &src->match); + dst->priority = src->priority; } +/* Frees memory referenced by 'rule'. Doesn't free 'rule' itself (it's + * normally embedded into a larger structure). + * + * ('rule' must not currently be in a classifier.) */ void -cls_rule_set_nw_frag_masked(struct cls_rule *rule, - uint8_t nw_frag, uint8_t mask) +cls_rule_destroy(struct cls_rule *rule) { - rule->flow.nw_frag = nw_frag & mask; - rule->wc.nw_frag_mask = mask; + minimatch_destroy(&rule->match); } -void -cls_rule_set_icmp_type(struct cls_rule *rule, uint8_t icmp_type) +/* Returns true if 'a' and 'b' match the same packets at the same priority, + * false if they differ in some way. 
*/ +bool +cls_rule_equal(const struct cls_rule *a, const struct cls_rule *b) { - cls_rule_set_tp_src(rule, htons(icmp_type)); + return a->priority == b->priority && minimatch_equal(&a->match, &b->match); } -void -cls_rule_set_icmp_code(struct cls_rule *rule, uint8_t icmp_code) +/* Returns a hash value for 'rule', folding in 'basis'. */ +uint32_t +cls_rule_hash(const struct cls_rule *rule, uint32_t basis) { - cls_rule_set_tp_dst(rule, htons(icmp_code)); + return minimatch_hash(&rule->match, hash_int(rule->priority, basis)); } +/* Appends a string describing 'rule' to 's'. */ void -cls_rule_set_arp_sha(struct cls_rule *rule, const uint8_t sha[ETH_ADDR_LEN]) +cls_rule_format(const struct cls_rule *rule, struct ds *s) { - rule->wc.wildcards &= ~FWW_ARP_SHA; - memcpy(rule->flow.arp_sha, sha, ETH_ADDR_LEN); + minimatch_format(&rule->match, s, rule->priority); } -void -cls_rule_set_arp_tha(struct cls_rule *rule, const uint8_t tha[ETH_ADDR_LEN]) +/* Returns true if 'rule' matches every packet, false otherwise. */ +bool +cls_rule_is_catchall(const struct cls_rule *rule) { - rule->wc.wildcards &= ~FWW_ARP_THA; - memcpy(rule->flow.arp_tha, tha, ETH_ADDR_LEN); + return minimask_is_catchall(&rule->match.mask); } - + +/* Initializes 'cls' as a classifier that initially contains no classification + * rules. */ void -cls_rule_set_ipv6_src(struct cls_rule *rule, const struct in6_addr *src) +classifier_init(struct classifier *cls, const uint8_t *flow_segments) { - rule->flow.ipv6_src = *src; - rule->wc.ipv6_src_mask = in6addr_exact; + cls->n_rules = 0; + hmap_init(&cls->subtables); + list_init(&cls->subtables_priority); + hmap_init(&cls->partitions); + fat_rwlock_init(&cls->rwlock); + cls->n_flow_segments = 0; + if (flow_segments) { + while (cls->n_flow_segments < CLS_MAX_INDICES + && *flow_segments < FLOW_U32S) { + cls->flow_segments[cls->n_flow_segments++] = *flow_segments++; + } + } + cls->n_tries = 0; } +/* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the + * caller's responsibility. 
*/ void -cls_rule_set_ipv6_src_masked(struct cls_rule *rule, const struct in6_addr *src, - const struct in6_addr *mask) +classifier_destroy(struct classifier *cls) { - rule->flow.ipv6_src = ipv6_addr_bitand(src, mask); - rule->wc.ipv6_src_mask = *mask; -} + if (cls) { + struct cls_subtable *partition, *next_partition; + struct cls_subtable *subtable, *next_subtable; + int i; -void -cls_rule_set_ipv6_dst(struct cls_rule *rule, const struct in6_addr *dst) -{ - rule->flow.ipv6_dst = *dst; - rule->wc.ipv6_dst_mask = in6addr_exact; -} + for (i = 0; i < cls->n_tries; i++) { + trie_destroy(cls->tries[i].root); + } -void -cls_rule_set_ipv6_dst_masked(struct cls_rule *rule, const struct in6_addr *dst, - const struct in6_addr *mask) -{ - rule->flow.ipv6_dst = ipv6_addr_bitand(dst, mask); - rule->wc.ipv6_dst_mask = *mask; -} + HMAP_FOR_EACH_SAFE (subtable, next_subtable, hmap_node, + &cls->subtables) { + destroy_subtable(cls, subtable); + } + hmap_destroy(&cls->subtables); -void -cls_rule_set_ipv6_label(struct cls_rule *rule, ovs_be32 ipv6_label) -{ - rule->wc.wildcards &= ~FWW_IPV6_LABEL; - rule->flow.ipv6_label = ipv6_label; + HMAP_FOR_EACH_SAFE (partition, next_partition, hmap_node, + &cls->partitions) { + hmap_remove(&cls->partitions, &partition->hmap_node); + free(partition); + } + hmap_destroy(&cls->partitions); + fat_rwlock_destroy(&cls->rwlock); + } } -void -cls_rule_set_nd_target(struct cls_rule *rule, const struct in6_addr *target) -{ - rule->wc.wildcards &= ~FWW_ND_TARGET; - rule->flow.nd_target = *target; -} +/* We use uint64_t as a set for the fields below. */ +BUILD_ASSERT_DECL(MFF_N_IDS <= 64); -/* Returns true if 'a' and 'b' have the same priority, wildcard the same - * fields, and have the same values for fixed fields, otherwise false. */ -bool -cls_rule_equal(const struct cls_rule *a, const struct cls_rule *b) -{ - return (a->priority == b->priority - && flow_wildcards_equal(&a->wc, &b->wc) - && flow_equal(&a->flow, &b->flow)); -} +/* Set the fields for which prefix lookup should be performed. */ +void +classifier_set_prefix_fields(struct classifier *cls, + const enum mf_field_id *trie_fields, + unsigned int n_fields) +{ + uint64_t fields = 0; + int i, trie; + + for (i = 0, trie = 0; i < n_fields && trie < CLS_MAX_TRIES; i++) { + const struct mf_field *field = mf_from_id(trie_fields[i]); + if (field->flow_be32ofs < 0 || field->n_bits % 32) { + /* Incompatible field. This is the only place where we + * enforce these requirements, but the rest of the trie code + * depends on the flow_be32ofs to be non-negative and the + * field length to be a multiple of 32 bits. */ + continue; + } -/* Returns a hash value for the flow, wildcards, and priority in 'rule', - * starting from 'basis'. */ -uint32_t -cls_rule_hash(const struct cls_rule *rule, uint32_t basis) -{ - uint32_t h0 = flow_hash(&rule->flow, basis); - uint32_t h1 = flow_wildcards_hash(&rule->wc, h0); - return hash_int(rule->priority, h1); -} + if (fields & (UINT64_C(1) << trie_fields[i])) { + /* Duplicate field, there is no need to build more than + * one index for any one field. 
*/ + continue; + } + fields |= UINT64_C(1) << trie_fields[i]; -static void -format_ip_netmask(struct ds *s, const char *name, ovs_be32 ip, - ovs_be32 netmask) -{ - if (netmask) { - ds_put_format(s, "%s=", name); - ip_format_masked(ip, netmask, s); - ds_put_char(s, ','); + if (trie >= cls->n_tries || field != cls->tries[trie].field) { + trie_init(cls, trie, field); + } + trie++; } -} -static void -format_ipv6_netmask(struct ds *s, const char *name, - const struct in6_addr *addr, - const struct in6_addr *netmask) -{ - if (!ipv6_mask_is_any(netmask)) { - ds_put_format(s, "%s=", name); - print_ipv6_masked(s, addr, netmask); - ds_put_char(s, ','); + /* Destroy the rest. */ + for (i = trie; i < cls->n_tries; i++) { + trie_init(cls, i, NULL); } + cls->n_tries = trie; } - static void -format_be16_masked(struct ds *s, const char *name, - ovs_be16 value, ovs_be16 mask) +trie_init(struct classifier *cls, int trie_idx, + const struct mf_field *field) { - if (mask != htons(0)) { - ds_put_format(s, "%s=", name); - if (mask == htons(UINT16_MAX)) { - ds_put_format(s, "%"PRIu16, ntohs(value)); - } else { - ds_put_format(s, "0x%"PRIx16"/0x%"PRIx16, - ntohs(value), ntohs(mask)); - } - ds_put_char(s, ','); + struct cls_trie *trie = &cls->tries[trie_idx]; + struct cls_subtable *subtable; + + if (trie_idx < cls->n_tries) { + trie_destroy(trie->root); } -} + trie->root = NULL; + trie->field = field; -void -cls_rule_format(const struct cls_rule *rule, struct ds *s) -{ - const struct flow_wildcards *wc = &rule->wc; - size_t start_len = s->length; - flow_wildcards_t w = wc->wildcards; - const struct flow *f = &rule->flow; - bool skip_type = false; - bool skip_proto = false; + /* Add existing rules to the trie. */ + LIST_FOR_EACH (subtable, list_node, &cls->subtables_priority) { + unsigned int plen; - int i; + plen = field ? minimask_get_prefix_len(&subtable->mask, field) : 0; + /* Initialize subtable's prefix length on this field. 
*/ + subtable->trie_plen[trie_idx] = plen; - BUILD_ASSERT_DECL(FLOW_WC_SEQ == 8); + if (plen) { + struct cls_rule *head; - if (rule->priority != OFP_DEFAULT_PRIORITY) { - ds_put_format(s, "priority=%d,", rule->priority); - } + HMAP_FOR_EACH (head, hmap_node, &subtable->rules) { + struct cls_rule *rule; - if (!(w & FWW_DL_TYPE)) { - skip_type = true; - if (f->dl_type == htons(ETH_TYPE_IP)) { - if (!(w & FWW_NW_PROTO)) { - skip_proto = true; - if (f->nw_proto == IPPROTO_ICMP) { - ds_put_cstr(s, "icmp,"); - } else if (f->nw_proto == IPPROTO_TCP) { - ds_put_cstr(s, "tcp,"); - } else if (f->nw_proto == IPPROTO_UDP) { - ds_put_cstr(s, "udp,"); - } else { - ds_put_cstr(s, "ip,"); - skip_proto = false; + FOR_EACH_RULE_IN_LIST (rule, head) { + trie_insert(trie, rule, plen); } - } else { - ds_put_cstr(s, "ip,"); - } - } else if (f->dl_type == htons(ETH_TYPE_IPV6)) { - if (!(w & FWW_NW_PROTO)) { - skip_proto = true; - if (f->nw_proto == IPPROTO_ICMPV6) { - ds_put_cstr(s, "icmp6,"); - } else if (f->nw_proto == IPPROTO_TCP) { - ds_put_cstr(s, "tcp6,"); - } else if (f->nw_proto == IPPROTO_UDP) { - ds_put_cstr(s, "udp6,"); - } else { - ds_put_cstr(s, "ipv6,"); - skip_proto = false; - } - } else { - ds_put_cstr(s, "ipv6,"); - } - } else if (f->dl_type == htons(ETH_TYPE_ARP)) { - ds_put_cstr(s, "arp,"); - } else { - skip_type = false; - } - } - for (i = 0; i < FLOW_N_REGS; i++) { - switch (wc->reg_masks[i]) { - case 0: - break; - case UINT32_MAX: - ds_put_format(s, "reg%d=0x%"PRIx32",", i, f->regs[i]); - break; - default: - ds_put_format(s, "reg%d=0x%"PRIx32"/0x%"PRIx32",", - i, f->regs[i], wc->reg_masks[i]); - break; - } - } - switch (wc->tun_id_mask) { - case 0: - break; - case CONSTANT_HTONLL(UINT64_MAX): - ds_put_format(s, "tun_id=%#"PRIx64",", ntohll(f->tun_id)); - break; - default: - ds_put_format(s, "tun_id=%#"PRIx64"/%#"PRIx64",", - ntohll(f->tun_id), ntohll(wc->tun_id_mask)); - break; - } - if (!(w & FWW_IN_PORT)) { - ds_put_format(s, "in_port=%"PRIu16",", f->in_port); - } - if (wc->vlan_tci_mask) { - ovs_be16 vid_mask = wc->vlan_tci_mask & htons(VLAN_VID_MASK); - ovs_be16 pcp_mask = wc->vlan_tci_mask & htons(VLAN_PCP_MASK); - ovs_be16 cfi = wc->vlan_tci_mask & htons(VLAN_CFI); - - if (cfi && f->vlan_tci & htons(VLAN_CFI) - && (!vid_mask || vid_mask == htons(VLAN_VID_MASK)) - && (!pcp_mask || pcp_mask == htons(VLAN_PCP_MASK)) - && (vid_mask || pcp_mask)) { - if (vid_mask) { - ds_put_format(s, "dl_vlan=%"PRIu16",", - vlan_tci_to_vid(f->vlan_tci)); } - if (pcp_mask) { - ds_put_format(s, "dl_vlan_pcp=%d,", - vlan_tci_to_pcp(f->vlan_tci)); - } - } else if (wc->vlan_tci_mask == htons(0xffff)) { - ds_put_format(s, "vlan_tci=0x%04"PRIx16",", ntohs(f->vlan_tci)); - } else { - ds_put_format(s, "vlan_tci=0x%04"PRIx16"/0x%04"PRIx16",", - ntohs(f->vlan_tci), ntohs(wc->vlan_tci_mask)); } } - if (!(w & FWW_DL_SRC)) { - ds_put_format(s, "dl_src="ETH_ADDR_FMT",", ETH_ADDR_ARGS(f->dl_src)); - } - switch (w & (FWW_DL_DST | FWW_ETH_MCAST)) { - case 0: - ds_put_format(s, "dl_dst="ETH_ADDR_FMT",", ETH_ADDR_ARGS(f->dl_dst)); - break; - case FWW_DL_DST: - ds_put_format(s, "dl_dst="ETH_ADDR_FMT"/01:00:00:00:00:00,", - ETH_ADDR_ARGS(f->dl_dst)); - break; - case FWW_ETH_MCAST: - ds_put_format(s, "dl_dst="ETH_ADDR_FMT"/fe:ff:ff:ff:ff:ff,", - ETH_ADDR_ARGS(f->dl_dst)); - break; - case FWW_DL_DST | FWW_ETH_MCAST: - break; - } - if (!skip_type && !(w & FWW_DL_TYPE)) { - ds_put_format(s, "dl_type=0x%04"PRIx16",", ntohs(f->dl_type)); - } - if (f->dl_type == htons(ETH_TYPE_IPV6)) { - format_ipv6_netmask(s, "ipv6_src", 
&f->ipv6_src, &wc->ipv6_src_mask); - format_ipv6_netmask(s, "ipv6_dst", &f->ipv6_dst, &wc->ipv6_dst_mask); - if (!(w & FWW_IPV6_LABEL)) { - ds_put_format(s, "ipv6_label=0x%05"PRIx32",", ntohl(f->ipv6_label)); - } - } else { - format_ip_netmask(s, "nw_src", f->nw_src, wc->nw_src_mask); - format_ip_netmask(s, "nw_dst", f->nw_dst, wc->nw_dst_mask); - } - if (!skip_proto && !(w & FWW_NW_PROTO)) { - if (f->dl_type == htons(ETH_TYPE_ARP)) { - ds_put_format(s, "arp_op=%"PRIu8",", f->nw_proto); - } else { - ds_put_format(s, "nw_proto=%"PRIu8",", f->nw_proto); - } - } - if (f->dl_type == htons(ETH_TYPE_ARP)) { - if (!(w & FWW_ARP_SHA)) { - ds_put_format(s, "arp_sha="ETH_ADDR_FMT",", - ETH_ADDR_ARGS(f->arp_sha)); - } - if (!(w & FWW_ARP_THA)) { - ds_put_format(s, "arp_tha="ETH_ADDR_FMT",", - ETH_ADDR_ARGS(f->arp_tha)); - } - } - if (!(w & FWW_NW_DSCP)) { - ds_put_format(s, "nw_tos=%"PRIu8",", f->nw_tos & IP_DSCP_MASK); - } - if (!(w & FWW_NW_ECN)) { - ds_put_format(s, "nw_ecn=%"PRIu8",", f->nw_tos & IP_ECN_MASK); - } - if (!(w & FWW_NW_TTL)) { - ds_put_format(s, "nw_ttl=%"PRIu8",", f->nw_ttl); - } - switch (wc->nw_frag_mask) { - case FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER: - ds_put_format(s, "nw_frag=%s,", - f->nw_frag & FLOW_NW_FRAG_ANY - ? (f->nw_frag & FLOW_NW_FRAG_LATER ? "later" : "first") - : (f->nw_frag & FLOW_NW_FRAG_LATER ? "" : "no")); - break; - - case FLOW_NW_FRAG_ANY: - ds_put_format(s, "nw_frag=%s,", - f->nw_frag & FLOW_NW_FRAG_ANY ? "yes" : "no"); - break; - - case FLOW_NW_FRAG_LATER: - ds_put_format(s, "nw_frag=%s,", - f->nw_frag & FLOW_NW_FRAG_LATER ? "later" : "not_later"); - break; - } - if (f->nw_proto == IPPROTO_ICMP) { - format_be16_masked(s, "icmp_type", f->tp_src, wc->tp_src_mask); - format_be16_masked(s, "icmp_code", f->tp_dst, wc->tp_dst_mask); - } else if (f->nw_proto == IPPROTO_ICMPV6) { - format_be16_masked(s, "icmp_type", f->tp_src, wc->tp_src_mask); - format_be16_masked(s, "icmp_code", f->tp_dst, wc->tp_dst_mask); - if (!(w & FWW_ND_TARGET)) { - ds_put_cstr(s, "nd_target="); - print_ipv6_addr(s, &f->nd_target); - ds_put_char(s, ','); - } - if (!(w & FWW_ARP_SHA)) { - ds_put_format(s, "nd_sll="ETH_ADDR_FMT",", - ETH_ADDR_ARGS(f->arp_sha)); - } - if (!(w & FWW_ARP_THA)) { - ds_put_format(s, "nd_tll="ETH_ADDR_FMT",", - ETH_ADDR_ARGS(f->arp_tha)); - } - } else { - format_be16_masked(s, "tp_src", f->tp_src, wc->tp_src_mask); - format_be16_masked(s, "tp_dst", f->tp_dst, wc->tp_dst_mask); - } - - if (s->length > start_len && ds_last(s) == ',') { - s->length--; - } } -/* Converts 'rule' to a string and returns the string. The caller must free - * the string (with free()). */ -char * -cls_rule_to_string(const struct cls_rule *rule) +/* Returns true if 'cls' contains no classification rules, false otherwise. */ +bool +classifier_is_empty(const struct classifier *cls) { - struct ds s = DS_EMPTY_INITIALIZER; - cls_rule_format(rule, &s); - return ds_steal_cstr(&s); + return cls->n_rules == 0; } -void -cls_rule_print(const struct cls_rule *rule) +/* Returns the number of rules in 'cls'. */ +int +classifier_count(const struct classifier *cls) { - char *s = cls_rule_to_string(rule); - puts(s); - free(s); + return cls->n_rules; } - -/* Initializes 'cls' as a classifier that initially contains no classification - * rules. */ -void -classifier_init(struct classifier *cls) + +static uint32_t +hash_metadata(ovs_be64 metadata_) { - cls->n_rules = 0; - hmap_init(&cls->tables); + uint64_t metadata = (OVS_FORCE uint64_t) metadata_; + return hash_uint64(metadata); } -/* Destroys 'cls'. 
Rules within 'cls', if any, are not freed; this is the - * caller's responsibility. */ -void -classifier_destroy(struct classifier *cls) +static struct cls_partition * +find_partition(const struct classifier *cls, ovs_be64 metadata, uint32_t hash) { - if (cls) { - struct cls_table *table, *next_table; + struct cls_partition *partition; - HMAP_FOR_EACH_SAFE (table, next_table, hmap_node, &cls->tables) { - hmap_destroy(&table->rules); - hmap_remove(&cls->tables, &table->hmap_node); - free(table); + HMAP_FOR_EACH_IN_BUCKET (partition, hmap_node, hash, &cls->partitions) { + if (partition->metadata == metadata) { + return partition; } - hmap_destroy(&cls->tables); } -} -/* Returns true if 'cls' contains no classification rules, false otherwise. */ -bool -classifier_is_empty(const struct classifier *cls) -{ - return cls->n_rules == 0; + return NULL; } -/* Returns the number of rules in 'classifier'. */ -int -classifier_count(const struct classifier *cls) +static struct cls_partition * +create_partition(struct classifier *cls, struct cls_subtable *subtable, + ovs_be64 metadata) { - return cls->n_rules; + uint32_t hash = hash_metadata(metadata); + struct cls_partition *partition = find_partition(cls, metadata, hash); + if (!partition) { + partition = xmalloc(sizeof *partition); + partition->metadata = metadata; + partition->tags = 0; + tag_tracker_init(&partition->tracker); + hmap_insert(&cls->partitions, &partition->hmap_node, hash); + } + tag_tracker_add(&partition->tracker, &partition->tags, subtable->tag); + return partition; } /* Inserts 'rule' into 'cls'. Until 'rule' is removed from 'cls', the caller @@ -756,7 +531,8 @@ classifier_count(const struct classifier *cls) * If 'cls' already contains an identical rule (including wildcards, values of * fixed fields, and priority), replaces the old rule by 'rule' and returns the * rule that was replaced. The caller takes ownership of the returned rule and - * is thus responsible for freeing it, etc., as necessary. + * is thus responsible for destroying it with cls_rule_destroy(), freeing the + * memory block in which it resides, etc., as necessary. * * Returns NULL if 'cls' does not contain a rule with an identical key, after * inserting the new rule. 
In this case, no rules are displaced by the new @@ -766,17 +542,34 @@ struct cls_rule * classifier_replace(struct classifier *cls, struct cls_rule *rule) { struct cls_rule *old_rule; - struct cls_table *table; + struct cls_subtable *subtable; - table = find_table(cls, &rule->wc); - if (!table) { - table = insert_table(cls, &rule->wc); + subtable = find_subtable(cls, &rule->match.mask); + if (!subtable) { + subtable = insert_subtable(cls, &rule->match.mask); } - old_rule = insert_rule(table, rule); + old_rule = insert_rule(cls, subtable, rule); if (!old_rule) { - table->n_table_rules++; + int i; + + if (minimask_get_metadata_mask(&rule->match.mask) == OVS_BE64_MAX) { + ovs_be64 metadata = miniflow_get_metadata(&rule->match.flow); + rule->partition = create_partition(cls, subtable, metadata); + } else { + rule->partition = NULL; + } + + subtable->n_rules++; cls->n_rules++; + + for (i = 0; i < cls->n_tries; i++) { + if (subtable->trie_plen[i]) { + trie_insert(&cls->tries[i], rule, subtable->trie_plen[i]); + } + } + } else { + rule->partition = old_rule->partition; } return old_rule; } @@ -791,55 +584,227 @@ void classifier_insert(struct classifier *cls, struct cls_rule *rule) { struct cls_rule *displaced_rule = classifier_replace(cls, rule); - assert(!displaced_rule); + ovs_assert(!displaced_rule); +} + +/* Removes 'rule' from 'cls'. It is the caller's responsibility to destroy + * 'rule' with cls_rule_destroy(), freeing the memory block in which 'rule' + * resides, etc., as necessary. */ +void +classifier_remove(struct classifier *cls, struct cls_rule *rule) +{ + struct cls_partition *partition; + struct cls_rule *head; + struct cls_subtable *subtable; + int i; + + subtable = find_subtable(cls, &rule->match.mask); + + for (i = 0; i < cls->n_tries; i++) { + if (subtable->trie_plen[i]) { + trie_remove(&cls->tries[i], rule, subtable->trie_plen[i]); + } + } + + /* Remove rule node from indices. */ + for (i = 0; i < subtable->n_indices; i++) { + hindex_remove(&subtable->indices[i], &rule->index_nodes[i]); + } + + head = find_equal(subtable, &rule->match.flow, rule->hmap_node.hash); + if (head != rule) { + list_remove(&rule->list); + } else if (list_is_empty(&rule->list)) { + hmap_remove(&subtable->rules, &rule->hmap_node); + } else { + struct cls_rule *next = CONTAINER_OF(rule->list.next, + struct cls_rule, list); + + list_remove(&rule->list); + hmap_replace(&subtable->rules, &rule->hmap_node, &next->hmap_node); + } + + partition = rule->partition; + if (partition) { + tag_tracker_subtract(&partition->tracker, &partition->tags, + subtable->tag); + if (!partition->tags) { + hmap_remove(&cls->partitions, &partition->hmap_node); + free(partition); + } + } + + if (--subtable->n_rules == 0) { + destroy_subtable(cls, subtable); + } else { + update_subtables_after_removal(cls, subtable, rule->priority); + } + + cls->n_rules--; +} + +/* Prefix tree context. Valid when 'lookup_done' is true. Can skip all + * subtables which have more than 'match_plen' bits in their corresponding + * field at offset 'be32ofs'. If skipped, 'maskbits' prefix bits should be + * unwildcarded to quarantee datapath flow matches only packets it should. */ +struct trie_ctx { + const struct cls_trie *trie; + bool lookup_done; /* Status of the lookup. */ + uint8_t be32ofs; /* U32 offset of the field in question. */ + unsigned int match_plen; /* Longest prefix than could possibly match. */ + unsigned int maskbits; /* Prefix length needed to avoid false matches. 
*/ +}; + +static void +trie_ctx_init(struct trie_ctx *ctx, const struct cls_trie *trie) +{ + ctx->trie = trie; + ctx->be32ofs = trie->field->flow_be32ofs; + ctx->lookup_done = false; +} + +/* Finds and returns the highest-priority rule in 'cls' that matches 'flow'. + * Returns a null pointer if no rules in 'cls' match 'flow'. If multiple rules + * of equal priority match 'flow', returns one arbitrarily. + * + * If a rule is found and 'wc' is non-null, bitwise-OR's 'wc' with the + * set of bits that were significant in the lookup. At some point + * earlier, 'wc' should have been initialized (e.g., by + * flow_wildcards_init_catchall()). */ +struct cls_rule * +classifier_lookup(const struct classifier *cls, const struct flow *flow, + struct flow_wildcards *wc) +{ + const struct cls_partition *partition; + struct cls_subtable *subtable; + struct cls_rule *best; + tag_type tags; + struct trie_ctx trie_ctx[CLS_MAX_TRIES]; + int i; + + /* Determine 'tags' such that, if 'subtable->tag' doesn't intersect them, + * then 'flow' cannot possibly match in 'subtable': + * + * - If flow->metadata maps to a given 'partition', then we can use + * 'tags' for 'partition->tags'. + * + * - If flow->metadata has no partition, then no rule in 'cls' has an + * exact-match for flow->metadata. That means that we don't need to + * search any subtable that includes flow->metadata in its mask. + * + * In either case, we always need to search any cls_subtables that do not + * include flow->metadata in its mask. One way to do that would be to + * check the "cls_subtable"s explicitly for that, but that would require an + * extra branch per subtable. Instead, we mark such a cls_subtable's + * 'tags' as TAG_ALL and make sure that 'tags' is never empty. This means + * that 'tags' always intersects such a cls_subtable's 'tags', so we don't + * need a special case. + */ + partition = (hmap_is_empty(&cls->partitions) + ? NULL + : find_partition(cls, flow->metadata, + hash_metadata(flow->metadata))); + tags = partition ? partition->tags : TAG_ARBITRARY; + + /* Initialize trie contexts for match_find_wc(). */ + for (i = 0; i < cls->n_tries; i++) { + trie_ctx_init(&trie_ctx[i], &cls->tries[i]); + } + best = NULL; + LIST_FOR_EACH (subtable, list_node, &cls->subtables_priority) { + struct cls_rule *rule; + + if (!tag_intersects(tags, subtable->tag)) { + continue; + } + + rule = find_match_wc(subtable, flow, trie_ctx, cls->n_tries, wc); + if (rule) { + best = rule; + LIST_FOR_EACH_CONTINUE (subtable, list_node, + &cls->subtables_priority) { + if (subtable->max_priority <= best->priority) { + /* Subtables are in descending priority order, + * can not find anything better. */ + return best; + } + if (!tag_intersects(tags, subtable->tag)) { + continue; + } + + rule = find_match_wc(subtable, flow, trie_ctx, cls->n_tries, + wc); + if (rule && rule->priority > best->priority) { + best = rule; + } + } + break; + } + } + + return best; +} + +/* Returns true if 'target' satisifies 'match', that is, if each bit for which + * 'match' specifies a particular value has the correct value in 'target'. 
*/ +static bool +minimatch_matches_miniflow(const struct minimatch *match, + const struct miniflow *target) +{ + const uint32_t *flowp = (const uint32_t *)match->flow.values; + const uint32_t *maskp = (const uint32_t *)match->mask.masks.values; + uint32_t target_u32; + + MINIFLOW_FOR_EACH_IN_MAP(target_u32, target, match->mask.masks.map) { + if ((*flowp++ ^ target_u32) & *maskp++) { + return false; + } + } + + return true; } -/* Removes 'rule' from 'cls'. It is the caller's responsibility to free - * 'rule', if this is desirable. */ -void -classifier_remove(struct classifier *cls, struct cls_rule *rule) +static inline struct cls_rule * +find_match_miniflow(const struct cls_subtable *subtable, + const struct miniflow *flow, + uint32_t hash) { - struct cls_rule *head; - struct cls_table *table; - - table = find_table(cls, &rule->wc); - head = find_equal(table, &rule->flow, rule->hmap_node.hash); - if (head != rule) { - list_remove(&rule->list); - } else if (list_is_empty(&rule->list)) { - hmap_remove(&table->rules, &rule->hmap_node); - } else { - struct cls_rule *next = CONTAINER_OF(rule->list.next, - struct cls_rule, list); - - list_remove(&rule->list); - hmap_replace(&table->rules, &rule->hmap_node, &next->hmap_node); - } + struct cls_rule *rule; - if (--table->n_table_rules == 0) { - destroy_table(cls, table); + HMAP_FOR_EACH_WITH_HASH (rule, hmap_node, hash, &subtable->rules) { + if (minimatch_matches_miniflow(&rule->match, flow)) { + return rule; + } } - cls->n_rules--; + return NULL; } -/* Finds and returns the highest-priority rule in 'cls' that matches 'flow'. - * Returns a null pointer if no rules in 'cls' match 'flow'. If multiple rules - * of equal priority match 'flow', returns one arbitrarily. */ -struct cls_rule * -classifier_lookup(const struct classifier *cls, const struct flow *flow) +/* Finds and returns the highest-priority rule in 'cls' that matches + * 'miniflow'. Returns a null pointer if no rules in 'cls' match 'flow'. + * If multiple rules of equal priority match 'flow', returns one arbitrarily. + * + * This function is optimized for the userspace datapath, which only ever has + * one priority value for it's flows! + */ +struct cls_rule *classifier_lookup_miniflow_first(const struct classifier *cls, + const struct miniflow *flow) { - struct cls_table *table; - struct cls_rule *best; + struct cls_subtable *subtable; - best = NULL; - HMAP_FOR_EACH (table, hmap_node, &cls->tables) { - struct cls_rule *rule = find_match(table, flow); - if (rule && (!best || rule->priority > best->priority)) { - best = rule; + LIST_FOR_EACH (subtable, list_node, &cls->subtables_priority) { + struct cls_rule *rule; + + rule = find_match_miniflow(subtable, flow, + miniflow_hash_in_minimask(flow, + &subtable->mask, + 0)); + if (rule) { + return rule; } } - return best; + + return NULL; } /* Finds and returns a rule in 'cls' with exactly the same priority and @@ -850,14 +815,21 @@ classifier_find_rule_exactly(const struct classifier *cls, const struct cls_rule *target) { struct cls_rule *head, *rule; - struct cls_table *table; + struct cls_subtable *subtable; + + subtable = find_subtable(cls, &target->match.mask); + if (!subtable) { + return NULL; + } - table = find_table(cls, &target->wc); - if (!table) { + /* Skip if there is no hope. 
*/ + if (target->priority > subtable->max_priority) { return NULL; } - head = find_equal(table, &target->flow, flow_hash(&target->flow, 0)); + head = find_equal(subtable, &target->match.flow, + miniflow_hash_in_minimask(&target->match.flow, + &target->match.mask, 0)); FOR_EACH_RULE_IN_LIST (rule, head) { if (target->priority >= rule->priority) { return target->priority == rule->priority ? rule : NULL; @@ -866,6 +838,24 @@ classifier_find_rule_exactly(const struct classifier *cls, return NULL; } +/* Finds and returns a rule in 'cls' with priority 'priority' and exactly the + * same matching criteria as 'target'. Returns a null pointer if 'cls' doesn't + * contain an exact match. */ +struct cls_rule * +classifier_find_match_exactly(const struct classifier *cls, + const struct match *target, + unsigned int priority) +{ + struct cls_rule *retval; + struct cls_rule cr; + + cls_rule_init(&cr, target, priority); + retval = classifier_find_rule_exactly(cls, &cr); + cls_rule_destroy(&cr); + + return retval; +} + /* Checks if 'target' would overlap any other rule in 'cls'. Two rules are * considered to overlap if both rules have the same priority and a packet * could match both. */ @@ -873,19 +863,29 @@ bool classifier_rule_overlaps(const struct classifier *cls, const struct cls_rule *target) { - struct cls_table *table; + struct cls_subtable *subtable; - HMAP_FOR_EACH (table, hmap_node, &cls->tables) { - struct flow_wildcards wc; + /* Iterate subtables in the descending max priority order. */ + LIST_FOR_EACH (subtable, list_node, &cls->subtables_priority) { + uint32_t storage[FLOW_U32S]; + struct minimask mask; struct cls_rule *head; - flow_wildcards_combine(&wc, &target->wc, &table->wc); - HMAP_FOR_EACH (head, hmap_node, &table->rules) { + if (target->priority > subtable->max_priority) { + break; /* Can skip this and the rest of the subtables. */ + } + + minimask_combine(&mask, &target->match.mask, &subtable->mask, storage); + HMAP_FOR_EACH (head, hmap_node, &subtable->rules) { struct cls_rule *rule; FOR_EACH_RULE_IN_LIST (rule, head) { + if (rule->priority < target->priority) { + break; /* Rules in descending priority order. */ + } if (rule->priority == target->priority - && flow_equal_except(&target->flow, &rule->flow, &wc)) { + && miniflow_equal_in_minimask(&target->match.flow, + &rule->match.flow, &mask)) { return true; } } @@ -894,6 +894,48 @@ classifier_rule_overlaps(const struct classifier *cls, return false; } + +/* Returns true if 'rule' exactly matches 'criteria' or if 'rule' is more + * specific than 'criteria'. That is, 'rule' matches 'criteria' and this + * function returns true if, for every field: + * + * - 'criteria' and 'rule' specify the same (non-wildcarded) value for the + * field, or + * + * - 'criteria' wildcards the field, + * + * Conversely, 'rule' does not match 'criteria' and this function returns false + * if, for at least one field: + * + * - 'criteria' and 'rule' specify different values for the field, or + * + * - 'criteria' specifies a value for the field but 'rule' wildcards it. + * + * Equivalently, the truth table for whether a field matches is: + * + * rule + * + * c wildcard exact + * r +---------+---------+ + * i wild | yes | yes | + * t card | | | + * e +---------+---------+ + * r exact | no |if values| + * i | |are equal| + * a +---------+---------+ + * + * This is the matching rule used by OpenFlow 1.0 non-strict OFPT_FLOW_MOD + * commands and by OpenFlow 1.0 aggregate and flow stats. + * + * Ignores rule->priority. 
*/ +bool +cls_rule_is_loose_match(const struct cls_rule *rule, + const struct minimatch *criteria) +{ + return (!minimask_has_extra(&rule->match.mask, &criteria->mask) + && miniflow_equal_in_minimask(&rule->match.flow, &criteria->flow, + &criteria->mask)); +} /* Iteration. */ @@ -901,16 +943,19 @@ static bool rule_matches(const struct cls_rule *rule, const struct cls_rule *target) { return (!target - || flow_equal_except(&rule->flow, &target->flow, &target->wc)); + || miniflow_equal_in_minimask(&rule->match.flow, + &target->match.flow, + &target->match.mask)); } static struct cls_rule * -search_table(const struct cls_table *table, const struct cls_rule *target) +search_subtable(const struct cls_subtable *subtable, + const struct cls_rule *target) { - if (!target || !flow_wildcards_has_extra(&table->wc, &target->wc)) { + if (!target || !minimask_has_extra(&subtable->mask, &target->match.mask)) { struct cls_rule *rule; - HMAP_FOR_EACH (rule, hmap_node, &table->rules) { + HMAP_FOR_EACH (rule, hmap_node, &subtable->rules) { if (rule_matches(rule, target)) { return rule; } @@ -919,46 +964,20 @@ search_table(const struct cls_table *table, const struct cls_rule *target) return NULL; } -/* Initializes 'cursor' for iterating through 'cls' rules that exactly match - * 'target' or are more specific than 'target'. That is, a given 'rule' - * matches 'target' if, for every field: - * - * - 'target' and 'rule' specify the same (non-wildcarded) value for the - * field, or - * - * - 'target' wildcards the field, - * - * but not if: - * - * - 'target' and 'rule' specify different values for the field, or +/* Initializes 'cursor' for iterating through rules in 'cls': * - * - 'target' specifies a value for the field but 'rule' wildcards it. - * - * Equivalently, the truth table for whether a field matches is: - * - * rule - * - * wildcard exact - * +---------+---------+ - * t wild | yes | yes | - * a card | | | - * r +---------+---------+ - * g exact | no |if values| - * e | |are equal| - * t +---------+---------+ - * - * This is the matching rule used by OpenFlow 1.0 non-strict OFPT_FLOW_MOD - * commands and by OpenFlow 1.0 aggregate and flow stats. + * - If 'target' is null, the cursor will visit every rule in 'cls'. * - * Ignores target->priority. + * - If 'target' is nonnull, the cursor will visit each 'rule' in 'cls' + * such that cls_rule_is_loose_match(rule, target) returns true. * - * 'target' may be NULL to iterate over every rule in 'cls'. */ + * Ignores target->priority. */ void cls_cursor_init(struct cls_cursor *cursor, const struct classifier *cls, const struct cls_rule *target) { cursor->cls = cls; - cursor->target = target; + cursor->target = target && !cls_rule_is_catchall(target) ? 
target : NULL; } /* Returns the first matching cls_rule in 'cursor''s iteration, or a null @@ -966,12 +985,12 @@ cls_cursor_init(struct cls_cursor *cursor, const struct classifier *cls, struct cls_rule * cls_cursor_first(struct cls_cursor *cursor) { - struct cls_table *table; + struct cls_subtable *subtable; - HMAP_FOR_EACH (table, hmap_node, &cursor->cls->tables) { - struct cls_rule *rule = search_table(table, cursor->target); + HMAP_FOR_EACH (subtable, hmap_node, &cursor->cls->subtables) { + struct cls_rule *rule = search_subtable(subtable, cursor->target); if (rule) { - cursor->table = table; + cursor->subtable = subtable; return rule; } } @@ -982,9 +1001,10 @@ cls_cursor_first(struct cls_cursor *cursor) /* Returns the next matching cls_rule in 'cursor''s iteration, or a null * pointer if there are no more matches. */ struct cls_rule * -cls_cursor_next(struct cls_cursor *cursor, struct cls_rule *rule) +cls_cursor_next(struct cls_cursor *cursor, const struct cls_rule *rule_) { - const struct cls_table *table; + struct cls_rule *rule = CONST_CAST(struct cls_rule *, rule_); + const struct cls_subtable *subtable; struct cls_rule *next; next = next_rule_in_list__(rule); @@ -993,20 +1013,20 @@ cls_cursor_next(struct cls_cursor *cursor, struct cls_rule *rule) } /* 'next' is the head of the list, that is, the rule that is included in - * the table's hmap. (This is important when the classifier contains rules - * that differ only in priority.) */ + * the subtable's hmap. (This is important when the classifier contains + * rules that differ only in priority.) */ rule = next; - HMAP_FOR_EACH_CONTINUE (rule, hmap_node, &cursor->table->rules) { + HMAP_FOR_EACH_CONTINUE (rule, hmap_node, &cursor->subtable->rules) { if (rule_matches(rule, cursor->target)) { return rule; } } - table = cursor->table; - HMAP_FOR_EACH_CONTINUE (table, hmap_node, &cursor->cls->tables) { - rule = search_table(table, cursor->target); + subtable = cursor->subtable; + HMAP_FOR_EACH_CONTINUE (subtable, hmap_node, &cursor->cls->subtables) { + rule = search_subtable(subtable, cursor->target); if (rule) { - cursor->table = table; + cursor->subtable = subtable; return rule; } } @@ -1014,65 +1034,355 @@ cls_cursor_next(struct cls_cursor *cursor, struct cls_rule *rule) return NULL; } -static struct cls_table * -find_table(const struct classifier *cls, const struct flow_wildcards *wc) +static struct cls_subtable * +find_subtable(const struct classifier *cls, const struct minimask *mask) { - struct cls_table *table; + struct cls_subtable *subtable; - HMAP_FOR_EACH_IN_BUCKET (table, hmap_node, flow_wildcards_hash(wc, 0), - &cls->tables) { - if (flow_wildcards_equal(wc, &table->wc)) { - return table; + HMAP_FOR_EACH_IN_BUCKET (subtable, hmap_node, minimask_hash(mask, 0), + &cls->subtables) { + if (minimask_equal(mask, &subtable->mask)) { + return subtable; } } return NULL; } -static struct cls_table * -insert_table(struct classifier *cls, const struct flow_wildcards *wc) +static struct cls_subtable * +insert_subtable(struct classifier *cls, const struct minimask *mask) +{ + uint32_t hash = minimask_hash(mask, 0); + struct cls_subtable *subtable; + int i, index = 0; + struct flow_wildcards old, new; + uint8_t prev; + + subtable = xzalloc(sizeof *subtable); + hmap_init(&subtable->rules); + minimask_clone(&subtable->mask, mask); + + /* Init indices for segmented lookup, if any. 
*/ + flow_wildcards_init_catchall(&new); + old = new; + prev = 0; + for (i = 0; i < cls->n_flow_segments; i++) { + flow_wildcards_fold_minimask_range(&new, mask, prev, + cls->flow_segments[i]); + /* Add an index if it adds mask bits. */ + if (!flow_wildcards_equal(&new, &old)) { + hindex_init(&subtable->indices[index]); + subtable->index_ofs[index] = cls->flow_segments[i]; + index++; + old = new; + } + prev = cls->flow_segments[i]; + } + /* Check if the rest of the subtable's mask adds any bits, + * and remove the last index if it doesn't. */ + if (index > 0) { + flow_wildcards_fold_minimask_range(&new, mask, prev, FLOW_U32S); + if (flow_wildcards_equal(&new, &old)) { + --index; + subtable->index_ofs[index] = 0; + hindex_destroy(&subtable->indices[index]); + } + } + subtable->n_indices = index; + + hmap_insert(&cls->subtables, &subtable->hmap_node, hash); + list_push_back(&cls->subtables_priority, &subtable->list_node); + subtable->tag = (minimask_get_metadata_mask(mask) == OVS_BE64_MAX + ? tag_create_deterministic(hash) + : TAG_ALL); + + for (i = 0; i < cls->n_tries; i++) { + subtable->trie_plen[i] = minimask_get_prefix_len(mask, + cls->tries[i].field); + } + + return subtable; +} + +static void +destroy_subtable(struct classifier *cls, struct cls_subtable *subtable) { - struct cls_table *table; + int i; + + for (i = 0; i < subtable->n_indices; i++) { + hindex_destroy(&subtable->indices[i]); + } + minimask_destroy(&subtable->mask); + hmap_remove(&cls->subtables, &subtable->hmap_node); + hmap_destroy(&subtable->rules); + list_remove(&subtable->list_node); + free(subtable); +} - table = xzalloc(sizeof *table); - hmap_init(&table->rules); - table->wc = *wc; - hmap_insert(&cls->tables, &table->hmap_node, flow_wildcards_hash(wc, 0)); +/* This function performs the following updates for 'subtable' in 'cls' + * following the addition of a new rule with priority 'new_priority' to + * 'subtable': + * + * - Update 'subtable->max_priority' and 'subtable->max_count' if necessary. + * + * - Update 'subtable''s position in 'cls->subtables_priority' if necessary. + * + * This function should only be called after adding a new rule, not after + * replacing a rule by an identical one or modifying a rule in-place. */ +static void +update_subtables_after_insertion(struct classifier *cls, + struct cls_subtable *subtable, + unsigned int new_priority) +{ + if (new_priority == subtable->max_priority) { + ++subtable->max_count; + } else if (new_priority > subtable->max_priority) { + struct cls_subtable *iter; + + subtable->max_priority = new_priority; + subtable->max_count = 1; + + /* Possibly move 'subtable' earlier in the priority list. If we break + * out of the loop, then 'subtable' should be moved just after that + * 'iter'. If the loop terminates normally, then 'iter' will be the + * list head and we'll move subtable just after that (e.g. to the front + * of the list). */ + iter = subtable; + LIST_FOR_EACH_REVERSE_CONTINUE (iter, list_node, + &cls->subtables_priority) { + if (iter->max_priority >= subtable->max_priority) { + break; + } + } - return table; + /* Move 'subtable' just after 'iter' (unless it's already there). 
*/ + if (iter->list_node.next != &subtable->list_node) { + list_splice(iter->list_node.next, + &subtable->list_node, subtable->list_node.next); + } + } } +/* This function performs the following updates for 'subtable' in 'cls' + * following the deletion of a rule with priority 'del_priority' from + * 'subtable': + * + * - Update 'subtable->max_priority' and 'subtable->max_count' if necessary. + * + * - Update 'subtable''s position in 'cls->subtables_priority' if necessary. + * + * This function should only be called after removing a rule, not after + * replacing a rule by an identical one or modifying a rule in-place. */ static void -destroy_table(struct classifier *cls, struct cls_table *table) +update_subtables_after_removal(struct classifier *cls, + struct cls_subtable *subtable, + unsigned int del_priority) { - hmap_remove(&cls->tables, &table->hmap_node); - hmap_destroy(&table->rules); - free(table); + struct cls_subtable *iter; + + if (del_priority == subtable->max_priority && --subtable->max_count == 0) { + struct cls_rule *head; + + subtable->max_priority = 0; + HMAP_FOR_EACH (head, hmap_node, &subtable->rules) { + if (head->priority > subtable->max_priority) { + subtable->max_priority = head->priority; + subtable->max_count = 1; + } else if (head->priority == subtable->max_priority) { + ++subtable->max_count; + } + } + + /* Possibly move 'subtable' later in the priority list. If we break + * out of the loop, then 'subtable' should be moved just before that + * 'iter'. If the loop terminates normally, then 'iter' will be the + * list head and we'll move subtable just before that (e.g. to the back + * of the list). */ + iter = subtable; + LIST_FOR_EACH_CONTINUE (iter, list_node, &cls->subtables_priority) { + if (iter->max_priority <= subtable->max_priority) { + break; + } + } + + /* Move 'subtable' just before 'iter' (unless it's already there). */ + if (iter->list_node.prev != &subtable->list_node) { + list_splice(&iter->list_node, + &subtable->list_node, subtable->list_node.next); + } + } } -static struct cls_rule * -find_match(const struct cls_table *table, const struct flow *flow) +struct range { + uint8_t start; + uint8_t end; +}; + +/* Return 'true' if can skip rest of the subtable based on the prefix trie + * lookup results. */ +static inline bool +check_tries(struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries, + const unsigned int field_plen[CLS_MAX_TRIES], + const struct range ofs, const struct flow *flow, + struct flow_wildcards *wc) +{ + int j; + + /* Check if we could avoid fully unwildcarding the next level of + * fields using the prefix tries. The trie checks are done only as + * needed to avoid folding in additional bits to the wildcards mask. */ + for (j = 0; j < n_tries; j++) { + /* Is the trie field relevant for this subtable? */ + if (field_plen[j]) { + struct trie_ctx *ctx = &trie_ctx[j]; + uint8_t be32ofs = ctx->be32ofs; + + /* Is the trie field within the current range of fields? */ + if (be32ofs >= ofs.start && be32ofs < ofs.end) { + /* On-demand trie lookup. */ + if (!ctx->lookup_done) { + ctx->match_plen = trie_lookup(ctx->trie, flow, + &ctx->maskbits); + ctx->lookup_done = true; + } + /* Possible to skip the rest of the subtable if subtable's + * prefix on the field is longer than what is known to match + * based on the trie lookup. */ + if (field_plen[j] > ctx->match_plen) { + /* RFC: We want the trie lookup to never result in + * unwildcarding any bits that would not be unwildcarded + * otherwise. 
Since the trie is shared by the whole + * classifier, it is possible that the 'maskbits' contain + * bits that are irrelevant for the partition of the + * classifier relevant for the current flow. */ + + /* Can skip if the field is already unwildcarded. */ + if (mask_prefix_bits_set(wc, be32ofs, ctx->maskbits)) { + return true; + } + /* Check that the trie result will not unwildcard more bits + * than this stage will. */ + if (ctx->maskbits <= field_plen[j]) { + /* Unwildcard the bits and skip the rest. */ + mask_set_prefix_bits(wc, be32ofs, ctx->maskbits); + /* Note: Prerequisite already unwildcarded, as the only + * prerequisite of the supported trie lookup fields is + * the ethertype, which is currently always + * unwildcarded. + */ + return true; + } + } + } + } + } + return false; +} + +static inline struct cls_rule * +find_match(const struct cls_subtable *subtable, const struct flow *flow, + uint32_t hash) { struct cls_rule *rule; - struct flow f; - f = *flow; - flow_zero_wildcards(&f, &table->wc); - HMAP_FOR_EACH_WITH_HASH (rule, hmap_node, flow_hash(&f, 0), - &table->rules) { - if (flow_equal(&f, &rule->flow)) { + HMAP_FOR_EACH_WITH_HASH (rule, hmap_node, hash, &subtable->rules) { + if (minimatch_matches_flow(&rule->match, flow)) { return rule; } } + + return NULL; +} + +static struct cls_rule * +find_match_wc(const struct cls_subtable *subtable, const struct flow *flow, + struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries, + struct flow_wildcards *wc) +{ + uint32_t basis = 0, hash; + struct cls_rule *rule = NULL; + int i; + struct range ofs; + + if (!wc) { + return find_match(subtable, flow, + flow_hash_in_minimask(flow, &subtable->mask, 0)); + } + + ofs.start = 0; + /* Try to finish early by checking fields in segments. */ + for (i = 0; i < subtable->n_indices; i++) { + struct hindex_node *inode; + ofs.end = subtable->index_ofs[i]; + + if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow, + wc)) { + goto range_out; + } + hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start, + ofs.end, &basis); + ofs.start = ofs.end; + inode = hindex_node_with_hash(&subtable->indices[i], hash); + if (!inode) { + /* No match, can stop immediately, but must fold in the mask + * covered so far. */ + goto range_out; + } + + /* If we have narrowed down to a single rule already, check whether + * that rule matches. If it does match, then we're done. If it does + * not match, then we know that we will never get a match, but we do + * not yet know how many wildcards we need to fold into 'wc' so we + * continue iterating through indices to find that out. (We won't + * waste time calling minimatch_matches_flow() again because we've set + * 'rule' nonnull.) + * + * This check shows a measurable benefit with non-trivial flow tables. + * + * (Rare) hash collisions may cause us to miss the opportunity for this + * optimization. */ + if (!inode->s && !rule) { + ASSIGN_CONTAINER(rule, inode - i, index_nodes); + if (minimatch_matches_flow(&rule->match, flow)) { + goto out; + } + } + } + ofs.end = FLOW_U32S; + /* Trie check for the final range. */ + if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow, wc)) { + goto range_out; + } + if (!rule) { + /* Multiple potential matches exist, look for one. */ + hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start, + ofs.end, &basis); + rule = find_match(subtable, flow, hash); + } else { + /* We already narrowed the matching candidates down to just 'rule', + * but it didn't match. 
*/ + rule = NULL; + } + out: + /* Must unwildcard all the fields, as they were looked at. */ + flow_wildcards_fold_minimask(wc, &subtable->mask); + return rule; + + range_out: + /* Must unwildcard the fields looked up so far, if any. */ + if (ofs.start) { + flow_wildcards_fold_minimask_range(wc, &subtable->mask, 0, ofs.start); + } return NULL; } static struct cls_rule * -find_equal(struct cls_table *table, const struct flow *flow, uint32_t hash) +find_equal(struct cls_subtable *subtable, const struct miniflow *flow, + uint32_t hash) { struct cls_rule *head; - HMAP_FOR_EACH_WITH_HASH (head, hmap_node, hash, &table->rules) { - if (flow_equal(&head->flow, flow)) { + HMAP_FOR_EACH_WITH_HASH (head, hmap_node, hash, &subtable->rules) { + if (miniflow_equal(&head->match.flow, flow)) { return head; } } @@ -1080,43 +1390,68 @@ find_equal(struct cls_table *table, const struct flow *flow, uint32_t hash) } static struct cls_rule * -insert_rule(struct cls_table *table, struct cls_rule *new) +insert_rule(struct classifier *cls, struct cls_subtable *subtable, + struct cls_rule *new) { struct cls_rule *head; - - new->hmap_node.hash = flow_hash(&new->flow, 0); - - head = find_equal(table, &new->flow, new->hmap_node.hash); + struct cls_rule *old = NULL; + int i; + uint32_t basis = 0, hash; + uint8_t prev_be32ofs = 0; + + /* Add new node to segment indices. */ + for (i = 0; i < subtable->n_indices; i++) { + hash = minimatch_hash_range(&new->match, prev_be32ofs, + subtable->index_ofs[i], &basis); + hindex_insert(&subtable->indices[i], &new->index_nodes[i], hash); + prev_be32ofs = subtable->index_ofs[i]; + } + hash = minimatch_hash_range(&new->match, prev_be32ofs, FLOW_U32S, &basis); + head = find_equal(subtable, &new->match.flow, hash); if (!head) { - hmap_insert(&table->rules, &new->hmap_node, new->hmap_node.hash); + hmap_insert(&subtable->rules, &new->hmap_node, hash); list_init(&new->list); - return NULL; + goto out; } else { /* Scan the list for the insertion point that will keep the list in * order of decreasing priority. */ struct cls_rule *rule; + + new->hmap_node.hash = hash; /* Otherwise done by hmap_insert. */ + FOR_EACH_RULE_IN_LIST (rule, head) { if (new->priority >= rule->priority) { if (rule == head) { /* 'new' is the new highest-priority flow in the list. */ - hmap_replace(&table->rules, + hmap_replace(&subtable->rules, &rule->hmap_node, &new->hmap_node); } if (new->priority == rule->priority) { list_replace(&new->list, &rule->list); - return rule; + old = rule; + goto out; } else { list_insert(&rule->list, &new->list); - return NULL; + goto out; } } } /* Insert 'new' at the end of the list. */ list_push_back(&head->list, &new->list); - return NULL; } + + out: + if (!old) { + update_subtables_after_insertion(cls, subtable, new->priority); + } else { + /* Remove old node from indices. */ + for (i = 0; i < subtable->n_indices; i++) { + hindex_remove(&subtable->indices[i], &old->index_nodes[i]); + } + } + return old; } static struct cls_rule * @@ -1132,76 +1467,393 @@ next_rule_in_list(struct cls_rule *rule) struct cls_rule *next = next_rule_in_list__(rule); return next->priority < rule->priority ? next : NULL; } + +/* A longest-prefix match tree. */ +struct trie_node { + uint32_t prefix; /* Prefix bits for this node, MSB first. */ + uint8_t nbits; /* Never zero, except for the root node. */ + unsigned int n_rules; /* Number of rules that have this prefix. */ + struct trie_node *edges[2]; /* Both NULL if leaf. */ +}; + +/* Max bits per node. Must fit in struct trie_node's 'prefix'. 
+ * Also tested with 16, 8, and 5 to stress the implementation. */ +#define TRIE_PREFIX_BITS 32 + +/* Return at least 'plen' bits of the 'prefix', starting at bit offset 'ofs'. + * Prefixes are in the network byte order, and the offset 0 corresponds to + * the most significant bit of the first byte. The offset can be read as + * "how many bits to skip from the start of the prefix starting at 'pr'". */ +static uint32_t +raw_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen) +{ + uint32_t prefix; + + pr += ofs / 32; /* Where to start. */ + ofs %= 32; /* How many bits to skip at 'pr'. */ + + prefix = ntohl(*pr) << ofs; /* Get the first 32 - ofs bits. */ + if (plen > 32 - ofs) { /* Need more than we have already? */ + prefix |= ntohl(*++pr) >> (32 - ofs); + } + /* Return with possible unwanted bits at the end. */ + return prefix; +} + +/* Return min(TRIE_PREFIX_BITS, plen) bits of the 'prefix', starting at bit + * offset 'ofs'. Prefixes are in the network byte order, and the offset 0 + * corresponds to the most significant bit of the first byte. The offset can + * be read as "how many bits to skip from the start of the prefix starting at + * 'pr'". */ +static uint32_t +trie_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen) +{ + if (!plen) { + return 0; + } + if (plen > TRIE_PREFIX_BITS) { + plen = TRIE_PREFIX_BITS; /* Get at most TRIE_PREFIX_BITS. */ + } + /* Return with unwanted bits cleared. */ + return raw_get_prefix(pr, ofs, plen) & ~0u << (32 - plen); +} + +/* Return the number of equal bits in 'nbits' of 'prefix's MSBs and a 'value' + * starting at "MSB 0"-based offset 'ofs'. */ +static unsigned int +prefix_equal_bits(uint32_t prefix, unsigned int nbits, const ovs_be32 value[], + unsigned int ofs) +{ + uint64_t diff = prefix ^ raw_get_prefix(value, ofs, nbits); + /* Set the bit after the relevant bits to limit the result. */ + return raw_clz64(diff << 32 | UINT64_C(1) << (63 - nbits)); +} + +/* Return the number of equal bits in 'node' prefix and a 'prefix' of length + * 'plen', starting at "MSB 0"-based offset 'ofs'. */ +static unsigned int +trie_prefix_equal_bits(const struct trie_node *node, const ovs_be32 prefix[], + unsigned int ofs, unsigned int plen) +{ + return prefix_equal_bits(node->prefix, MIN(node->nbits, plen - ofs), + prefix, ofs); +} + +/* Return the bit at ("MSB 0"-based) offset 'ofs' as an int. 'ofs' can + * be greater than 31. */ +static unsigned int +be_get_bit_at(const ovs_be32 value[], unsigned int ofs) +{ + return (((const uint8_t *)value)[ofs / 8] >> (7 - ofs % 8)) & 1u; +} + +/* Return the bit at ("MSB 0"-based) offset 'ofs' as an int. 'ofs' must + * be between 0 and 31, inclusive. */ +static unsigned int +get_bit_at(const uint32_t prefix, unsigned int ofs) +{ + return (prefix >> (31 - ofs)) & 1u; +} + +/* Create new branch. */ +static struct trie_node * +trie_branch_create(const ovs_be32 *prefix, unsigned int ofs, unsigned int plen, + unsigned int n_rules) +{ + struct trie_node *node = xmalloc(sizeof *node); + + node->prefix = trie_get_prefix(prefix, ofs, plen); + + if (plen <= TRIE_PREFIX_BITS) { + node->nbits = plen; + node->edges[0] = NULL; + node->edges[1] = NULL; + node->n_rules = n_rules; + } else { /* Need intermediate nodes. 
*/ + struct trie_node *subnode = trie_branch_create(prefix, + ofs + TRIE_PREFIX_BITS, + plen - TRIE_PREFIX_BITS, + n_rules); + int bit = get_bit_at(subnode->prefix, 0); + node->nbits = TRIE_PREFIX_BITS; + node->edges[bit] = subnode; + node->edges[!bit] = NULL; + node->n_rules = 0; + } + return node; +} + +static void +trie_node_destroy(struct trie_node *node) +{ + free(node); +} + +static void +trie_destroy(struct trie_node *node) +{ + if (node) { + trie_destroy(node->edges[0]); + trie_destroy(node->edges[1]); + free(node); + } +} static bool -ipv6_equal_except(const struct in6_addr *a, const struct in6_addr *b, - const struct in6_addr *mask) +trie_is_leaf(const struct trie_node *trie) { - int i; + return !trie->edges[0] && !trie->edges[1]; /* No children. */ +} -#ifdef s6_addr32 - for (i=0; i<4; i++) { - if ((a->s6_addr32[i] ^ b->s6_addr32[i]) & mask->s6_addr32[i]) { - return false; - } +static void +mask_set_prefix_bits(struct flow_wildcards *wc, uint8_t be32ofs, + unsigned int nbits) +{ + ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs]; + unsigned int i; + + for (i = 0; i < nbits / 32; i++) { + mask[i] = OVS_BE32_MAX; } -#else - for (i=0; i<16; i++) { - if ((a->s6_addr[i] ^ b->s6_addr[i]) & mask->s6_addr[i]) { - return false; + if (nbits % 32) { + mask[i] |= htonl(~0u << (32 - nbits % 32)); + } +} + +static bool +mask_prefix_bits_set(const struct flow_wildcards *wc, uint8_t be32ofs, + unsigned int nbits) +{ + ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs]; + unsigned int i; + ovs_be32 zeroes = 0; + + for (i = 0; i < nbits / 32; i++) { + zeroes |= ~mask[i]; + } + if (nbits % 32) { + zeroes |= ~mask[i] & htonl(~0u << (32 - nbits % 32)); + } + + return !zeroes; /* All 'nbits' bits set. */ +} + +static struct trie_node ** +trie_next_edge(struct trie_node *node, const ovs_be32 value[], + unsigned int ofs) +{ + return node->edges + be_get_bit_at(value, ofs); +} + +static const struct trie_node * +trie_next_node(const struct trie_node *node, const ovs_be32 value[], + unsigned int ofs) +{ + return node->edges[be_get_bit_at(value, ofs)]; +} + +/* Return the prefix mask length necessary to find the longest-prefix match for + * the '*value' in the prefix tree 'node'. + * '*checkbits' is set to the number of bits in the prefix mask necessary to + * determine a mismatch, in case there are longer prefixes in the tree below + * the one that matched. + */ +static unsigned int +trie_lookup_value(const struct trie_node *node, const ovs_be32 value[], + unsigned int *checkbits) +{ + unsigned int plen = 0, match_len = 0; + const struct trie_node *prev = NULL; + + for (; node; prev = node, node = trie_next_node(node, value, plen)) { + unsigned int eqbits; + /* Check if this edge can be followed. */ + eqbits = prefix_equal_bits(node->prefix, node->nbits, value, plen); + plen += eqbits; + if (eqbits < node->nbits) { /* Mismatch, nothing more to be found. */ + /* Bit at offset 'plen' differed. */ + *checkbits = plen + 1; /* Includes the first mismatching bit. */ + return match_len; + } + /* Full match, check if rules exist at this prefix length. */ + if (node->n_rules > 0) { + match_len = plen; } } -#endif + /* Dead end, exclude the other branch if it exists. */ + *checkbits = !prev || trie_is_leaf(prev) ? plen : plen + 1; + return match_len; +} - return true; +static unsigned int +trie_lookup(const struct cls_trie *trie, const struct flow *flow, + unsigned int *checkbits) +{ + const struct mf_field *mf = trie->field; + + /* Check that current flow matches the prerequisites for the trie + * field. 
Some match fields are used for multiple purposes, so we + * must check that the trie is relevant for this flow. */ + if (mf_are_prereqs_ok(mf, flow)) { + return trie_lookup_value(trie->root, + &((ovs_be32 *)flow)[mf->flow_be32ofs], + checkbits); + } + *checkbits = 0; /* Value not used in this case. */ + return UINT_MAX; } +/* Returns the length of a prefix match mask for the field 'mf' in 'minimask'. + * Returns the u32 offset to the miniflow data in '*miniflow_index', if + * 'miniflow_index' is not NULL. */ +static unsigned int +minimask_get_prefix_len(const struct minimask *minimask, + const struct mf_field *mf) +{ + unsigned int nbits = 0, mask_tz = 0; /* Non-zero when end of mask seen. */ + uint8_t u32_ofs = mf->flow_be32ofs; + uint8_t u32_end = u32_ofs + mf->n_bytes / 4; + + for (; u32_ofs < u32_end; ++u32_ofs) { + uint32_t mask; + mask = ntohl((OVS_FORCE ovs_be32)minimask_get(minimask, u32_ofs)); -static bool -flow_equal_except(const struct flow *a, const struct flow *b, - const struct flow_wildcards *wildcards) + /* Validate mask, count the mask length. */ + if (mask_tz) { + if (mask) { + return 0; /* No bits allowed after mask ended. */ + } + } else { + if (~mask & (~mask + 1)) { + return 0; /* Mask not contiguous. */ + } + mask_tz = ctz32(mask); + nbits += 32 - mask_tz; + } + } + + return nbits; +} + +/* + * This is called only when mask prefix is known to be CIDR and non-zero. + * Relies on the fact that the flow and mask have the same map, and since + * the mask is CIDR, the storage for the flow field exists even if it + * happened to be zeros. + */ +static const ovs_be32 * +minimatch_get_prefix(const struct minimatch *match, const struct mf_field *mf) { - const flow_wildcards_t wc = wildcards->wildcards; - int i; + return match->flow.values + + count_1bits(match->flow.map & ((UINT64_C(1) << mf->flow_be32ofs) - 1)); +} - BUILD_ASSERT_DECL(FLOW_WC_SEQ == 8); +/* Insert rule in to the prefix tree. + * 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask + * in 'rule'. */ +static void +trie_insert(struct cls_trie *trie, const struct cls_rule *rule, int mlen) +{ + const ovs_be32 *prefix = minimatch_get_prefix(&rule->match, trie->field); + struct trie_node *node; + struct trie_node **edge; + int ofs = 0; + + /* Walk the tree. */ + for (edge = &trie->root; + (node = *edge) != NULL; + edge = trie_next_edge(node, prefix, ofs)) { + unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen); + ofs += eqbits; + if (eqbits < node->nbits) { + /* Mismatch, new node needs to be inserted above. */ + int old_branch = get_bit_at(node->prefix, eqbits); + + /* New parent node. */ + *edge = trie_branch_create(prefix, ofs - eqbits, eqbits, + ofs == mlen ? 1 : 0); + + /* Adjust old node for its new position in the tree. */ + node->prefix <<= eqbits; + node->nbits -= eqbits; + (*edge)->edges[old_branch] = node; + + /* Check if need a new branch for the new rule. */ + if (ofs < mlen) { + (*edge)->edges[!old_branch] + = trie_branch_create(prefix, ofs, mlen - ofs, 1); + } + return; + } + /* Full match so far. */ - for (i = 0; i < FLOW_N_REGS; i++) { - if ((a->regs[i] ^ b->regs[i]) & wildcards->reg_masks[i]) { - return false; + if (ofs == mlen) { + /* Full match at the current node, rule needs to be added here. */ + node->n_rules++; + return; } } + /* Must insert a new tree branch for the new rule. */ + *edge = trie_branch_create(prefix, ofs, mlen - ofs, 1); +} + +/* 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask + * in 'rule'. 
*/ +static void +trie_remove(struct cls_trie *trie, const struct cls_rule *rule, int mlen) +{ + const ovs_be32 *prefix = minimatch_get_prefix(&rule->match, trie->field); + struct trie_node *node; + struct trie_node **edges[sizeof(union mf_value) * 8]; + int depth = 0, ofs = 0; + + /* Walk the tree. */ + for (edges[depth] = &trie->root; + (node = *edges[depth]) != NULL; + edges[++depth] = trie_next_edge(node, prefix, ofs)) { + unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen); + if (eqbits < node->nbits) { + /* Mismatch, nothing to be removed. This should never happen, as + * only rules in the classifier are ever removed. */ + break; /* Log a warning. */ + } + /* Full match so far. */ + ofs += eqbits; - return (!((a->tun_id ^ b->tun_id) & wildcards->tun_id_mask) - && !((a->nw_src ^ b->nw_src) & wildcards->nw_src_mask) - && !((a->nw_dst ^ b->nw_dst) & wildcards->nw_dst_mask) - && (wc & FWW_IN_PORT || a->in_port == b->in_port) - && !((a->vlan_tci ^ b->vlan_tci) & wildcards->vlan_tci_mask) - && (wc & FWW_DL_TYPE || a->dl_type == b->dl_type) - && !((a->tp_src ^ b->tp_src) & wildcards->tp_src_mask) - && !((a->tp_dst ^ b->tp_dst) & wildcards->tp_dst_mask) - && (wc & FWW_DL_SRC || eth_addr_equals(a->dl_src, b->dl_src)) - && (wc & FWW_DL_DST - || (!((a->dl_dst[0] ^ b->dl_dst[0]) & 0xfe) - && a->dl_dst[1] == b->dl_dst[1] - && a->dl_dst[2] == b->dl_dst[2] - && a->dl_dst[3] == b->dl_dst[3] - && a->dl_dst[4] == b->dl_dst[4] - && a->dl_dst[5] == b->dl_dst[5])) - && (wc & FWW_ETH_MCAST - || !((a->dl_dst[0] ^ b->dl_dst[0]) & 0x01)) - && (wc & FWW_NW_PROTO || a->nw_proto == b->nw_proto) - && (wc & FWW_NW_TTL || a->nw_ttl == b->nw_ttl) - && (wc & FWW_NW_DSCP || !((a->nw_tos ^ b->nw_tos) & IP_DSCP_MASK)) - && (wc & FWW_NW_ECN || !((a->nw_tos ^ b->nw_tos) & IP_ECN_MASK)) - && !((a->nw_frag ^ b->nw_frag) & wildcards->nw_frag_mask) - && (wc & FWW_ARP_SHA || eth_addr_equals(a->arp_sha, b->arp_sha)) - && (wc & FWW_ARP_THA || eth_addr_equals(a->arp_tha, b->arp_tha)) - && (wc & FWW_IPV6_LABEL || a->ipv6_label == b->ipv6_label) - && ipv6_equal_except(&a->ipv6_src, &b->ipv6_src, - &wildcards->ipv6_src_mask) - && ipv6_equal_except(&a->ipv6_dst, &b->ipv6_dst, - &wildcards->ipv6_dst_mask) - && (wc & FWW_ND_TARGET - || ipv6_addr_equals(&a->nd_target, &b->nd_target))); + if (ofs == mlen) { + /* Full prefix match at the current node, remove rule here. */ + if (!node->n_rules) { + break; /* Log a warning. */ + } + node->n_rules--; + + /* Check if can prune the tree. */ + while (!node->n_rules && !(node->edges[0] && node->edges[1])) { + /* No rules and at most one child node, remove this node. */ + struct trie_node *next; + next = node->edges[0] ? node->edges[0] : node->edges[1]; + + if (next) { + if (node->nbits + next->nbits > TRIE_PREFIX_BITS) { + break; /* Cannot combine. */ + } + /* Combine node with next. */ + next->prefix = node->prefix | next->prefix >> node->nbits; + next->nbits += node->nbits; + } + trie_node_destroy(node); + /* Update the parent's edge. */ + *edges[depth] = next; + if (next || !depth) { + /* Branch not pruned or at root, nothing more to do. */ + break; + } + node = *edges[--depth]; + } + return; + } + } + /* Cannot go deeper. This should never happen, since only rules + * that actually exist in the classifier are ever removed. */ + VLOG_WARN("Trying to remove non-existing rule from a prefix trie."); }
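
The staged lookup introduced by insert_subtable() and find_match_wc() hashes the masked packet fields one segment at a time, carrying the hash basis forward and probing a per-stage index before touching any later fields; on a miss at stage i, only the fields of segments 0..i have been looked at, so only those need to be un-wildcarded. Below is a minimal standalone sketch of that idea, assuming a toy single-entry "index" per stage; the hash function, struct stage, and helper names are illustrative stand-ins, not the OVS functions used in the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KEY_WORDS 4

/* Incremental hash over the masked words in [start, end).  The mixing
 * constant is arbitrary and only for illustration. */
static uint32_t
hash_range(const uint32_t key[], const uint32_t mask[],
           unsigned int start, unsigned int end, uint32_t basis)
{
    uint32_t h = basis;
    unsigned int i;

    for (i = start; i < end; i++) {
        h = (h ^ (key[i] & mask[i])) * 0x9e3779b1u;
    }
    return h;
}

struct stage {
    unsigned int end;        /* This stage covers [previous end, end). */
    uint32_t present_hash;   /* Toy single-entry "index" for the stage. */
};

/* Returns true on a full match.  '*n_words_looked_at' tells the caller how
 * much of the key influenced the decision, i.e. how much must be
 * un-wildcarded even on a miss. */
static bool
staged_match(const uint32_t key[], const uint32_t mask[],
             const struct stage stages[], unsigned int n_stages,
             unsigned int *n_words_looked_at)
{
    uint32_t basis = 0;
    unsigned int start = 0;
    unsigned int i;

    for (i = 0; i < n_stages; i++) {
        basis = hash_range(key, mask, start, stages[i].end, basis);
        start = stages[i].end;
        if (basis != stages[i].present_hash) {
            *n_words_looked_at = start;   /* Mismatch: stop early. */
            return false;
        }
    }
    *n_words_looked_at = KEY_WORDS;
    return true;
}

int
main(void)
{
    const uint32_t mask[KEY_WORDS] = { 0xffffffff, 0xffff0000, 0, 0 };
    const uint32_t rule[KEY_WORDS] = { 0x0a000001, 0x00500000, 0, 0 };
    const uint32_t hit[KEY_WORDS]  = { 0x0a000001, 0x00501234, 7, 7 };
    const uint32_t miss[KEY_WORDS] = { 0x0b000001, 0x00501234, 7, 7 };
    struct stage stages[2] = { { 1, 0 }, { KEY_WORDS, 0 } };
    uint32_t basis = 0;
    unsigned int start = 0, looked, i;

    /* "Insert" the rule: record its partial hash at each stage. */
    for (i = 0; i < 2; i++) {
        basis = hash_range(rule, mask, start, stages[i].end, basis);
        stages[i].present_hash = basis;
        start = stages[i].end;
    }

    printf("hit: %d, words looked at %u\n",
           staged_match(hit, mask, stages, 2, &looked), looked);   /* 1, 4 */
    printf("miss: %d, words looked at %u\n",
           staged_match(miss, mask, stages, 2, &looked), looked);  /* 0, 1 */
    return 0;
}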
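
raw_get_prefix() and trie_get_prefix() pull up to 32 prefix bits out of an array of network-byte-order words, starting at an arbitrary "MSB 0" bit offset. A small self-contained re-creation of that extraction, assuming plain uint32_t storage instead of ovs_be32 and made-up function names:

#include <arpa/inet.h>   /* ntohl(), htonl() */
#include <stdint.h>
#include <stdio.h>

/* Return at least 'plen' bits of the prefix stored in the network-byte-order
 * words 'pr_be[]', starting at "MSB 0" bit offset 'ofs'.  Bits beyond 'plen'
 * may be left unspecified, as in the patch's raw_get_prefix(). */
static uint32_t
get_prefix_raw(const uint32_t pr_be[], unsigned int ofs, unsigned int plen)
{
    uint32_t prefix;

    pr_be += ofs / 32;                 /* Word holding the first wanted bit. */
    ofs %= 32;                         /* Bits to skip inside that word. */

    prefix = ntohl(pr_be[0]) << ofs;   /* First 32 - ofs bits. */
    if (plen > 32 - ofs) {             /* Need bits from the next word too? */
        prefix |= ntohl(pr_be[1]) >> (32 - ofs);
    }
    return prefix;
}

/* Same, but with the unwanted low-order bits cleared and 'plen' capped at 32
 * bits, mirroring what a fixed-width trie node can store. */
static uint32_t
get_prefix(const uint32_t pr_be[], unsigned int ofs, unsigned int plen)
{
    if (!plen) {
        return 0;
    }
    if (plen > 32) {
        plen = 32;
    }
    return get_prefix_raw(pr_be, ofs, plen) & (~0u << (32 - plen));
}

int
main(void)
{
    uint32_t addr_be = htonl(0xc0a80100);           /* 192.168.1.0 */

    printf("%08x\n", get_prefix(&addr_be, 0, 24));  /* c0a80100 */
    printf("%08x\n", get_prefix(&addr_be, 8, 16));  /* a8010000 */
    return 0;
}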
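
mask_set_prefix_bits() and mask_prefix_bits_set() work on whole big-endian 32-bit words, which keeps the code byte-order independent. A standalone sketch of the same pattern on a bare word array; the helper names here are made up for the example and the array must be large enough for 'nbits':

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Set the first 'nbits' bits (network bit order) in 'mask_be[]'. */
static void
set_prefix_bits(uint32_t mask_be[], unsigned int nbits)
{
    unsigned int i;

    for (i = 0; i < nbits / 32; i++) {
        mask_be[i] = htonl(~0u);
    }
    if (nbits % 32) {
        mask_be[i] |= htonl(~0u << (32 - nbits % 32));
    }
}

/* Return true if the first 'nbits' bits of 'mask_be[]' are all set. */
static bool
prefix_bits_set(const uint32_t mask_be[], unsigned int nbits)
{
    uint32_t zeroes = 0;
    unsigned int i;

    for (i = 0; i < nbits / 32; i++) {
        zeroes |= ~mask_be[i];
    }
    if (nbits % 32) {
        zeroes |= ~mask_be[i] & htonl(~0u << (32 - nbits % 32));
    }
    return !zeroes;   /* No 0-bit found among the first 'nbits' bits. */
}

int
main(void)
{
    uint32_t mask[2] = { 0, 0 };

    set_prefix_bits(mask, 20);                        /* A /20 mask. */
    printf("%d %d\n", prefix_bits_set(mask, 20),      /* 1 */
           prefix_bits_set(mask, 24));                /* 0 */
    return 0;
}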
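
trie_lookup_value() reports two things: the longest prefix length at which rules exist for the looked-up value, and how many leading bits had to be inspected to prove that no longer prefix matches ('checkbits'); check_tries() then un-wildcards only those bits. The sketch below reproduces both outputs on a deliberately simplified one-bit-per-node trie, without the patch's path compression; all names are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
    struct node *edge[2];   /* Children for bit 0 / bit 1. */
    unsigned int n_rules;   /* Rules whose prefix ends at this depth. */
};

static struct node *
node_new(void)
{
    return calloc(1, sizeof(struct node));
}

static unsigned int
bit_at(uint32_t value, unsigned int ofs)   /* "MSB 0" bit offset, 0..31. */
{
    return (value >> (31 - ofs)) & 1u;
}

static void
trie_add(struct node *root, uint32_t prefix, unsigned int plen)
{
    struct node *n = root;
    unsigned int i;

    for (i = 0; i < plen; i++) {
        unsigned int b = bit_at(prefix, i);

        if (!n->edge[b]) {
            n->edge[b] = node_new();
        }
        n = n->edge[b];
    }
    n->n_rules++;
}

/* Longest-prefix match for 'value'.  '*checkbits' is set to the number of
 * leading bits that must be exact-matched to reproduce this result for any
 * other value. */
static unsigned int
trie_lpm(const struct node *root, uint32_t value, unsigned int *checkbits)
{
    const struct node *n = root;
    unsigned int match_len = 0, plen = 0;

    for (;;) {
        const struct node *next = n->edge[bit_at(value, plen)];

        if (!next) {
            /* Dead end: if the other branch exists, the bit at 'plen' was
             * needed to rule it out. */
            *checkbits = n->edge[0] || n->edge[1] ? plen + 1 : plen;
            return match_len;
        }
        n = next;
        plen++;
        if (n->n_rules) {
            match_len = plen;
        }
        if (plen == 32) {
            *checkbits = 32;
            return match_len;
        }
    }
}

int
main(void)
{
    struct node *root = node_new();
    unsigned int checkbits;

    trie_add(root, 0x0a000000, 8);    /* 10.0.0.0/8 */
    trie_add(root, 0x0a010000, 16);   /* 10.1.0.0/16 */

    printf("match %u, checkbits %u\n",
           trie_lpm(root, 0x0a020304, &checkbits), checkbits);
    /* Prints "match 8, checkbits 15": only the /8 covers 10.2.3.4, and bits
     * up to and including the first one that excludes the /16 branch had to
     * be examined. */
    return 0;
}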
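
minimask_get_prefix_len() accepts only CIDR-style masks: within each 32-bit word the 1-bits must be contiguous from the most significant end, and no 1-bits may appear once the mask has ended. The expression '~mask & (~mask + 1)' is what rejects non-contiguous words. A standalone version of that computation on plain host-order words rather than a minimask (function names are made up for the example):

#include <stdint.h>
#include <stdio.h>

/* Count trailing zeros; returns 32 for a zero word, which is what the
 * prefix-length computation relies on. */
static unsigned int
ctz32_safe(uint32_t x)
{
    unsigned int n = 0;

    if (!x) {
        return 32;
    }
    while (!(x & 1)) {
        x >>= 1;
        n++;
    }
    return n;
}

/* Return the CIDR prefix length of the mask stored MSB-first in 'mask[]'
 * ('n_words' 32-bit words), or 0 if the mask is not a valid prefix. */
static unsigned int
cidr_prefix_len(const uint32_t mask[], size_t n_words)
{
    unsigned int nbits = 0;
    int ended = 0;
    size_t i;

    for (i = 0; i < n_words; i++) {
        uint32_t m = mask[i];

        if (ended) {
            if (m) {
                return 0;              /* No bits allowed after mask ended. */
            }
        } else {
            if (~m & (~m + 1)) {
                return 0;              /* 0-bits below a 1-bit: not CIDR. */
            }
            nbits += 32 - ctz32_safe(m);
            if (m != ~0u) {
                ended = 1;             /* Mask ended inside this word. */
            }
        }
    }
    return nbits;
}

int
main(void)
{
    uint32_t ok[2]  = { 0xffffffff, 0xffff0000 };   /* A /48 prefix. */
    uint32_t bad[2] = { 0xffffff00, 0x0000ffff };   /* Hole in the mask. */

    printf("%u\n", cidr_prefix_len(ok, 2));    /* 48 */
    printf("%u\n", cidr_prefix_len(bad, 2));   /* 0  */
    return 0;
}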