X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=datapath%2Fflow_table.c;h=c8bd9d1eddc4d8c663f74585b50e75bc22a49b09;hb=d49fc3ff53c65e4eca9cabd52ac63396746a7ef5;hp=54a2e25e99582b60e5ee6c617986d7a9dd1e8954;hpb=9ac56358dec1a5aa7f4275a42971f55fad1f7f35;p=sliver-openvswitch.git

diff --git a/datapath/flow_table.c b/datapath/flow_table.c
index 54a2e25e9..c8bd9d1ed 100644
--- a/datapath/flow_table.c
+++ b/datapath/flow_table.c
@@ -47,8 +47,13 @@
 #include "vlan.h"
 
 #define TBL_MIN_BUCKETS 1024
+#define MASK_ARRAY_SIZE_MIN 16
 #define REHASH_INTERVAL (10 * 60 * HZ)
 
+#define MC_HASH_SHIFT 8
+#define MC_HASH_ENTRIES (1u << MC_HASH_SHIFT)
+#define MC_HASH_SEGS ((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
+
 static struct kmem_cache *flow_cache;
 struct kmem_cache *flow_stats_cache __read_mostly;
 
@@ -141,7 +146,7 @@ static void flow_free(struct sw_flow *flow)
 {
         int node;
 
-        kfree((struct sf_flow_acts __force *)flow->sf_acts);
+        kfree((struct sw_flow_actions __force *)flow->sf_acts);
         for_each_node(node)
                 if (flow->stats[node])
                         kmem_cache_free(flow_stats_cache,
@@ -168,25 +173,6 @@ void ovs_flow_free(struct sw_flow *flow, bool deferred)
         if (!flow)
                 return;
 
-        if (flow->mask) {
-                struct sw_flow_mask *mask = flow->mask;
-
-                /* ovs-lock is required to protect mask-refcount and
-                 * mask list.
-                 */
-                ASSERT_OVSL();
-                BUG_ON(!mask->ref_count);
-                mask->ref_count--;
-
-                if (!mask->ref_count) {
-                        list_del_rcu(&mask->list);
-                        if (deferred)
-                                call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
-                        else
-                                kfree(mask);
-                }
-        }
-
         if (deferred)
                 call_rcu(&flow->rcu, rcu_free_flow_callback);
         else
@@ -226,20 +212,83 @@ static struct table_instance *table_instance_alloc(int new_size)
         return ti;
 }
 
+static void mask_array_rcu_cb(struct rcu_head *rcu)
+{
+        struct mask_array *ma = container_of(rcu, struct mask_array, rcu);
+
+        kfree(ma);
+}
+
+static struct mask_array *tbl_mask_array_alloc(int size)
+{
+        struct mask_array *new;
+
+        new = kzalloc(sizeof(struct mask_array) +
+                      sizeof(struct sw_flow_mask *) * size, GFP_KERNEL);
+        if (!new)
+                return NULL;
+
+        new->count = 0;
+        new->max = size;
+
+        return new;
+}
+
+static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
+{
+        struct mask_array *old;
+        struct mask_array *new;
+
+        new = tbl_mask_array_alloc(size);
+        if (!new)
+                return -ENOMEM;
+
+        old = ovsl_dereference(tbl->mask_array);
+        if (old) {
+                int i;
+
+                for (i = 0; i < old->max; i++) {
+                        if (old->masks[i])
+                                new->masks[new->count++] = old->masks[i];
+                }
+        }
+        rcu_assign_pointer(tbl->mask_array, new);
+
+        if (old)
+                call_rcu(&old->rcu, mask_array_rcu_cb);
+
+        return 0;
+}
+
 int ovs_flow_tbl_init(struct flow_table *table)
 {
         struct table_instance *ti;
+        struct mask_array *ma;
 
-        ti = table_instance_alloc(TBL_MIN_BUCKETS);
+        table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
+                                           MC_HASH_ENTRIES,
+                                           __alignof__(struct mask_cache_entry));
+        if (!table->mask_cache)
+                return -ENOMEM;
+
+        ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
+        if (!ma)
+                goto free_mask_cache;
 
+        ti = table_instance_alloc(TBL_MIN_BUCKETS);
         if (!ti)
-                return -ENOMEM;
+                goto free_mask_array;
 
         rcu_assign_pointer(table->ti, ti);
-        INIT_LIST_HEAD(&table->mask_list);
+        rcu_assign_pointer(table->mask_array, ma);
         table->last_rehash = jiffies;
         table->count = 0;
         return 0;
+
+free_mask_array:
+        kfree((struct mask_array __force *)table->mask_array);
+free_mask_cache:
+        free_percpu(table->mask_cache);
+        return -ENOMEM;
 }
 
 static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
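
The three helpers above implement the new mask set as a single kzalloc'd block with a C99 flexible array member, grown by compacting live pointers into a larger copy that is then published with rcu_assign_pointer() while the old block is freed through call_rcu(). (One caveat: at the free_mask_array label in ovs_flow_tbl_init(), table->mask_array has not been assigned yet, so the freshly allocated ma appears to leak on that error path.) Below is a minimal userspace sketch of the same grow-by-copy pattern, assuming toy stand-in types and plain calloc()/free() in place of kzalloc() and RCU; it is an illustration, not part of the patch:

#include <stdio.h>
#include <stdlib.h>

struct mask { int dummy; };            /* stand-in for struct sw_flow_mask */

struct mask_array {
        int count, max;
        struct mask *masks[];          /* flexible array member, sized at alloc */
};

static struct mask_array *mask_array_alloc(int size)
{
        struct mask_array *ma = calloc(1, sizeof(*ma) +
                                       sizeof(struct mask *) * size);

        if (ma)
                ma->max = size;
        return ma;
}

/* Grow by compacting live entries into a larger copy, as
 * tbl_mask_array_realloc() does; the kernel then publishes the copy with
 * rcu_assign_pointer() and frees the old array via call_rcu(). */
static struct mask_array *mask_array_grow(struct mask_array *old, int size)
{
        struct mask_array *new = mask_array_alloc(size);
        int i;

        if (!new)
                return NULL;
        for (i = 0; old && i < old->max; i++)
                if (old->masks[i])
                        new->masks[new->count++] = old->masks[i];
        free(old);                     /* kernel: call_rcu(&old->rcu, ...) */
        return new;
}

int main(void)
{
        struct mask m0, m1;
        struct mask_array *ma = mask_array_alloc(2);

        if (!ma)
                return 1;
        ma->masks[ma->count++] = &m0;
        ma->masks[ma->count++] = &m1;
        ma = mask_array_grow(ma, 4);   /* array full: grow, keeping both */
        if (!ma)
                return 1;
        printf("count=%d max=%d\n", ma->count, ma->max);
        free(ma);
        return 0;
}

Compacting during the copy keeps all live masks in a dense prefix, which is what lets ovs_flow_tbl_num_masks() below report ma->count directly.
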
@@ -278,11 +327,15 @@ skip_flows:
         __table_instance_destroy(ti);
 }
 
-void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
+/* No need for locking; this function is called from an RCU callback or
+ * from the error path. */
+void ovs_flow_tbl_destroy(struct flow_table *table)
 {
-        struct table_instance *ti = ovsl_dereference(table->ti);
+        struct table_instance *ti = (struct table_instance __force *)table->ti;
 
-        table_instance_destroy(ti, deferred);
+        free_percpu(table->mask_cache);
+        kfree((struct mask_array __force *)table->mask_array);
+        table_instance_destroy(ti, false);
 }
 
 struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
@@ -437,7 +490,8 @@ bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
 
 static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
                                           const struct sw_flow_key *unmasked,
-                                          struct sw_flow_mask *mask)
+                                          struct sw_flow_mask *mask,
+                                          u32 *n_mask_hit)
 {
         struct sw_flow *flow;
         struct hlist_head *head;
@@ -449,6 +503,7 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
         ovs_flow_mask_key(&masked_key, unmasked, mask);
         hash = flow_hash(&masked_key, key_start, key_end);
         head = find_bucket(ti, hash);
+        (*n_mask_hit)++;
         hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
                 if (flow->mask == mask && flow->hash == hash &&
                     flow_cmp_masked_key(flow, &masked_key,
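
masked_flow_lookup() above works on the masked key: the lookup key is ANDed with the mask over the mask's byte range, hashed over that range only, and compared against the flows in one hash bucket. The following is a simplified sketch of the mask-then-hash step, assuming toy key/mask types and FNV-1a in place of the kernel's jhash-based flow_hash(); none of these names exist in the patch:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for struct sw_flow_key and struct sw_flow_mask. */
struct key  { uint8_t bytes[8]; };
struct mask { struct key key; int start, end; };

/* Byte-wise analogue of ovs_flow_mask_key(): masked = key & mask over
 * the mask's range; bytes outside the range are ignored entirely. */
static void mask_key(struct key *dst, const struct key *src,
                     const struct mask *m)
{
        int i;

        for (i = m->start; i < m->end; i++)
                dst->bytes[i] = src->bytes[i] & m->key.bytes[i];
}

/* FNV-1a over the masked range only; the kernel hashes the same window
 * with its jhash-based flow_hash(). */
static uint32_t hash_range(const struct key *k, int start, int end)
{
        uint32_t h = 2166136261u;
        int i;

        for (i = start; i < end; i++)
                h = (h ^ k->bytes[i]) * 16777619u;
        return h;
}

int main(void)
{
        struct key pkt = { { 0x0a, 0x00, 0x00, 0x01, 0xde, 0xad, 0xbe, 0xef } };
        struct mask m = { { { 0xff, 0xff, 0xff, 0x00 } }, 0, 4 };
        struct key masked = { { 0 } };

        mask_key(&masked, &pkt, &m);
        printf("bucket hash: %u\n", (unsigned)hash_range(&masked, m.start, m.end));
        return 0;
}

Because two flows only match the same bucket entry when they share both the mask and the hash of the masked key, the (*n_mask_hit) counter above counts one bucket probe per mask tried.
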
@@ -458,41 +513,118 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
         return NULL;
 }
 
+
+static struct sw_flow *flow_lookup(struct flow_table *tbl,
+                                   struct table_instance *ti,
+                                   struct mask_array *ma,
+                                   const struct sw_flow_key *key,
+                                   u32 *n_mask_hit,
+                                   u32 *index)
+{
+        struct sw_flow *flow;
+        int i;
+
+        for (i = 0; i < ma->max; i++) {
+                struct sw_flow_mask *mask;
+
+                mask = rcu_dereference_ovsl(ma->masks[i]);
+                if (mask) {
+                        flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+                        if (flow) { /* Found */
+                                *index = i;
+                                return flow;
+                        }
+                }
+        }
+
+        return NULL;
+}
+
+/*
+ * mask_cache maps a flow to a probable mask. The cache is not tightly
+ * coupled to the mask list, so updates to the mask list can leave stale
+ * entries in the cache.
+ * The cache is per CPU and is divided into MC_HASH_SEGS segments.
+ * On a hash collision the entry is hashed into the next segment.
+ */
 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
-                                          const struct sw_flow_key *key,
-                                          u32 *n_mask_hit)
+                                          const struct sw_flow_key *key,
+                                          u32 skb_hash,
+                                          u32 *n_mask_hit)
 {
+        struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
         struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
-        struct sw_flow_mask *mask;
+        struct mask_cache_entry *entries, *ce, *del;
         struct sw_flow *flow;
+        u32 hash = skb_hash;
+        int seg;
 
         *n_mask_hit = 0;
-        list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
-                (*n_mask_hit)++;
-                flow = masked_flow_lookup(ti, key, mask);
-                if (flow) /* Found */
-                        return flow;
+        if (unlikely(!skb_hash)) {
+                u32 __always_unused mask_index;
+
+                return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
         }
-        return NULL;
+
+        del = NULL;
+        entries = this_cpu_ptr(tbl->mask_cache);
+
+        for (seg = 0; seg < MC_HASH_SEGS; seg++) {
+                int index;
+
+                index = hash & (MC_HASH_ENTRIES - 1);
+                ce = &entries[index];
+
+                if (ce->skb_hash == skb_hash) {
+                        struct sw_flow_mask *mask;
+                        struct sw_flow *flow;
+
+                        mask = rcu_dereference_ovsl(ma->masks[ce->mask_index]);
+                        if (mask) {
+                                flow = masked_flow_lookup(ti, key, mask,
+                                                          n_mask_hit);
+                                if (flow) /* Found */
+                                        return flow;
+
+                        }
+                        del = ce;
+                        break;
+                }
+
+                if (!del || (del->skb_hash && !ce->skb_hash) ||
+                    (rcu_dereference_ovsl(ma->masks[del->mask_index]) &&
+                     !rcu_dereference_ovsl(ma->masks[ce->mask_index]))) {
+                        del = ce;
+                }
+
+                hash >>= MC_HASH_SHIFT;
+        }
+
+        flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &del->mask_index);
+        if (flow)
+                del->skb_hash = skb_hash;
+
+        return flow;
 }
 
 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
                                     const struct sw_flow_key *key)
 {
+        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+        struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
         u32 __always_unused n_mask_hit;
+        u32 __always_unused index;
 
-        return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
+        n_mask_hit = 0;
+        return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
 }
 
 int ovs_flow_tbl_num_masks(const struct flow_table *table)
 {
-        struct sw_flow_mask *mask;
-        int num = 0;
-
-        list_for_each_entry(mask, &table->mask_list, list)
-                num++;
+        struct mask_array *ma;
 
-        return num;
+        ma = rcu_dereference_ovsl(table->mask_array);
+        return ma->count;
 }
 
 static struct table_instance *table_instance_expand(struct table_instance *ti)
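
The probe loop above consumes the 32-bit skb_hash in MC_HASH_SHIFT-bit slices, so one hash yields MC_HASH_SEGS = 4 independent slots in the 256-entry per-CPU cache, and a collision in one segment is retried in the next. A standalone sketch of just that index derivation, reusing the patch's macro definitions:

#include <stdint.h>
#include <stdio.h>

#define MC_HASH_SHIFT   8
#define MC_HASH_ENTRIES (1u << MC_HASH_SHIFT)                    /* 256 slots */
#define MC_HASH_SEGS    ((sizeof(uint32_t) * 8) / MC_HASH_SHIFT) /* 4 probes */

int main(void)
{
        uint32_t skb_hash = 0xdeadbeef, hash = skb_hash;
        unsigned seg;

        /* Each segment uses the next 8 bits of the hash, so a collision in
         * one segment rarely collides again in the next. */
        for (seg = 0; seg < MC_HASH_SEGS; seg++) {
                printf("probe %u -> slot %u\n", seg,
                       (unsigned)(hash & (MC_HASH_ENTRIES - 1)));
                hash >>= MC_HASH_SHIFT;
        }
        return 0;
}

With skb_hash = 0xdeadbeef the probes visit slots 0xef, 0xbe, 0xad and 0xde, one per segment; a zero skb_hash skips the cache entirely, which is why ovs_flow_tbl_lookup_stats() falls back to the linear flow_lookup() in that case.
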
@@ -500,6 +632,37 @@ static struct table_instance *table_instance_expand(struct table_instance *ti)
 {
         return table_instance_rehash(ti, ti->n_buckets * 2);
 }
 
+/* Remove 'mask' from the mask list, if it is not needed any more. */
+static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
+{
+        if (mask) {
+                /* ovs-lock is required to protect mask-refcount and
+                 * mask list.
+                 */
+                ASSERT_OVSL();
+                BUG_ON(!mask->ref_count);
+                mask->ref_count--;
+
+                if (!mask->ref_count) {
+                        struct mask_array *ma;
+                        int i;
+
+                        ma = ovsl_dereference(tbl->mask_array);
+                        for (i = 0; i < ma->max; i++) {
+                                if (mask == ovsl_dereference(ma->masks[i])) {
+                                        RCU_INIT_POINTER(ma->masks[i], NULL);
+                                        ma->count--;
+                                        goto free;
+                                }
+                        }
+                        BUG();
+free:
+                        call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
+                }
+        }
+}
+
+/* Must be called with OVS mutex held. */
 void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
 {
         struct table_instance *ti = ovsl_dereference(table->ti);
@@ -507,6 +670,10 @@ void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
         BUG_ON(table->count == 0);
         hlist_del_rcu(&flow->hash_node[ti->node_ver]);
         table->count--;
+
+        /* RCU delete the mask. 'flow->mask' is not NULLed, as it should stay
+         * accessible as long as the RCU read lock is held. */
+        flow_mask_remove(table, flow->mask);
 }
 
 static struct sw_flow_mask *mask_alloc(void)
@@ -534,13 +701,16 @@ static bool mask_equal(const struct sw_flow_mask *a,
 static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
                                            const struct sw_flow_mask *mask)
 {
-        struct list_head *ml;
+        struct mask_array *ma;
+        int i;
 
-        list_for_each(ml, &tbl->mask_list) {
-                struct sw_flow_mask *m;
-                m = container_of(ml, struct sw_flow_mask, list);
-                if (mask_equal(mask, m))
-                        return m;
+        ma = ovsl_dereference(tbl->mask_array);
+        for (i = 0; i < ma->max; i++) {
+                struct sw_flow_mask *t;
+
+                t = ovsl_dereference(ma->masks[i]);
+                if (t && mask_equal(mask, t))
+                        return t;
         }
 
         return NULL;
@@ -551,15 +721,43 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
                             struct sw_flow_mask *new)
 {
         struct sw_flow_mask *mask;
+
         mask = flow_mask_find(tbl, new);
         if (!mask) {
+                struct mask_array *ma;
+                int i;
+
                 /* Allocate a new mask if none exists. */
                 mask = mask_alloc();
                 if (!mask)
                         return -ENOMEM;
+
                 mask->key = new->key;
                 mask->range = new->range;
-                list_add_rcu(&mask->list, &tbl->mask_list);
+
+                /* Add mask to mask-list. */
+                ma = ovsl_dereference(tbl->mask_array);
+                if (ma->count >= ma->max) {
+                        int err;
+
+                        err = tbl_mask_array_realloc(tbl, ma->max +
+                                                     MASK_ARRAY_SIZE_MIN);
+                        if (err) {
+                                kfree(mask);
+                                return err;
+                        }
+                        ma = ovsl_dereference(tbl->mask_array);
+                }
+                for (i = 0; i < ma->max; i++) {
+                        const struct sw_flow_mask *t;
+
+                        t = ovsl_dereference(ma->masks[i]);
+                        if (!t) {
+                                rcu_assign_pointer(ma->masks[i], mask);
+                                ma->count++;
+                                break;
+                        }
+                }
         } else {
                 BUG_ON(!mask->ref_count);
                 mask->ref_count++;
@@ -569,6 +767,7 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
         return 0;
 }
 
+/* Must be called with OVS mutex held. */
 int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
                         struct sw_flow_mask *mask)
 {
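
flow_mask_insert() and flow_mask_remove() above share a single sw_flow_mask among all flows that use an identical mask, tracked by ref_count under ovs_lock; only the last reference triggers the RCU-deferred free. Here is a minimal userspace sketch of that lifecycle, assuming a hypothetical mask_entry type and free() standing in for call_rcu():

#include <stdio.h>
#include <stdlib.h>

struct mask_entry {
        int ref_count;          /* number of flows currently using this mask */
};

/* Reuse an identical cached mask when one exists, otherwise allocate;
 * mirrors the flow_mask_find()/mask_alloc() split in flow_mask_insert(). */
static struct mask_entry *mask_get(struct mask_entry *existing)
{
        struct mask_entry *m = existing;

        if (m) {
                m->ref_count++;
                return m;
        }
        m = calloc(1, sizeof(*m));
        if (m)
                m->ref_count = 1;
        return m;
}

/* Drop one reference; the last user frees the mask, as flow_mask_remove()
 * does (there via call_rcu(), so RCU readers can still dereference it). */
static void mask_put(struct mask_entry *m)
{
        if (m && --m->ref_count == 0)
                free(m);        /* kernel: call_rcu(&mask->rcu, ...) */
}

int main(void)
{
        struct mask_entry *m = mask_get(NULL);    /* first flow: refcount 1 */
        struct mask_entry *shared = mask_get(m);  /* second flow reuses it  */

        printf("ref_count=%d\n", shared->ref_count); /* prints 2 */
        mask_put(shared);
        mask_put(m);                              /* last put frees */
        return 0;
}

Deferring the free through RCU is what allows ovs_flow_tbl_remove() above to leave flow->mask pointing at the mask: concurrent readers that found the flow before removal can still follow that pointer until the grace period ends.
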