From: Pravin B Shelar
Date: Wed, 23 Apr 2014 15:34:51 +0000 (-0700)
Subject: datapath: Convert mask list in mask array.
X-Git-Tag: sliver-openvswitch-2.2.90-1~3^2~46
X-Git-Url: http://git.onelab.eu/?p=sliver-openvswitch.git;a=commitdiff_plain;h=d49fc3ff53c65e4eca9cabd52ac63396746a7ef5

datapath: Convert mask list in mask array.

The mask cache stores the index of a mask in mask_list, so on packet
receive OVS needs to traverse the mask list to reach the cached mask.
An array is better suited for retrieving a cached mask by index. It
also allows a better cache replacement algorithm, since a mask's
existence can be checked directly.

Signed-off-by: Pravin B Shelar
Acked-by: Thomas Graf
---

diff --git a/datapath/flow.h b/datapath/flow.h
index d05a9f4c4..201869128 100644
--- a/datapath/flow.h
+++ b/datapath/flow.h
@@ -127,7 +127,6 @@ struct sw_flow_key_range {
 struct sw_flow_mask {
 	int ref_count;
 	struct rcu_head rcu;
-	struct list_head list;
 	struct sw_flow_key_range range;
 	struct sw_flow_key key;
 };
diff --git a/datapath/flow_table.c b/datapath/flow_table.c
index cc0eaf288..c8bd9d1ed 100644
--- a/datapath/flow_table.c
+++ b/datapath/flow_table.c
@@ -47,6 +47,7 @@
 #include "vlan.h"
 
 #define TBL_MIN_BUCKETS		1024
+#define MASK_ARRAY_SIZE_MIN	16
 #define REHASH_INTERVAL		(10 * 60 * HZ)
 
 #define MC_HASH_SHIFT		8
@@ -211,26 +212,83 @@ static struct table_instance *table_instance_alloc(int new_size)
 	return ti;
 }
 
+static void mask_array_rcu_cb(struct rcu_head *rcu)
+{
+	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);
+
+	kfree(ma);
+}
+
+static struct mask_array *tbl_mask_array_alloc(int size)
+{
+	struct mask_array *new;
+
+	new = kzalloc(sizeof(struct mask_array) +
+		      sizeof(struct sw_flow_mask *) * size, GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	new->count = 0;
+	new->max = size;
+
+	return new;
+}
+
+static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
+{
+	struct mask_array *old;
+	struct mask_array *new;
+
+	new = tbl_mask_array_alloc(size);
+	if (!new)
+		return -ENOMEM;
+
+	old = ovsl_dereference(tbl->mask_array);
+	if (old) {
+		int i;
+
+		for (i = 0; i < old->max; i++) {
+			if (old->masks[i])
+				new->masks[new->count++] = old->masks[i];
+		}
+	}
+	rcu_assign_pointer(tbl->mask_array, new);
+
+	if (old)
+		call_rcu(&old->rcu, mask_array_rcu_cb);
+
+	return 0;
+}
+
 int ovs_flow_tbl_init(struct flow_table *table)
 {
 	struct table_instance *ti;
+	struct mask_array *ma;
 
 	table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
 					  MC_HASH_ENTRIES, __alignof__(struct mask_cache_entry));
 	if (!table->mask_cache)
 		return -ENOMEM;
 
+	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
+	if (!ma)
+		goto free_mask_cache;
+
 	ti = table_instance_alloc(TBL_MIN_BUCKETS);
-	if (!ti) {
-		free_percpu(table->mask_cache);
-		return -ENOMEM;
-	}
+	if (!ti)
+		goto free_mask_array;
 
 	rcu_assign_pointer(table->ti, ti);
-	INIT_LIST_HEAD(&table->mask_list);
+	rcu_assign_pointer(table->mask_array, ma);
 	table->last_rehash = jiffies;
 	table->count = 0;
 	return 0;
+
+free_mask_array:
+	kfree((struct mask_array __force *)table->mask_array);
+free_mask_cache:
+	free_percpu(table->mask_cache);
+	return -ENOMEM;
 }
 
 static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
@@ -276,6 +334,7 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
 	struct table_instance *ti = (struct table_instance __force *)table->ti;
 
 	free_percpu(table->mask_cache);
+	kfree((struct mask_array __force *)table->mask_array);
 	table_instance_destroy(ti, false);
 }
 
@@ -457,17 +516,27 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
 
 static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				   struct table_instance *ti,
+				   struct mask_array *ma,
 				   const struct sw_flow_key *key,
-				   u32 *n_mask_hit)
+				   u32 *n_mask_hit,
+				   u32 *index)
 {
-	struct sw_flow_mask *mask;
 	struct sw_flow *flow;
+	int i;
 
-	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
-		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
-		if (flow)  /* Found */
-			return flow;
+	for (i = 0; i < ma->max; i++) {
+		struct sw_flow_mask *mask;
+
+		mask = rcu_dereference_ovsl(ma->masks[i]);
+		if (mask) {
+			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+			if (flow) { /* Found */
+				*index = i;
+				return flow;
+			}
+		}
 	}
+
 	return NULL;
 }
 
@@ -483,6 +552,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 					  u32 skb_hash,
 					  u32 *n_mask_hit)
 {
+	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
 	struct mask_cache_entry *entries, *ce, *del;
 	struct sw_flow *flow;
@@ -490,8 +560,11 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 	int seg;
 
 	*n_mask_hit = 0;
-	if (unlikely(!skb_hash))
-		return flow_lookup(tbl, ti, key, n_mask_hit);
+	if (unlikely(!skb_hash)) {
+		u32 __always_unused mask_index;
+
+		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
+	}
 
 	del = NULL;
 	entries = this_cpu_ptr(tbl->mask_cache);
@@ -504,36 +577,33 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 
 		if (ce->skb_hash == skb_hash) {
 			struct sw_flow_mask *mask;
-			int i;
+			struct sw_flow *flow;
 
-			i = 0;
-			list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
-				if (ce->mask_index == i++) {
-					flow = masked_flow_lookup(ti, key, mask,
-								  n_mask_hit);
-					if (flow)  /* Found */
-						return flow;
+			mask = rcu_dereference_ovsl(ma->masks[ce->mask_index]);
+			if (mask) {
+				flow = masked_flow_lookup(ti, key, mask,
+							  n_mask_hit);
+				if (flow)  /* Found */
+					return flow;
 
-					break;
-				}
 			}
 			del = ce;
 			break;
 		}
 
-		if (!del || (del->skb_hash && !ce->skb_hash)) {
+		if (!del || (del->skb_hash && !ce->skb_hash) ||
+		    (rcu_dereference_ovsl(ma->masks[del->mask_index]) &&
+		     !rcu_dereference_ovsl(ma->masks[ce->mask_index]))) {
 			del = ce;
 		}
 
 		hash >>= MC_HASH_SHIFT;
 	}
 
-	flow = flow_lookup(tbl, ti, key, n_mask_hit);
-
-	if (flow) {
+	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &del->mask_index);
+	if (flow)
 		del->skb_hash = skb_hash;
-		del->mask_index = (*n_mask_hit - 1);
-	}
+
 	return flow;
 }
 
@@ -541,21 +611,20 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
 				    const struct sw_flow_key *key)
 {
 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
 	u32 __always_unused n_mask_hit;
+	u32 __always_unused index;
 
 	n_mask_hit = 0;
-	return flow_lookup(tbl, ti, key, &n_mask_hit);
+	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
 }
 
 int ovs_flow_tbl_num_masks(const struct flow_table *table)
 {
-	struct sw_flow_mask *mask;
-	int num = 0;
-
-	list_for_each_entry(mask, &table->mask_list, list)
-		num++;
+	struct mask_array *ma;
 
-	return num;
+	ma = rcu_dereference_ovsl(table->mask_array);
+	return ma->count;
 }
 
 static struct table_instance *table_instance_expand(struct table_instance *ti)
@@ -575,7 +644,19 @@ static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
 {
 	mask->ref_count--;
 	if (!mask->ref_count) {
-		list_del_rcu(&mask->list);
+		struct mask_array *ma;
+		int i;
+
+		ma = ovsl_dereference(tbl->mask_array);
+		for (i = 0; i < ma->max; i++) {
+			if (mask == ovsl_dereference(ma->masks[i])) {
+				RCU_INIT_POINTER(ma->masks[i], NULL);
+				ma->count--;
+				goto free;
+			}
+		}
+		BUG();
+free:
 		call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
 	}
 }
@@ -620,13 +701,16 @@ static bool mask_equal(const struct sw_flow_mask *a,
 static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
 					   const struct sw_flow_mask *mask)
 {
-	struct list_head *ml;
+	struct mask_array *ma;
+	int i;
+
+	ma = ovsl_dereference(tbl->mask_array);
+	for (i = 0; i < ma->max; i++) {
+		struct sw_flow_mask *t;
 
-	list_for_each(ml, &tbl->mask_list) {
-		struct sw_flow_mask *m;
-		m = container_of(ml, struct sw_flow_mask, list);
-		if (mask_equal(mask, m))
-			return m;
+		t = ovsl_dereference(ma->masks[i]);
+		if (t && mask_equal(mask, t))
+			return t;
 	}
 
 	return NULL;
@@ -637,15 +721,43 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
 			    struct sw_flow_mask *new)
 {
 	struct sw_flow_mask *mask;
+
 	mask = flow_mask_find(tbl, new);
 	if (!mask) {
+		struct mask_array *ma;
+		int i;
+
 		/* Allocate a new mask if none exists. */
 		mask = mask_alloc();
 		if (!mask)
 			return -ENOMEM;
+
 		mask->key = new->key;
 		mask->range = new->range;
-		list_add_tail_rcu(&mask->list, &tbl->mask_list);
+
+		/* Add mask to mask-list. */
+		ma = ovsl_dereference(tbl->mask_array);
+		if (ma->count >= ma->max) {
+			int err;
+
+			err = tbl_mask_array_realloc(tbl, ma->max +
+						     MASK_ARRAY_SIZE_MIN);
+			if (err) {
+				kfree(mask);
+				return err;
+			}
+			ma = ovsl_dereference(tbl->mask_array);
+		}
+		for (i = 0; i < ma->max; i++) {
+			const struct sw_flow_mask *t;
+
+			t = ovsl_dereference(ma->masks[i]);
+			if (!t) {
+				rcu_assign_pointer(ma->masks[i], mask);
+				ma->count++;
+				break;
+			}
+		}
 	} else {
 		BUG_ON(!mask->ref_count);
 		mask->ref_count++;
diff --git a/datapath/flow_table.h b/datapath/flow_table.h
index 281d1da8d..ee8695371 100644
--- a/datapath/flow_table.h
+++ b/datapath/flow_table.h
@@ -41,6 +41,12 @@ struct mask_cache_entry {
 	u32 mask_index;
 };
 
+struct mask_array {
+	struct rcu_head rcu;
+	int count, max;
+	struct sw_flow_mask __rcu *masks[];
+};
+
 struct table_instance {
 	struct flex_array *buckets;
 	unsigned int n_buckets;
@@ -53,7 +59,7 @@ struct table_instance {
 struct flow_table {
 	struct table_instance __rcu *ti;
 	struct mask_cache_entry __percpu *mask_cache;
-	struct list_head mask_list;
+	struct mask_array __rcu *mask_array;
 	unsigned long last_rehash;
 	unsigned int count;
 };
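
For readers new to the datapath, the payoff of the conversion is easiest
to see in a standalone sketch. The program below is a minimal userspace
illustration, not the datapath code itself: the types are simplified
stand-ins (no RCU, no flow keys), and the helpers mask_array_alloc() and
cached_mask() are invented for the example. It shows why a cached
mask_index works better against an array than against a list: retrieval
is a single bounds-checked load, and a NULL slot directly reports that
the mask has since been removed, which is the same existence check the
new cache-eviction condition above relies on.

/*
 * Userspace sketch, not the OVS datapath code: simplified stand-in
 * types, no RCU.  Demonstrates the indexed lookup that the mask
 * array enables for the per-CPU mask cache.
 */
#include <stdio.h>
#include <stdlib.h>

struct sw_flow_mask {
	int ref_count;
};

struct mask_array {
	int count, max;
	struct sw_flow_mask *masks[];	/* one slot per mask */
};

static struct mask_array *mask_array_alloc(int size)
{
	struct mask_array *ma = calloc(1, sizeof(*ma) +
					  size * sizeof(ma->masks[0]));

	if (ma)
		ma->max = size;
	return ma;
}

/* With the old mask_list, a cached index forced a list walk; with the
 * array it is one bounds-checked load, and NULL means the mask is gone. */
static struct sw_flow_mask *cached_mask(const struct mask_array *ma, int index)
{
	if (index < 0 || index >= ma->max)
		return NULL;
	return ma->masks[index];
}

int main(void)
{
	struct mask_array *ma = mask_array_alloc(16);
	struct sw_flow_mask m = { .ref_count = 1 };

	ma->masks[3] = &m;
	ma->count = 1;

	printf("slot 3: %s\n", cached_mask(ma, 3) ? "hit" : "empty");
	printf("slot 5: %s\n", cached_mask(ma, 5) ? "hit" : "empty");

	free(ma);
	return 0;
}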