X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=datapath%2Fflow_table.c;h=c8bd9d1eddc4d8c663f74585b50e75bc22a49b09;hb=003ce655b7116d18c86a74c50391e54990346931;hp=e8e8511be909d6ad90f223497af6aa8dfe1b63c0;hpb=a097c0b230333cbc038be4ec67bf34b5b26c5112;p=sliver-openvswitch.git

diff --git a/datapath/flow_table.c b/datapath/flow_table.c
index e8e8511be..c8bd9d1ed 100644
--- a/datapath/flow_table.c
+++ b/datapath/flow_table.c
@@ -25,7 +25,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -46,7 +46,16 @@
 #include "vlan.h"
 
+#define TBL_MIN_BUCKETS		1024
+#define MASK_ARRAY_SIZE_MIN	16
+#define REHASH_INTERVAL		(10 * 60 * HZ)
+
+#define MC_HASH_SHIFT		8
+#define MC_HASH_ENTRIES		(1u << MC_HASH_SHIFT)
+#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
+
 static struct kmem_cache *flow_cache;
+struct kmem_cache *flow_stats_cache __read_mostly;
 
 static u16 range_n_bytes(const struct sw_flow_key_range *range)
 {
@@ -56,8 +65,10 @@ static u16 range_n_bytes(const struct sw_flow_key_range *range)
 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 		       const struct sw_flow_mask *mask)
 {
-	const long *m = (long *)((u8 *)&mask->key + mask->range.start);
-	const long *s = (long *)((u8 *)src + mask->range.start);
+	const long *m = (const long *)((const u8 *)&mask->key +
+				       mask->range.start);
+	const long *s = (const long *)((const u8 *)src +
+				       mask->range.start);
 	long *d = (long *)((u8 *)dst + mask->range.start);
 	int i;
 
@@ -72,16 +83,40 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
 struct sw_flow *ovs_flow_alloc(void)
 {
 	struct sw_flow *flow;
+	struct flow_stats *stats;
+	int node;
 
 	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
 	if (!flow)
 		return ERR_PTR(-ENOMEM);
 
-	spin_lock_init(&flow->lock);
 	flow->sf_acts = NULL;
 	flow->mask = NULL;
+	flow->stats_last_writer = NUMA_NO_NODE;
+
+	/* Initialize the default stat node. */
+	stats = kmem_cache_alloc_node(flow_stats_cache,
+				      GFP_KERNEL | __GFP_ZERO, 0);
+	if (!stats)
+		goto err;
+
+	spin_lock_init(&stats->lock);
+
+	RCU_INIT_POINTER(flow->stats[0], stats);
+
+	for_each_node(node)
+		if (node != 0)
+			RCU_INIT_POINTER(flow->stats[node], NULL);
 
 	return flow;
+err:
+	kmem_cache_free(flow_cache, flow);
+	return ERR_PTR(-ENOMEM);
+}
+
+int ovs_flow_tbl_count(struct flow_table *table)
+{
+	return table->count;
 }
 
 static struct flex_array *alloc_buckets(unsigned int n_buckets)
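Note: the hunk above replaces the single per-flow spinlock with per-NUMA-node statistics: only the node-0 stats block is allocated up front, and every other node's slot starts out NULL, so a node pays for a stats block only if it ever writes to the flow. The sketch below is a minimal userspace model of that allocation pattern; the types, the fixed MAX_NODES, and flow_alloc() itself are simplified stand-ins, not part of the patch.

```c
#include <stdlib.h>

#define MAX_NODES 4                     /* stand-in for num_possible_nodes() */

struct flow_stats {
	unsigned long packet_count;
	unsigned long byte_count;
};

struct sw_flow {
	int stats_last_writer;          /* -1 plays the role of NUMA_NO_NODE */
	struct flow_stats *stats[MAX_NODES];
};

static struct sw_flow *flow_alloc(void)
{
	struct sw_flow *flow = malloc(sizeof(*flow));
	int node;

	if (!flow)
		return NULL;
	flow->stats_last_writer = -1;

	/* Only node 0 gets a stats block up front; the other nodes are
	 * filled in lazily on their first write to this flow. */
	flow->stats[0] = calloc(1, sizeof(struct flow_stats));
	if (!flow->stats[0]) {
		free(flow);
		return NULL;
	}
	for (node = 1; node < MAX_NODES; node++)
		flow->stats[node] = NULL;
	return flow;
}
```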
@@ -109,7 +144,13 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
 
 static void flow_free(struct sw_flow *flow)
 {
-	kfree((struct sf_flow_acts __force *)flow->sf_acts);
+	int node;
+
+	kfree((struct sw_flow_actions __force *)flow->sf_acts);
+	for_each_node(node)
+		if (flow->stats[node])
+			kmem_cache_free(flow_stats_cache,
+					(struct flow_stats __force *)flow->stats[node]);
 	kmem_cache_free(flow_cache, flow);
 }
 
@@ -120,13 +161,18 @@ static void rcu_free_flow_callback(struct rcu_head *rcu)
 	flow_free(flow);
 }
 
+static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
+{
+	struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);
+
+	kfree(mask);
+}
+
 void ovs_flow_free(struct sw_flow *flow, bool deferred)
 {
 	if (!flow)
 		return;
 
-	ovs_sw_flow_mask_del_ref(flow->mask, deferred);
-
 	if (deferred)
 		call_rcu(&flow->rcu, rcu_free_flow_callback);
 	else
@@ -138,93 +184,161 @@ static void free_buckets(struct flex_array *buckets)
 	flex_array_free(buckets);
 }
 
-static void __flow_tbl_destroy(struct flow_table *table)
+
+static void __table_instance_destroy(struct table_instance *ti)
 {
-	int i;
+	free_buckets(ti->buckets);
+	kfree(ti);
+}
 
-	if (table->keep_flows)
-		goto skip_flows;
+static struct table_instance *table_instance_alloc(int new_size)
+{
+	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
 
-	for (i = 0; i < table->n_buckets; i++) {
-		struct sw_flow *flow;
-		struct hlist_head *head = flex_array_get(table->buckets, i);
-		struct hlist_node *n;
-		int ver = table->node_ver;
+	if (!ti)
+		return NULL;
 
-		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
-			hlist_del(&flow->hash_node[ver]);
-			ovs_flow_free(flow, false);
-		}
+	ti->buckets = alloc_buckets(new_size);
+
+	if (!ti->buckets) {
+		kfree(ti);
+		return NULL;
 	}
+	ti->n_buckets = new_size;
+	ti->node_ver = 0;
+	ti->keep_flows = false;
+	get_random_bytes(&ti->hash_seed, sizeof(u32));
 
-	BUG_ON(!list_empty(table->mask_list));
-	kfree(table->mask_list);
+	return ti;
+}
 
-skip_flows:
-	free_buckets(table->buckets);
-	kfree(table);
+static void mask_array_rcu_cb(struct rcu_head *rcu)
+{
+	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);
+
+	kfree(ma);
 }
 
-static struct flow_table *__flow_tbl_alloc(int new_size)
+static struct mask_array *tbl_mask_array_alloc(int size)
 {
-	struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
+	struct mask_array *new;
 
-	if (!table)
+	new = kzalloc(sizeof(struct mask_array) +
+		      sizeof(struct sw_flow_mask *) * size, GFP_KERNEL);
+	if (!new)
 		return NULL;
 
-	table->buckets = alloc_buckets(new_size);
+	new->count = 0;
+	new->max = size;
 
-	if (!table->buckets) {
-		kfree(table);
-		return NULL;
+	return new;
+}
+
+static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
+{
+	struct mask_array *old;
+	struct mask_array *new;
+
+	new = tbl_mask_array_alloc(size);
+	if (!new)
+		return -ENOMEM;
+
+	old = ovsl_dereference(tbl->mask_array);
+	if (old) {
+		int i;
+
+		for (i = 0; i < old->max; i++) {
+			if (old->masks[i])
+				new->masks[new->count++] = old->masks[i];
+		}
 	}
 
-	table->n_buckets = new_size;
-	table->count = 0;
-	table->node_ver = 0;
-	table->keep_flows = false;
-	get_random_bytes(&table->hash_seed, sizeof(u32));
-	table->mask_list = NULL;
+	rcu_assign_pointer(tbl->mask_array, new);
+
+	if (old)
+		call_rcu(&old->rcu, mask_array_rcu_cb);
 
-	return table;
+	return 0;
 }
 
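Note: tbl_mask_array_realloc() follows the standard RCU copy-then-publish shape: build a private array, compact the surviving entries into it, publish it with one pointer store, and free the old array only after a grace period (call_rcu() above). A condensed userspace model, with a C11 release store standing in for rcu_assign_pointer() and the deferred free reduced to a comment:

```c
#include <stdatomic.h>
#include <stdlib.h>

struct mask_array {
	int count, max;
	void *masks[];                  /* flexible array, as in the patch */
};

static _Atomic(struct mask_array *) g_mask_array;

static int mask_array_realloc(int size)
{
	struct mask_array *old, *new;
	int i;

	new = calloc(1, sizeof(*new) + size * sizeof(void *));
	if (!new)
		return -1;
	new->max = size;

	old = atomic_load_explicit(&g_mask_array, memory_order_acquire);
	if (old) {
		/* Compact the live entries into the front of the new array. */
		for (i = 0; i < old->max; i++)
			if (old->masks[i])
				new->masks[new->count++] = old->masks[i];
	}

	/* Publish with release semantics so a reader never observes a
	 * half-initialized array; the old array may be freed only after
	 * all readers are done (call_rcu() in the kernel). */
	atomic_store_explicit(&g_mask_array, new, memory_order_release);
	return 0;
}
```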
-struct flow_table *ovs_flow_tbl_alloc(int new_size)
+int ovs_flow_tbl_init(struct flow_table *table)
 {
-	struct flow_table *table = __flow_tbl_alloc(new_size);
+	struct table_instance *ti;
+	struct mask_array *ma;
 
-	if (!table)
-		return NULL;
+	table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
+					   MC_HASH_ENTRIES,
+					   __alignof__(struct mask_cache_entry));
+	if (!table->mask_cache)
+		return -ENOMEM;
 
-	table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
-	if (!table->mask_list) {
-		table->keep_flows = true;
-		__flow_tbl_destroy(table);
-		return NULL;
-	}
-	INIT_LIST_HEAD(table->mask_list);
+	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
+	if (!ma)
+		goto free_mask_cache;
+
+	ti = table_instance_alloc(TBL_MIN_BUCKETS);
+	if (!ti)
+		goto free_mask_array;
+
+	rcu_assign_pointer(table->ti, ti);
+	rcu_assign_pointer(table->mask_array, ma);
+	table->last_rehash = jiffies;
+	table->count = 0;
+	return 0;
 
-	return table;
+free_mask_array:
+	kfree((struct mask_array __force *)table->mask_array);
+free_mask_cache:
+	free_percpu(table->mask_cache);
+	return -ENOMEM;
 }
 
 static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
 {
-	struct flow_table *table = container_of(rcu, struct flow_table, rcu);
+	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
 
-	__flow_tbl_destroy(table);
+	__table_instance_destroy(ti);
 }
 
-void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
+static void table_instance_destroy(struct table_instance *ti, bool deferred)
 {
-	if (!table)
+	int i;
+
+	if (!ti)
 		return;
 
+	if (ti->keep_flows)
+		goto skip_flows;
+
+	for (i = 0; i < ti->n_buckets; i++) {
+		struct sw_flow *flow;
+		struct hlist_head *head = flex_array_get(ti->buckets, i);
+		struct hlist_node *n;
+		int ver = ti->node_ver;
+
+		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
+			hlist_del_rcu(&flow->hash_node[ver]);
+			ovs_flow_free(flow, deferred);
+		}
+	}
+
+skip_flows:
 	if (deferred)
-		call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
+		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
 	else
-		__flow_tbl_destroy(table);
+		__table_instance_destroy(ti);
+}
+
+/* No need for locking; this function is called from an RCU callback or
+ * the error path. */
+void ovs_flow_tbl_destroy(struct flow_table *table)
+{
+	struct table_instance *ti = (struct table_instance __force *)table->ti;
+
+	free_percpu(table->mask_cache);
+	kfree((struct mask_array __force *)table->mask_array);
+	table_instance_destroy(ti, false);
 }
 
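Note: ovs_flow_tbl_init() reserves MC_HASH_ENTRIES = 1 << 8 = 256 cache slots per CPU. struct mask_cache_entry is not defined anywhere in this diff; assuming it holds exactly the two u32 fields the lookup code touches (skb_hash and mask_index), the cache costs 2 KiB per CPU. The compile-time restatement below is a sketch under that assumed layout only:

```c
#include <stdint.h>

struct mask_cache_entry {       /* assumed layout; not shown in this diff */
	uint32_t skb_hash;
	uint32_t mask_index;
};

#define MC_HASH_SHIFT   8
#define MC_HASH_ENTRIES (1u << MC_HASH_SHIFT)

_Static_assert(sizeof(struct mask_cache_entry) * MC_HASH_ENTRIES == 2048,
	       "per-CPU mask cache is 2 KiB under the assumed entry layout");
```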
-struct sw_flow *ovs_flow_tbl_dump_next(struct flow_table *table,
+struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
 				       u32 *bucket, u32 *last)
 {
 	struct sw_flow *flow;
@@ -232,10 +346,10 @@ struct sw_flow *ovs_flow_tbl_dump_next(struct flow_table *table,
 	int ver;
 	int i;
 
-	ver = table->node_ver;
-	while (*bucket < table->n_buckets) {
+	ver = ti->node_ver;
+	while (*bucket < ti->n_buckets) {
 		i = 0;
-		head = flex_array_get(table->buckets, *bucket);
+		head = flex_array_get(ti->buckets, *bucket);
 		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
 			if (i < *last) {
 				i++;
@@ -251,25 +365,23 @@ struct sw_flow *ovs_flow_tbl_dump_next(struct flow_table *table,
 	return NULL;
 }
 
-static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
+static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
 {
-	hash = jhash_1word(hash, table->hash_seed);
-	return flex_array_get(table->buckets,
-			      (hash & (table->n_buckets - 1)));
+	hash = jhash_1word(hash, ti->hash_seed);
+	return flex_array_get(ti->buckets,
+			      (hash & (ti->n_buckets - 1)));
 }
 
-static void __tbl_insert(struct flow_table *table, struct sw_flow *flow)
+static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
 {
 	struct hlist_head *head;
 
-	head = find_bucket(table, flow->hash);
-	hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
-
-	table->count++;
+	head = find_bucket(ti, flow->hash);
+	hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
 }
 
-static void flow_table_copy_flows(struct flow_table *old,
-				  struct flow_table *new)
+static void flow_table_copy_flows(struct table_instance *old,
+				  struct table_instance *new)
 {
 	int old_ver;
 	int i;
@@ -285,47 +397,54 @@ static void flow_table_copy_flows(struct flow_table *old,
 		head = flex_array_get(old->buckets, i);
 
 		hlist_for_each_entry(flow, head, hash_node[old_ver])
-			__tbl_insert(new, flow);
+			table_instance_insert(new, flow);
 	}
 
-	new->mask_list = old->mask_list;
 	old->keep_flows = true;
 }
 
-static struct flow_table *__flow_tbl_rehash(struct flow_table *table,
+static struct table_instance *table_instance_rehash(struct table_instance *ti,
 					    int n_buckets)
 {
-	struct flow_table *new_table;
+	struct table_instance *new_ti;
 
-	new_table = __flow_tbl_alloc(n_buckets);
-	if (!new_table)
-		return ERR_PTR(-ENOMEM);
+	new_ti = table_instance_alloc(n_buckets);
+	if (!new_ti)
+		return NULL;
 
-	flow_table_copy_flows(table, new_table);
+	flow_table_copy_flows(ti, new_ti);
 
-	return new_table;
+	return new_ti;
 }
 
-struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
+int ovs_flow_tbl_flush(struct flow_table *flow_table)
 {
-	return __flow_tbl_rehash(table, table->n_buckets);
-}
+	struct table_instance *old_ti;
+	struct table_instance *new_ti;
 
-struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
-{
-	return __flow_tbl_rehash(table, table->n_buckets * 2);
+	old_ti = ovsl_dereference(flow_table->ti);
+	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
+	if (!new_ti)
+		return -ENOMEM;
+
+	rcu_assign_pointer(flow_table->ti, new_ti);
+	flow_table->last_rehash = jiffies;
+	flow_table->count = 0;
+
+	table_instance_destroy(old_ti, true);
+	return 0;
 }
 
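Note: flow_table_copy_flows() can fill a new table instance while readers still walk the old one because every flow embeds two hash-list links and each instance owns one of them, selected by node_ver (the copy flips the version, as in the pre-existing flow table code). A simplified, singly linked userspace model of that double linkage; all names here are illustrative:

```c
struct flow {
	struct flow *next[2];           /* stands in for hash_node[2] */
};

struct table_instance {
	struct flow **buckets;
	int n_buckets;
	int node_ver;                   /* which next[] link this table owns */
};

/* Inserting into a rehash target uses the opposite node_ver, so the same
 * flow can sit in the old and the new instance at the same time without
 * either traversal ever seeing the other's pointers. */
static void instance_insert(struct table_instance *ti, struct flow *flow,
			    unsigned int bucket)
{
	flow->next[ti->node_ver] = ti->buckets[bucket];
	ti->buckets[bucket] = flow;
}
```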
 static u32 flow_hash(const struct sw_flow_key *key, int key_start,
 		     int key_end)
 {
-	u32 *hash_key = (u32 *)((u8 *)key + key_start);
+	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
 	int hash_u32s = (key_end - key_start) >> 2;
 
 	/* Make sure number of hash bytes are multiple of u32. */
 	BUILD_BUG_ON(sizeof(long) % sizeof(u32));
 
-	return jhash2(hash_key, hash_u32s, 0);
+	return arch_fast_hash2(hash_key, hash_u32s, 0);
 }
 
 static int flow_key_start(const struct sw_flow_key *key)
@@ -341,8 +460,8 @@ static bool cmp_key(const struct sw_flow_key *key1,
 		    const struct sw_flow_key *key2,
 		    int key_start, int key_end)
 {
-	const long *cp1 = (long *)((u8 *)key1 + key_start);
-	const long *cp2 = (long *)((u8 *)key2 + key_start);
+	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
+	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
 	long diffs = 0;
 	int i;
 
@@ -369,9 +488,10 @@ bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
 	return cmp_key(&flow->unmasked_key, key, key_start, key_end);
 }
 
-static struct sw_flow *masked_flow_lookup(struct flow_table *table,
+static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
 					  const struct sw_flow_key *unmasked,
-					  struct sw_flow_mask *mask)
+					  struct sw_flow_mask *mask,
+					  u32 *n_mask_hit)
 {
 	struct sw_flow *flow;
 	struct hlist_head *head;
@@ -382,9 +502,10 @@ static struct sw_flow *masked_flow_lookup(struct flow_table *table,
 
 	ovs_flow_mask_key(&masked_key, unmasked, mask);
 	hash = flow_hash(&masked_key, key_start, key_end);
-	head = find_bucket(table, hash);
-	hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
-		if (flow->mask == mask &&
+	head = find_bucket(ti, hash);
+	(*n_mask_hit)++;
+	hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
+		if (flow->mask == mask && flow->hash == hash &&
 		    flow_cmp_masked_key(flow, &masked_key,
 					key_start, key_end))
 			return flow;
@@ -392,109 +513,290 @@ static struct sw_flow *masked_flow_lookup(struct flow_table *table,
 	return NULL;
 }
 
-struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
-				    const struct sw_flow_key *key)
+
+static struct sw_flow *flow_lookup(struct flow_table *tbl,
+				   struct table_instance *ti,
+				   struct mask_array *ma,
+				   const struct sw_flow_key *key,
+				   u32 *n_mask_hit,
+				   u32 *index)
 {
-	struct sw_flow *flow = NULL;
-	struct sw_flow_mask *mask;
+	struct sw_flow *flow;
+	int i;
 
-	list_for_each_entry_rcu(mask, tbl->mask_list, list) {
-		flow = masked_flow_lookup(tbl, key, mask);
-		if (flow) /* Found */
+	for (i = 0; i < ma->max; i++) {
+		struct sw_flow_mask *mask;
+
+		mask = rcu_dereference_ovsl(ma->masks[i]);
+		if (mask) {
+			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
+			if (flow) { /* Found */
+				*index = i;
+				return flow;
+			}
+		}
+	}
+
+	return NULL;
+}
+
+/* mask_cache maps a flow to a probable mask. The cache is only loosely
+ * coupled to the mask array: an update to the mask array can leave stale
+ * entries in the cache, so a cached mask index is always re-validated.
+ * The cache is per-CPU and is divided into MC_HASH_SEGS segments; on a
+ * hash collision the entry is hashed into the next segment.
+ */
+struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
+					  const struct sw_flow_key *key,
+					  u32 skb_hash,
+					  u32 *n_mask_hit)
+{
+	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
+	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+	struct mask_cache_entry *entries, *ce, *del;
+	struct sw_flow *flow;
+	u32 hash = skb_hash;
+	int seg;
+
+	*n_mask_hit = 0;
+	if (unlikely(!skb_hash)) {
+		u32 __always_unused mask_index;
+
+		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
+	}
+
+	del = NULL;
+	entries = this_cpu_ptr(tbl->mask_cache);
+
+	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
+		int index;
+
+		index = hash & (MC_HASH_ENTRIES - 1);
+		ce = &entries[index];
+
+		if (ce->skb_hash == skb_hash) {
+			struct sw_flow_mask *mask;
+			struct sw_flow *flow;
+
+			mask = rcu_dereference_ovsl(ma->masks[ce->mask_index]);
+			if (mask) {
+				flow = masked_flow_lookup(ti, key, mask,
							  n_mask_hit);
+				if (flow)  /* Found */
+					return flow;
+
+			}
+			del = ce;
 			break;
+		}
+
+		if (!del || (del->skb_hash && !ce->skb_hash) ||
+		    (rcu_dereference_ovsl(ma->masks[del->mask_index]) &&
+		     !rcu_dereference_ovsl(ma->masks[ce->mask_index]))) {
+			del = ce;
+		}
+
+		hash >>= MC_HASH_SHIFT;
 	}
 
+	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &del->mask_index);
+	if (flow)
+		del->skb_hash = skb_hash;
+
 	return flow;
 }
 
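Note: ovs_flow_tbl_lookup_stats() consumes the 32-bit skb hash eight bits at a time, so each packet gets MC_HASH_SEGS = 4 candidate cache slots before the code falls back to flow_lookup()'s full scan over every mask. A self-contained model of just the probing arithmetic, with the same assumed entry layout as earlier:

```c
#include <stdint.h>

#define MC_HASH_SHIFT   8
#define MC_HASH_ENTRIES (1u << MC_HASH_SHIFT)
#define MC_HASH_SEGS    ((int)(sizeof(uint32_t) * 8 / MC_HASH_SHIFT))

struct cache_entry { uint32_t skb_hash; uint32_t mask_index; };

/* Returns the cached mask index, or -1 when all four segments miss.
 * A hit is only a hint; the caller must still validate the mask. */
static int cache_probe(const struct cache_entry *entries, uint32_t skb_hash)
{
	uint32_t hash = skb_hash;
	int seg;

	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		const struct cache_entry *ce =
			&entries[hash & (MC_HASH_ENTRIES - 1)];

		if (ce->skb_hash == skb_hash)
			return (int)ce->mask_index;
		hash >>= MC_HASH_SHIFT;   /* next 8 bits pick the next slot */
	}
	return -1;
}
```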
-void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
+				    const struct sw_flow_key *key)
 {
-	flow->hash = flow_hash(&flow->key, flow->mask->range.start,
-			flow->mask->range.end);
-	__tbl_insert(table, flow);
-}
+	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
+	u32 __always_unused n_mask_hit;
+	u32 __always_unused index;
 
-void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
-{
-	BUG_ON(table->count == 0);
-	hlist_del_rcu(&flow->hash_node[table->node_ver]);
-	table->count--;
+	n_mask_hit = 0;
+	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
 }
 
-struct sw_flow_mask *ovs_sw_flow_mask_alloc(void)
+int ovs_flow_tbl_num_masks(const struct flow_table *table)
 {
-	struct sw_flow_mask *mask;
+	struct mask_array *ma;
 
-	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
-	if (mask)
-		mask->ref_count = 0;
+	ma = rcu_dereference_ovsl(table->mask_array);
+	return ma->count;
+}
 
-	return mask;
+static struct table_instance *table_instance_expand(struct table_instance *ti)
+{
+	return table_instance_rehash(ti, ti->n_buckets * 2);
 }
 
-void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask)
+/* Remove 'mask' from the mask list, if it is not needed any more. */
+static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
 {
-	mask->ref_count++;
+	if (mask) {
+		/* ovs-lock is required to protect mask-refcount and
+		 * mask list.
+		 */
+		ASSERT_OVSL();
+		BUG_ON(!mask->ref_count);
+		mask->ref_count--;
+
+		if (!mask->ref_count) {
+			struct mask_array *ma;
+			int i;
+
+			ma = ovsl_dereference(tbl->mask_array);
+			for (i = 0; i < ma->max; i++) {
+				if (mask == ovsl_dereference(ma->masks[i])) {
+					RCU_INIT_POINTER(ma->masks[i], NULL);
+					ma->count--;
+					goto free;
+				}
+			}
+			BUG();
+free:
+			call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
+		}
+	}
 }
 
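Note: masks are shared and reference-counted: flow_mask_remove() drops one reference, and the last put clears the array slot (leaving it NULL for later inserts to reuse) and defers the actual kfree() past an RCU grace period. A userspace model of that release path, with the deferred free reduced to a comment:

```c
#include <stddef.h>

struct mask {
	int ref_count;
};

struct mask_array {
	int count, max;
	struct mask *masks[];
};

static void mask_put(struct mask_array *ma, struct mask *mask)
{
	int i;

	if (!mask || --mask->ref_count)
		return;

	for (i = 0; i < ma->max; i++) {
		if (ma->masks[i] == mask) {
			ma->masks[i] = NULL;   /* slot reused by inserts */
			ma->count--;
			break;
		}
	}
	/* defer_free(mask): freed only after a grace period in the kernel */
}
```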
-static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
+/* Must be called with OVS mutex held. */
+void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
 {
-	struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);
+	struct table_instance *ti = ovsl_dereference(table->ti);
 
-	kfree(mask);
+	BUG_ON(table->count == 0);
+	hlist_del_rcu(&flow->hash_node[ti->node_ver]);
+	table->count--;
+
+	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
+	 * accessible as long as the RCU read lock is held. */
+	flow_mask_remove(table, flow->mask);
 }
 
-void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
+static struct sw_flow_mask *mask_alloc(void)
 {
-	if (!mask)
-		return;
+	struct sw_flow_mask *mask;
 
-	BUG_ON(!mask->ref_count);
-	mask->ref_count--;
+	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
+	if (mask)
+		mask->ref_count = 1;
 
-	if (!mask->ref_count) {
-		list_del_rcu(&mask->list);
-		if (deferred)
-			call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
-		else
-			kfree(mask);
-	}
+	return mask;
 }
 
 static bool mask_equal(const struct sw_flow_mask *a,
 		       const struct sw_flow_mask *b)
 {
-	u8 *a_ = (u8 *)&a->key + a->range.start;
-	u8 *b_ = (u8 *)&b->key + b->range.start;
+	const u8 *a_ = (const u8 *)&a->key + a->range.start;
+	const u8 *b_ = (const u8 *)&b->key + b->range.start;
 
 	return  (a->range.end == b->range.end)
 		&& (a->range.start == b->range.start)
 		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
 }
 
-struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
+static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
 					   const struct sw_flow_mask *mask)
 {
-	struct list_head *ml;
+	struct mask_array *ma;
+	int i;
 
-	list_for_each(ml, tbl->mask_list) {
-		struct sw_flow_mask *m;
-		m = container_of(ml, struct sw_flow_mask, list);
-		if (mask_equal(mask, m))
-			return m;
+	ma = ovsl_dereference(tbl->mask_array);
+	for (i = 0; i < ma->max; i++) {
+		struct sw_flow_mask *t;
+
+		t = ovsl_dereference(ma->masks[i]);
+		if (t && mask_equal(mask, t))
+			return t;
 	}
 
 	return NULL;
 }
 
-/**
- * add a new mask into the mask list.
- * The caller needs to make sure that 'mask' is not the same
- * as any masks that are already on the list.
- */
-void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
+/* Add 'mask' into the mask list, if it is not already there. */
+static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
+			    struct sw_flow_mask *new)
 {
-	list_add_rcu(&mask->list, tbl->mask_list);
+	struct sw_flow_mask *mask;
+
+	mask = flow_mask_find(tbl, new);
+	if (!mask) {
+		struct mask_array *ma;
+		int i;
+
+		/* Allocate a new mask if none exists. */
+		mask = mask_alloc();
+		if (!mask)
+			return -ENOMEM;
+
+		mask->key = new->key;
+		mask->range = new->range;
+
+		/* Add mask to mask-list. */
+		ma = ovsl_dereference(tbl->mask_array);
+		if (ma->count >= ma->max) {
+			int err;
+
+			err = tbl_mask_array_realloc(tbl, ma->max +
+						     MASK_ARRAY_SIZE_MIN);
+			if (err) {
+				kfree(mask);
+				return err;
+			}
+			ma = ovsl_dereference(tbl->mask_array);
+		}
+
+		for (i = 0; i < ma->max; i++) {
+			const struct sw_flow_mask *t;
+
+			t = ovsl_dereference(ma->masks[i]);
+			if (!t) {
+				rcu_assign_pointer(ma->masks[i], mask);
+				ma->count++;
+				break;
+			}
+		}
+	} else {
+		BUG_ON(!mask->ref_count);
+		mask->ref_count++;
+	}
+
+	flow->mask = mask;
+	return 0;
+}
+
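Note: flow_mask_insert() deduplicates masks by byte-comparing only the range each mask covers, so any number of flows with the same wildcard pattern end up sharing one refcounted mask object. A compact userspace model of the find-or-create step; the types are simplified and growing a full array is left to the caller, as in the patch:

```c
#include <stdlib.h>
#include <string.h>

struct mask {
	int ref_count;
	int start, end;                 /* byte range this mask covers */
	unsigned char bits[64];
};

static int mask_equal(const struct mask *a, const struct mask *b)
{
	return a->start == b->start && a->end == b->end &&
	       !memcmp(a->bits + a->start, b->bits + b->start,
		       a->end - a->start);
}

static struct mask *mask_find_or_create(struct mask **slots, int max,
					const struct mask *new)
{
	int i, empty = -1;

	for (i = 0; i < max; i++) {
		if (!slots[i]) {
			if (empty < 0)
				empty = i;
		} else if (mask_equal(slots[i], new)) {
			slots[i]->ref_count++;  /* share the existing mask */
			return slots[i];
		}
	}
	if (empty < 0)
		return NULL;                    /* caller grows the array */
	slots[empty] = malloc(sizeof(*new));
	if (!slots[empty])
		return NULL;
	memcpy(slots[empty], new, sizeof(*new));
	slots[empty]->ref_count = 1;
	return slots[empty];
}
```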
+/* Must be called with OVS mutex held. */
+int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
+			struct sw_flow_mask *mask)
+{
+	struct table_instance *new_ti = NULL;
+	struct table_instance *ti;
+	int err;
+
+	err = flow_mask_insert(table, flow, mask);
+	if (err)
+		return err;
+
+	flow->hash = flow_hash(&flow->key, flow->mask->range.start,
+			       flow->mask->range.end);
+	ti = ovsl_dereference(table->ti);
+	table_instance_insert(ti, flow);
+	table->count++;
+
+	/* Expand table, if necessary, to make room. */
+	if (table->count > ti->n_buckets)
+		new_ti = table_instance_expand(ti);
+	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
+		new_ti = table_instance_rehash(ti, ti->n_buckets);
+
+	if (new_ti) {
+		rcu_assign_pointer(table->ti, new_ti);
+		table_instance_destroy(ti, true);
+		table->last_rehash = jiffies;
+	}
+	return 0;
 }
 
 /* Initializes the flow module.
@@ -504,16 +806,28 @@ int ovs_flow_init(void)
 	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
 	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
 
-	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
-				       0, NULL);
+	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
+				       + (num_possible_nodes()
+					  * sizeof(struct flow_stats *)),
+				       0, 0, NULL);
 	if (flow_cache == NULL)
 		return -ENOMEM;
 
+	flow_stats_cache
+		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
+				    0, SLAB_HWCACHE_ALIGN, NULL);
+	if (flow_stats_cache == NULL) {
+		kmem_cache_destroy(flow_cache);
+		flow_cache = NULL;
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
 /* Uninitializes the flow module. */
 void ovs_flow_exit(void)
 {
+	kmem_cache_destroy(flow_stats_cache);
 	kmem_cache_destroy(flow_cache);
 }
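Note: the "sw_flow" slab above is created with room for one stats pointer per possible NUMA node appended to struct sw_flow, i.e. flow->stats[] behaves as a flexible array whose length is fixed at cache-creation time. A userspace rendering of that layout trick, with simplified names:

```c
#include <stdlib.h>

struct flow_stats;                      /* opaque here, as for the slab */

struct sw_flow {
	int stats_last_writer;
	struct flow_stats *stats[];     /* one slot per possible node */
};

/* Mirrors kmem_cache_create("sw_flow", sizeof(struct sw_flow) +
 * num_possible_nodes() * sizeof(struct flow_stats *), ...): every object
 * carries the pointer tail, zeroed so unused nodes read as NULL. */
static struct sw_flow *flow_cache_alloc(int num_possible_nodes)
{
	return calloc(1, sizeof(struct sw_flow) +
			 num_possible_nodes * sizeof(struct flow_stats *));
}
```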