struct sw_flow *ovs_flow_alloc(void)
{
struct sw_flow *flow;
+ int cpu;
flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
if (!flow)
return ERR_PTR(-ENOMEM);
- spin_lock_init(&flow->lock);
flow->sf_acts = NULL;
flow->mask = NULL;
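+
+	/* The per-CPU stats array sits at the tail of struct sw_flow and
+	 * is sized for every possible CPU (see ovs_flow_init() below):
+	 * zero it and give each CPU's slot its own stats lock. */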
+ memset(flow->stats, 0, num_possible_cpus() * sizeof(struct sw_flow_stats));
+ for_each_possible_cpu(cpu)
+ spin_lock_init(&flow->stats[cpu].lock);
+
return flow;
}
hash = flow_hash(&masked_key, key_start, key_end);
head = find_bucket(ti, hash);
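+	/* Checking the cached flow->hash first rejects most non-matching
+	 * flows without running the word-by-word masked key comparison. */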
hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
- if (flow->mask == mask &&
+ if (flow->mask == mask && flow->hash == hash &&
flow_cmp_masked_key(flow, &masked_key,
key_start, key_end))
			return flow;
	}
	return NULL;
}
-struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
+struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
const struct sw_flow_key *key,
u32 *n_mask_hit)
{
return NULL;
}
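+
+/* Wrapper for callers that do not need the mask-hit count;
+ * n_mask_hit is computed and discarded. */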
+struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
+ const struct sw_flow_key *key)
+{
+ u32 __always_unused n_mask_hit;
+
+ return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
+}
+
int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}
-/**
- * add a new mask into the mask list.
- * The caller needs to make sure that 'mask' is not the same
- * as any masks that are already on the list.
- */
+/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
struct sw_flow_mask *new)
{
/* Initializes the flow module.
 *
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
+ int flow_size;
+
BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
- flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
- 0, NULL);
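+	/* Each sw_flow now ends in a tail array of per-CPU stats, so
+	 * the slab objects must be sized for every possible CPU. */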
+ flow_size = sizeof(struct sw_flow) +
+ (num_possible_cpus() * sizeof(struct sw_flow_stats));
+
+ flow_cache = kmem_cache_create("sw_flow", flow_size, 0, 0, NULL);
if (flow_cache == NULL)
return -ENOMEM;