{
struct sw_flow_actions *sfa;
- if (n_actions > (PAGE_SIZE - sizeof *sfa) / sizeof(union odp_action))
+ /* At least DP_MAX_PORTS actions are required to be able to flood a
+ * packet to every port. Factor of 2 allows for setting VLAN tags,
+ * etc. */
+ if (n_actions > 2 * DP_MAX_PORTS)
return ERR_PTR(-EINVAL);
	sfa = kmalloc(sizeof *sfa + n_actions * sizeof(union odp_action),
		      GFP_KERNEL);
	if (!sfa)
		return ERR_PTR(-ENOMEM);

	sfa->n_actions = n_actions;
	return sfa;
}
-
-/* Frees 'flow' immediately. */
-static void flow_free(struct sw_flow *flow)
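+/* Allocates a flow from the slab cache, with its spinlock initialized,
+ * its reference count set to 1, and 'dead' cleared.  Returns the flow or
+ * an ERR_PTR on allocation failure. */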
+struct sw_flow *flow_alloc(void)
{
- if (unlikely(!flow))
- return;
- kfree(flow->sf_acts);
- kmem_cache_free(flow_cache, flow);
+ struct sw_flow *flow;
+
+ flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
+ if (!flow)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&flow->lock);
+ atomic_set(&flow->refcnt, 1);
+ flow->dead = false;
+
+ return flow;
}
void flow_free_tbl(struct tbl_node *node)
{
struct sw_flow *flow = flow_cast(node);
- flow_free(flow);
+
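+	/* Mark the flow as no longer in the table before dropping the
+	 * table's reference. */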
+ flow->dead = true;
+ flow_put(flow);
}
/* RCU callback used by flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
- flow_free(flow);
+
+ flow->dead = true;
+ flow_put(flow);
}
/* Schedules 'flow' to be freed after the next RCU grace period. */
void flow_deferred_free(struct sw_flow *flow)
{
	call_rcu(&flow->rcu, rcu_free_flow_callback);
}
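+
+/* Takes a reference to 'flow'. */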
+void flow_hold(struct sw_flow *flow)
+{
+ atomic_inc(&flow->refcnt);
+}
+
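+/* Releases a reference to 'flow'.  When the last reference is dropped,
+ * the flow's actions and the flow itself are freed.  Tolerates a null
+ * 'flow'. */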
+void flow_put(struct sw_flow *flow)
+{
+ if (unlikely(!flow))
+ return;
+
+ if (atomic_dec_and_test(&flow->refcnt)) {
+ kfree(flow->sf_acts);
+ kmem_cache_free(flow_cache, flow);
+ }
+}
+
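+/* Usage sketch (illustrative, not part of this change): a caller that
+ * must keep a flow alive beyond an RCU read-side critical section pairs
+ * flow_hold() with a later flow_put():
+ *
+ *	rcu_read_lock();
+ *	flow = flow_cast(node);
+ *	flow_hold(flow);
+ *	rcu_read_unlock();
+ *	...use 'flow'...
+ *	flow_put(flow);
+ */
+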
/* RCU callback used by flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{