datapath: Move flow allocation into a function.
author Jesse Gross <jesse@nicira.com>
Tue, 27 Jul 2010 01:46:27 +0000 (18:46 -0700)
committer Jesse Gross <jesse@nicira.com>
Wed, 22 Sep 2010 20:43:01 +0000 (13:43 -0700)
As the process of allocating a flow becomes more involved, it becomes
cumbersome to keep that code mixed in with the general datapath code,
so split it out into a new function.
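
For context, flow_alloc() now reports failure through the ERR_PTR
mechanism instead of returning NULL, so callers switch from a NULL
check plus a hand-set -ENOMEM to IS_ERR()/PTR_ERR(). A minimal sketch
of the resulting call-site pattern, using only names visible in this
diff (the lookup and actions handling done by do_put_flow() is elided,
and example_insert_flow() is hypothetical):

	#include <linux/err.h>

	/* Sketch only: error handling at a flow_alloc() call site. */
	static int example_add_flow(struct datapath *dp,
				    const struct odp_flow_key *key)
	{
		struct sw_flow *flow;
		int error;

		flow = flow_alloc();	/* ERR_PTR(-ENOMEM) on failure */
		if (IS_ERR(flow))
			return PTR_ERR(flow);

		flow->key = *key;
		clear_stats(flow);

		error = example_insert_flow(dp, flow);	/* hypothetical */
		if (error) {
			/* No actions attached yet, so the base free suffices. */
			flow_free(flow);
			return error;
		}
		return 0;
	}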

Signed-off-by: Jesse Gross <jesse@nicira.com>
Reviewed-by: Ben Pfaff <blp@nicira.com>
datapath/datapath.c
datapath/flow.c
datapath/flow.h

index b3f77b3..1677927 100644
@@ -1049,12 +1049,12 @@ static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
                }
 
                /* Allocate flow. */
-               error = -ENOMEM;
-               flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
-               if (flow == NULL)
+               flow = flow_alloc();
+               if (IS_ERR(flow)) {
+                       error = PTR_ERR(flow);
                        goto error;
+               }
                flow->key = uf->flow.key;
-               spin_lock_init(&flow->lock);
                clear_stats(flow);
 
                /* Obtain actions. */
@@ -1109,7 +1109,7 @@ static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
 error_free_flow_acts:
        kfree(flow->sf_acts);
 error_free_flow:
-       kmem_cache_free(flow_cache, flow);
+       flow_free(flow);
 error:
        return error;
 }
index 7684c06..1f01166 100644
@@ -123,27 +123,45 @@ struct sw_flow_actions *flow_actions_alloc(size_t n_actions)
        return sfa;
 }
 
+struct sw_flow *flow_alloc(void)
+{
+       struct sw_flow *flow;
+
+       flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
+       if (!flow)
+               return ERR_PTR(-ENOMEM);
+
+       spin_lock_init(&flow->lock);
 
-/* Frees 'flow' immediately. */
-static void flow_free(struct sw_flow *flow)
+       return flow;
+}
+
+void flow_free(struct sw_flow *flow)
 {
        if (unlikely(!flow))
                return;
-       kfree(flow->sf_acts);
+
        kmem_cache_free(flow_cache, flow);
 }
 
+/* Frees the entire 'flow' (both base and actions) immediately. */
+static void flow_free_full(struct sw_flow *flow)
+{
+       kfree(flow->sf_acts);
+       flow_free(flow);
+}
+
 void flow_free_tbl(struct tbl_node *node)
 {
        struct sw_flow *flow = flow_cast(node);
-       flow_free(flow);
+       flow_free_full(flow);
 }
 
 /* RCU callback used by flow_deferred_free. */
 static void rcu_free_flow_callback(struct rcu_head *rcu)
 {
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
-       flow_free(flow);
+       flow_free_full(flow);
 }
 
 /* Schedules 'flow' to be freed after the next RCU grace period.
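
The split introduced here matters for callers: the exported flow_free()
releases only the struct sw_flow itself (suited to error paths before
actions are attached), while the static flow_free_full() used by the
table and RCU callbacks also frees flow->sf_acts. A hedged sketch of
the two teardown paths, using the interfaces declared in flow.h below
(the caller function itself is hypothetical):

	/* Sketch: the two teardown paths after this change. */
	static void example_teardown(struct sw_flow *unpublished_flow,
				     struct sw_flow *installed_flow)
	{
		/* Error path: allocated but never given actions or made
		 * visible, so only the base allocation needs releasing. */
		flow_free(unpublished_flow);

		/* Removal path: readers may still hold a reference, so the
		 * full free (base + sf_acts) is deferred until after the
		 * next RCU grace period via rcu_free_flow_callback(). */
		flow_deferred_free(installed_flow);
	}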
index 80a5b66..484ca12 100644
@@ -58,20 +58,22 @@ struct arp_eth_header
        unsigned char       ar_tip[4];          /* target IP address        */
 } __attribute__((packed));
 
-extern struct kmem_cache *flow_cache;
+int flow_init(void);
+void flow_exit(void);
 
-struct sw_flow_actions *flow_actions_alloc(size_t n_actions);
+struct sw_flow *flow_alloc(void);
+void flow_free(struct sw_flow *flow);
 void flow_deferred_free(struct sw_flow *);
+void flow_free_tbl(struct tbl_node *);
+
+struct sw_flow_actions *flow_actions_alloc(size_t n_actions);
 void flow_deferred_free_acts(struct sw_flow_actions *);
+
 int flow_extract(struct sk_buff *, u16 in_port, struct odp_flow_key *);
 void flow_used(struct sw_flow *, struct sk_buff *);
 
 u32 flow_hash(const struct odp_flow_key *key);
 int flow_cmp(const struct tbl_node *, void *target);
-void flow_free_tbl(struct tbl_node *);
-
-int flow_init(void);
-void flow_exit(void);
 
 static inline struct sw_flow *flow_cast(const struct tbl_node *node)
 {