When a flow table is copied, the mask list from the old table
is not properly copied into the new table. The corrupted mask
list in the new table leads to a kernel crash. This patch
fixes the bug.
Bug #18110
Reported-by: Justin Pettit <jpettit@nicira.com>
Signed-off-by: Andy Zhou <azhou@nicira.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
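For illustration only (not part of the patch): below is a minimal user-space
sketch of the idea behind the fix. The mask list head is allocated on its own
and the flow table holds only a pointer to it, so a copied table can inherit
the old table's pointer and keep seeing the same masks; presumably the rehash
path in the patch performs exactly that hand-over. The names fake_table,
mask_entry and table_rehash are hypothetical stand-ins, and the kernel
list_head is reduced to a plain singly linked list here.

#include <stdio.h>
#include <stdlib.h>

struct mask_entry {
	struct mask_entry *next;
	int id;
};

struct fake_table {
	/* Analogous to "struct list_head *mask_list" after the fix:
	 * a pointer to a separately allocated list head. */
	struct mask_entry **mask_list;
};

/* The copied table shares the old table's mask list instead of
 * ending up with a stale or empty embedded head. */
static void table_rehash(struct fake_table *new_tbl, struct fake_table *old_tbl)
{
	new_tbl->mask_list = old_tbl->mask_list;
}

int main(void)
{
	struct fake_table old_tbl, new_tbl;
	struct mask_entry m = { .next = NULL, .id = 42 };

	/* Allocate the list head once; the table only references it. */
	old_tbl.mask_list = malloc(sizeof(*old_tbl.mask_list));
	if (!old_tbl.mask_list)
		return 1;
	*old_tbl.mask_list = &m;

	table_rehash(&new_tbl, &old_tbl);

	/* A lookup through the new table still sees the mask that was
	 * installed through the old one. */
	printf("mask id via new table: %d\n", (*new_tbl.mask_list)->id);

	free(old_tbl.mask_list);
	return 0;
}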
flex_array_free(buckets);
}
-struct flow_table *ovs_flow_tbl_alloc(int new_size)
+static struct flow_table *__flow_tbl_alloc(int new_size)
{
struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
table->node_ver = 0;
table->keep_flows = false;
get_random_bytes(&table->hash_seed, sizeof(u32));
- INIT_LIST_HEAD(&table->mask_list);
+ table->mask_list = NULL;
skip_flows:
free_buckets(table->buckets);
kfree(table);
}
+struct flow_table *ovs_flow_tbl_alloc(int new_size)
+{
+ struct flow_table *table = __flow_tbl_alloc(new_size);
+
+ if (!table)
+ return NULL;
+
+ table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!table->mask_list) {
+ table->keep_flows = true;
+ __flow_tbl_destroy(table);
+ return NULL;
+ }
+ INIT_LIST_HEAD(table->mask_list);
+
+ return table;
+}
+
+void ovs_flow_tbl_destroy(struct flow_table *table)
+{
+ BUG_ON(!list_empty(table->mask_list));
+ kfree(table->mask_list);
+
+ __flow_tbl_destroy(table);
+}
+
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
struct flow_table *table = container_of(rcu, struct flow_table, rcu);
{
struct flow_table *new_table;
- new_table = ovs_flow_tbl_alloc(n_buckets);
+ new_table = __flow_tbl_alloc(n_buckets);
if (!new_table)
return ERR_PTR(-ENOMEM);
struct sw_flow *flow = NULL;
struct sw_flow_mask *mask;
- list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
+ list_for_each_entry_rcu(mask, tbl->mask_list, list) {
flow = ovs_masked_flow_lookup(tbl, key, mask);
if (flow) /* Found */
break;
- list_for_each(ml, &tbl->mask_list) {
+ list_for_each(ml, tbl->mask_list) {
struct sw_flow_mask *m;
m = container_of(ml, struct sw_flow_mask, list);
if (ovs_sw_flow_mask_equal(mask, m))
*/
void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
{
- list_add_rcu(&mask->list, &tbl->mask_list);
+ list_add_rcu(&mask->list, tbl->mask_list);
struct flex_array *buckets;
unsigned int count, n_buckets;
struct rcu_head rcu;
- struct list_head mask_list;
+ struct list_head *mask_list;
int node_ver;
u32 hash_seed;
bool keep_flows;