/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define REHASH_INTERVAL		(10 * 60 * HZ)

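/* Slab cache from which every struct sw_flow is allocated. */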
static struct kmem_cache *flow_cache;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

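/* Build the masked key 'dst' = 'src' & 'mask', one long word at a time.
 * sw_flow_key is long-aligned and sized in multiples of sizeof(long)
 * (see the BUILD_BUG_ONs in ovs_flow_init()), so the word-wise loop
 * below never reads past the end of the key.
 */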
void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       const struct sw_flow_mask *mask)
{
	const long *m = (const long *)((const u8 *)&mask->key +
				mask->range.start);
	const long *s = (const long *)((const u8 *)src +
				mask->range.start);
	long *d = (long *)((u8 *)dst + mask->range.start);
	int i;

	/* The memory outside of 'mask->range' is not set since
	 * further operations on 'dst' only use contents within
	 * 'mask->range'.
	 */
	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
		*d++ = *s++ & *m++;
}

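/* Allocate a flow from the slab cache.  With 'percpu_stats' the flow
 * carries one struct flow_stats per possible CPU; otherwise a single
 * spinlock-protected instance is shared by all CPUs.
 */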
struct sw_flow *ovs_flow_alloc(bool percpu_stats)
{
	struct sw_flow *flow;
	int cpu;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->sf_acts = NULL;
	flow->mask = NULL;
	flow->stats.is_percpu = percpu_stats;

	if (!percpu_stats) {
		flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
		if (!flow->stats.stat)
			goto err;
		spin_lock_init(&flow->stats.stat->lock);
	} else {
		flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
		if (!flow->stats.cpu_stats)
			goto err;
		for_each_possible_cpu(cpu) {
			struct flow_stats *cpu_stats;

			cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
			spin_lock_init(&cpu_stats->lock);
		}
	}
	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(struct flow_table *table)
{
	return table->count;
}

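/* The bucket array is a flex_array, so growing the table does not
 * require high-order contiguous allocations; all buckets are
 * preallocated and initialized to empty hlists up front.
 */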
static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
				flex_array_get(buckets, i));

	return buckets;
}

static void flow_free(struct sw_flow *flow)
{
	kfree((struct sw_flow_actions __force *)flow->sf_acts);
	if (flow->stats.is_percpu)
		free_percpu(flow->stats.cpu_stats);
	else
		kfree(flow->stats.stat);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
{
	struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);

	kfree(mask);
}

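/* Free 'flow' and drop its reference on the flow's mask.  With
 * 'deferred' the memory is reclaimed only after an RCU grace period,
 * so concurrent lookups may keep walking the flow until then.
 */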
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (flow->mask) {
		struct sw_flow_mask *mask = flow->mask;

		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			list_del_rcu(&mask->list);
			if (deferred)
				call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
			else
				kfree(mask);
		}
	}

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

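/* Initialize 'table' with a minimum-sized instance and an empty
 * mask list.
 */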
int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		return -ENOMEM;

	rcu_assign_pointer(table->ti, ti);
	INIT_LIST_HEAD(&table->mask_list);
	table->last_rehash = jiffies;
	table->count = 0;
	return 0;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

static void table_instance_destroy(struct table_instance *ti, bool deferred)
{
	int i;

	if (!ti)
		return;

	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred)
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	else
		__table_instance_destroy(ti);
}

void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	table_instance_destroy(ti, deferred);
}

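/* Iterator used by flow dumps.  '*bucket' and '*last' record how far
 * the previous call got, so a multi-part netlink dump can resume from
 * the same position.
 */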
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

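/* 'hash_seed' is per table instance, so flows spread differently after
 * each rehash; 'n_buckets' is always a power of two, hence the mask.
 */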
static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
				(hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
}

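/* Every flow has two hash_node slots.  A rehash links flows into the
 * new instance under the inverted node_ver, leaving the old instance's
 * bucket lists intact for RCU readers until the switch-over.
 */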
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, head, hash_node[old_ver])
			table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti);

	return new_ti;
}

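/* Flushing swaps in a fresh minimum-sized instance; the old instance,
 * and every flow in it, is freed after an RCU grace period.
 */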
int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti;
	struct table_instance *new_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;

	rcu_assign_pointer(flow_table->ti, new_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;

	table_instance_destroy(old_ti, true);
	return 0;
}

static u32 flow_hash(const struct sw_flow_key *key, int key_start,
		     int key_end)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return arch_fast_hash2(hash_key, hash_u32s, 0);
}

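/* Keys carrying no tunnel metadata are hashed and compared starting at
 * the 'phy' field, skipping the leading all-zero tun_key bytes.
 */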
static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.ipv4_dst)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				int key_start, int key_end)
{
	return cmp_key(&flow->key, key, key_start, key_end);
}

bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
			       struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	return cmp_key(&flow->unmasked_key, key, key_start, key_end);
}

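/* Look up 'unmasked' under a single mask: mask the key, hash only the
 * bytes the mask covers, then compare candidates in the chosen bucket.
 */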
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int key_start = mask->range.start;
	int key_end = mask->range.end;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, mask);
	hash = flow_hash(&masked_key, key_start, key_end);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
		if (flow->mask == mask && flow->hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key,
					key_start, key_end))
			return flow;
	}
	return NULL;
}

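/* Packet-path lookup: try each mask in the list until one produces a
 * match.  '*n_mask_hit' counts the masks visited; the datapath reports
 * it as a megaflow cache-efficiency statistic.
 */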
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow)  /* Found */
			return flow;
	}
	return NULL;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	u32 __always_unused n_mask_hit;

	return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct sw_flow_mask *mask;
	int num = 0;

	list_for_each_entry(mask, &table->mask_list, list)
		num++;

	return num;
}

static struct table_instance *table_instance_expand(struct table_instance *ti)
{
	return table_instance_rehash(ti, ti->n_buckets * 2);
}

void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->hash_node[ti->node_ver]);
	table->count--;
}

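/* Masks are refcounted and shared between flows with identical
 * wildcarding; deduplicating the mask list keeps lookups, which are
 * linear in the number of masks, cheap.
 */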
static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return  (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct list_head *ml;

	list_for_each(ml, &tbl->mask_list) {
		struct sw_flow_mask *m;

		m = container_of(ml, struct sw_flow_mask, list);
		if (mask_equal(mask, m))
			return m;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}

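/* Insert 'flow' keyed by 'mask'.  The table is grown once the flow
 * count exceeds the number of buckets, and is rehashed with a fresh
 * seed every REHASH_INTERVAL to break up long hash chains.
 */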
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			struct sw_flow_mask *mask)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;

	flow->hash = flow_hash(&flow->key, flow->mask->range.start,
			       flow->mask->range.end);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		table_instance_destroy(ti, true);
		table->last_rehash = jiffies;
	}
	return 0;
}

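/* Typical receive-path usage (a sketch of how datapath.c drives this
 * API; locking and error handling omitted):
 *
 *	flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, &n_mask_hit);
 *	if (!flow) {
 *		// miss: upcall to userspace, which may validate the
 *		// match and call ovs_flow_tbl_insert() under ovs_lock
 *	}
 */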
/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
				       0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}