/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

#define MC_HASH_SHIFT		8
#define MC_HASH_ENTRIES		(1u << MC_HASH_SHIFT)
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
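
/* Cache geometry: with MC_HASH_SHIFT of 8, a 32-bit skb hash provides
 * MC_HASH_SEGS == 4 probe positions of 8 bits each, and every CPU holds
 * MC_HASH_ENTRIES == 256 mask_cache_entry slots in its per-CPU cache
 * (see ovs_flow_tbl_lookup_stats() below).
 */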

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       const struct sw_flow_mask *mask)
{
	const long *m = (const long *)((const u8 *)&mask->key +
				mask->range.start);
	const long *s = (const long *)((const u8 *)src +
				mask->range.start);
	long *d = (long *)((u8 *)dst + mask->range.start);
	int i;

	/* The memory outside of 'mask->range' is not set since
	 * further operations on 'dst' only use contents within
	 * 'mask->range'.
	 */
	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
		*d++ = *s++ & *m++;
}
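
/* Worked example: with mask->range.start == 8 and mask->range.end == 40,
 * the loop above writes (40 - 8) / sizeof(long) masked words, i.e. four on
 * a 64-bit host:
 *
 *	d[0] = s[0] & m[0];  ...  d[3] = s[3] & m[3];
 *
 * Bytes of 'dst' outside [8, 40) are deliberately left unwritten, as the
 * comment in the function notes.
 */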

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;
	struct flow_stats *stats;
	int node;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	flow->sf_acts = NULL;
	flow->mask = NULL;
	flow->stats_last_writer = NUMA_NO_NODE;

	/* Initialize the default stat node. */
	stats = kmem_cache_alloc_node(flow_stats_cache,
				      GFP_KERNEL | __GFP_ZERO, 0);
	if (!stats)
		goto err;

	spin_lock_init(&stats->lock);

	RCU_INIT_POINTER(flow->stats[0], stats);

	for_each_node(node)
		if (node != 0)
			RCU_INIT_POINTER(flow->stats[node], NULL);

	return flow;
err:
	kmem_cache_free(flow_cache, flow);
	return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(struct flow_table *table)
{
	return table->count;
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
					flex_array_get(buckets, i));

	return buckets;
}

static void flow_free(struct sw_flow *flow)
{
	int node;

	kfree((struct sw_flow_actions __force *)flow->sf_acts);
	for_each_node(node)
		if (flow->stats[node])
			kmem_cache_free(flow_stats_cache,
					(struct flow_stats __force *)flow->stats[node]);
	kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	flow_free(flow);
}

static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu)
{
	struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu);

	kfree(mask);
}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

static void __table_instance_destroy(struct table_instance *ti)
{
	free_buckets(ti->buckets);
	kfree(ti);
}

static struct table_instance *table_instance_alloc(int new_size)
{
	struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return NULL;

	ti->buckets = alloc_buckets(new_size);
	if (!ti->buckets) {
		kfree(ti);
		return NULL;
	}
	ti->n_buckets = new_size;
	ti->node_ver = 0;
	ti->keep_flows = false;
	get_random_bytes(&ti->hash_seed, sizeof(u32));

	return ti;
}

static void mask_array_rcu_cb(struct rcu_head *rcu)
{
	struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

	kfree(ma);
}

static struct mask_array *tbl_mask_array_alloc(int size)
{
	struct mask_array *new;

	new = kzalloc(sizeof(struct mask_array) +
		      sizeof(struct sw_flow_mask *) * size, GFP_KERNEL);
	if (!new)
		return NULL;

	new->count = 0;
	new->max = size;

	return new;
}

static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
	struct mask_array *old;
	struct mask_array *new;

	new = tbl_mask_array_alloc(size);
	if (!new)
		return -ENOMEM;

	old = ovsl_dereference(tbl->mask_array);
	if (old) {
		int i;

		for (i = 0; i < old->max; i++) {
			if (ovsl_dereference(old->masks[i]))
				new->masks[new->count++] = old->masks[i];
		}
	}
	rcu_assign_pointer(tbl->mask_array, new);

	if (old)
		call_rcu(&old->rcu, mask_array_rcu_cb);

	return 0;
}

int ovs_flow_tbl_init(struct flow_table *table)
{
	struct table_instance *ti;
	struct mask_array *ma;

	table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
					   MC_HASH_ENTRIES,
					   __alignof__(struct mask_cache_entry));
	if (!table->mask_cache)
		return -ENOMEM;

	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
	if (!ma)
		goto free_mask_cache;

	ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!ti)
		goto free_mask_array;

	rcu_assign_pointer(table->ti, ti);
	rcu_assign_pointer(table->mask_array, ma);
	table->last_rehash = jiffies;
	table->count = 0;
	return 0;

free_mask_array:
	/* 'ma' has not been published in table->mask_array yet, so free the
	 * local pointer to avoid leaking it.
	 */
	kfree(ma);
free_mask_cache:
	free_percpu(table->mask_cache);
	return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct table_instance *ti = container_of(rcu, struct table_instance, rcu);

	__table_instance_destroy(ti);
}

static void table_instance_destroy(struct table_instance *ti, bool deferred)
{
	int i;

	if (!ti)
		return;

	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);
			ovs_flow_free(flow, deferred);
		}
	}

skip_flows:
	if (deferred)
		call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
	else
		__table_instance_destroy(ti);
}

/* No need for locking; this function is called from an RCU callback or the
 * error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	struct table_instance *ti = (struct table_instance __force *)table->ti;

	free_percpu(table->mask_cache);
	kfree((struct mask_array __force *)table->mask_array);
	table_instance_destroy(ti, false);
}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return flex_array_get(ti->buckets,
				(hash & (ti->n_buckets - 1)));
}

static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(ti, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head);
}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, head, hash_node[old_ver])
			table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}
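
/* Design note: each sw_flow carries two hash_node list heads, indexed by
 * node_ver. A rehash links every flow into the new instance under the
 * flipped version bit while RCU readers keep traversing the old instance
 * under the old bit, so no flow is ever unlinked during the copy;
 * keep_flows then tells table_instance_destroy() not to free the flows
 * still shared with the new instance.
 */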

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets)
{
	struct table_instance *new_ti;

	new_ti = table_instance_alloc(n_buckets);
	if (!new_ti)
		return NULL;

	flow_table_copy_flows(ti, new_ti);

	return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti;
	struct table_instance *new_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;

	rcu_assign_pointer(flow_table->ti, new_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;

	table_instance_destroy(old_ti, true);
	return 0;
}

static u32 flow_hash(const struct sw_flow_key *key, int key_start,
		     int key_end)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + key_start);
	int hash_u32s = (key_end - key_start) >> 2;

	/* Make sure the number of hash bytes is a multiple of u32. */
	BUILD_BUG_ON(sizeof(long) % sizeof(u32));

	return arch_fast_hash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
	if (key->tun_key.ipv4_dst)
		return 0;
	else
		return rounddown(offsetof(struct sw_flow_key, phy),
				 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{
	const long *cp1 = (const long *)((const u8 *)key1 + key_start);
	const long *cp2 = (const long *)((const u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				int key_start, int key_end)
{
	return cmp_key(&flow->key, key, key_start, key_end);
}

bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
			       struct sw_flow_match *match)
{
	struct sw_flow_key *key = match->key;
	int key_start = flow_key_start(key);
	int key_end = match->range.end;

	return cmp_key(&flow->unmasked_key, key, key_start, key_end);
}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int key_start = mask->range.start;
	int key_end = mask->range.end;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_mask_key(&masked_key, unmasked, mask);
	hash = flow_hash(&masked_key, key_start, key_end);
	head = find_bucket(ti, hash);
	(*n_mask_hit)++;
	hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
		if (flow->mask == mask && flow->hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key,
					key_start, key_end))
			return flow;
	}
	return NULL;
}

static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *index)
{
	struct sw_flow *flow;
	int i;

	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *mask;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (mask) {
			flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
			if (flow) { /* Found */
				*index = i;
				return flow;
			}
		}
	}

	return NULL;
}

/* mask_cache maps a flow to a probable mask. It is not a tightly coupled
 * cache: updates to the mask list can leave stale entries behind, which
 * are simply corrected on the next lookup.
 * The cache is per-CPU and is divided into MC_HASH_SEGS segments; in case
 * of a hash collision the entry is hashed into the next segment.
 */
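/* Lookup sketch (matching the probe loop below): each 8-bit slice of the
 * skb hash names one candidate slot, one per segment:
 *
 *	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
 *		ce = &entries[hash & (MC_HASH_ENTRIES - 1)];
 *		if (ce->skb_hash == skb_hash)
 *			... try the cached mask first ...
 *		hash >>= MC_HASH_SHIFT;
 *	}
 *
 * On a miss, the full mask-array scan runs and the winning mask index is
 * written back into the least useful of the probed slots ('del').
 */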
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit)
{
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_cache_entry *entries, *ce, *del;
	struct sw_flow *flow;
	u32 hash = skb_hash;
	int seg;

	*n_mask_hit = 0;
	if (unlikely(!skb_hash)) {
		u32 __always_unused mask_index;

		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
	}

	del = NULL;
	entries = this_cpu_ptr(tbl->mask_cache);

	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
		int index;

		index = hash & (MC_HASH_ENTRIES - 1);
		ce = &entries[index];

		if (ce->skb_hash == skb_hash) {
			struct sw_flow_mask *mask;

			mask = rcu_dereference_ovsl(ma->masks[ce->mask_index]);
			if (mask) {
				flow = masked_flow_lookup(ti, key, mask,
							  n_mask_hit);
				if (flow) /* Found */
					return flow;
			}
			del = ce;
			break;
		}

		if (!del || (del->skb_hash && !ce->skb_hash) ||
		    (rcu_dereference_ovsl(ma->masks[del->mask_index]) &&
		     !rcu_dereference_ovsl(ma->masks[ce->mask_index]))) {
			del = ce;
		}

		hash >>= MC_HASH_SHIFT;
	}

	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &del->mask_index);
	if (flow)
		del->skb_hash = skb_hash;

	return flow;
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{
	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
	u32 __always_unused n_mask_hit;
	u32 __always_unused index;

	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
	struct mask_array *ma;

	ma = rcu_dereference_ovsl(table->mask_array);
	return ma->count;
}

static struct table_instance *table_instance_expand(struct table_instance *ti)
{
	return table_instance_rehash(ti, ti->n_buckets * 2);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
	if (mask) {
		/* ovs-lock is required to protect mask-refcount and
		 * mask list.
		 */
		ASSERT_OVSL();
		BUG_ON(!mask->ref_count);
		mask->ref_count--;

		if (!mask->ref_count) {
			struct mask_array *ma;
			int i;

			ma = ovsl_dereference(tbl->mask_array);
			for (i = 0; i < ma->max; i++) {
				if (mask == ovsl_dereference(ma->masks[i])) {
					RCU_INIT_POINTER(ma->masks[i], NULL);
					ma->count--;
					goto free;
				}
			}
			BUG();
free:
			call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
		}
	}
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->hash_node[ti->node_ver]);
	table->count--;

	/* RCU delete the mask. 'flow->mask' is not NULLed, as it should be
	 * accessible as long as the RCU read lock is held.
	 */
	flow_mask_remove(table, flow->mask);
}

static struct sw_flow_mask *mask_alloc(void)
{
	struct sw_flow_mask *mask;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (mask)
		mask->ref_count = 1;

	return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{
	const u8 *a_ = (const u8 *)&a->key + a->range.start;
	const u8 *b_ = (const u8 *)&b->key + b->range.start;

	return (a->range.end == b->range.end)
		&& (a->range.start == b->range.start)
		&& (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{
	struct mask_array *ma;
	int i;

	ma = ovsl_dereference(tbl->mask_array);
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *t;

		t = ovsl_dereference(ma->masks[i]);
		if (t && mask_equal(mask, t))
			return t;
	}

	return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		struct mask_array *ma;
		int i;

		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;

		mask->key = new->key;
		mask->range = new->range;

		/* Add mask to mask-list. */
		ma = ovsl_dereference(tbl->mask_array);
		if (ma->count >= ma->max) {
			int err;

			err = tbl_mask_array_realloc(tbl, ma->max +
						     MASK_ARRAY_SIZE_MIN);
			if (err) {
				kfree(mask);
				return err;
			}
			ma = ovsl_dereference(tbl->mask_array);
		}

		for (i = 0; i < ma->max; i++) {
			const struct sw_flow_mask *t;

			t = ovsl_dereference(ma->masks[i]);
			if (!t) {
				rcu_assign_pointer(ma->masks[i], mask);
				ma->count++;
				break;
			}
		}
	} else {
		BUG_ON(!mask->ref_count);
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}
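
/* Usage note: masks are shared across flows (megaflows with the same
 * wildcard pattern reuse one sw_flow_mask), so ref_count here counts the
 * flows referencing a mask, and flow_mask_remove() above only frees a mask
 * once the last such flow is gone.
 */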

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			struct sw_flow_mask *mask)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;

	flow->hash = flow_hash(&flow->key, flow->mask->range.start,
			       flow->mask->range.end);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets);

	if (new_ti) {
		rcu_assign_pointer(table->ti, new_ti);
		table_instance_destroy(ti, true);
		table->last_rehash = jiffies;
	}
	return 0;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
	BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
	BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
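
	/* These checks back the long-at-a-time loops in ovs_flow_mask_key()
	 * and cmp_key(): both walk sw_flow_key as an array of longs, which
	 * is only safe if the struct is long-aligned and a multiple of
	 * sizeof(long) in size.
	 */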

	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
				       + (num_possible_nodes()
					  * sizeof(struct flow_stats *)),
				       0, 0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	flow_stats_cache
		= kmem_cache_create("sw_flow_stats", sizeof(struct flow_stats),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	if (flow_stats_cache == NULL) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}