datapath/table-mac.c
/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007 The Board of Trustees of The Leland Stanford Junior
 * University
 */

#include "table.h"
#include "crc32.h"
#include "flow.h"
#include "openflow.h"
#include "datapath.h"

#include <linux/slab.h>

struct sw_table_mac {
        struct sw_table swt;            /* Generic table interface. */
        spinlock_t lock;                /* Serializes writers. */
        struct crc32 crc32;             /* Hash function for bucket selection. */
        atomic_t n_flows;               /* Number of flows currently in the table. */
        unsigned int max_flows;         /* Upper limit on n_flows. */
        unsigned int bucket_mask;       /* Number of buckets minus 1. */
        struct hlist_head *buckets;     /* Array of bucket_mask + 1 hash chains. */
};

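/* Hashes a flow key into one of the table's buckets.  The CRC covers the
   entire key, not just dl_src, so keys stored in and looked up from this
   table must have their remaining fields set consistently for the hash
   values to agree. */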
static struct hlist_head *find_bucket(struct sw_table *swt,
                                      const struct sw_flow_key *key)
{
        struct sw_table_mac *tm = (struct sw_table_mac *) swt;
        unsigned int crc = crc32_calculate(&tm->crc32, key, sizeof *key);
        return &tm->buckets[crc & tm->bucket_mask];
}

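/* Returns the flow whose Ethernet source address matches 'key', or NULL.
   Runs under RCU protection, so it is safe to call concurrently with
   inserts and deletes without taking tm->lock. */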
static struct sw_flow *table_mac_lookup(struct sw_table *swt,
                                        const struct sw_flow_key *key)
{
        struct hlist_head *bucket = find_bucket(swt, key);
        struct hlist_node *pos;
        struct sw_flow *flow;
        hlist_for_each_entry_rcu (flow, pos, bucket, u.hnode)
                if (!memcmp(key->dl_src, flow->key.dl_src, 6))
                        return flow;
        return NULL;
}

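/* Attempts to add 'flow' to the table.  Returns 1 if the flow was added,
   possibly replacing an existing entry for the same source MAC, and 0 if
   the flow does not fit this table's match pattern or the table is full.
   Writers are serialized by tm->lock; RCU readers are never blocked. */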
static int table_mac_insert(struct sw_table *swt, struct sw_flow *flow)
{
        struct sw_table_mac *tm = (struct sw_table_mac *) swt;
        struct hlist_head *bucket;
        struct hlist_node *pos;
        unsigned long int flags;
        struct sw_flow *f;

        /* The MAC table only handles flows that match on the Ethernet
           source address and wildcard everything else. */
        if (likely(flow->key.wildcards != (OFPFW_ALL & ~OFPFW_DL_SRC)))
                return 0;
        bucket = find_bucket(swt, &flow->key);

        spin_lock_irqsave(&tm->lock, flags);

        /* If a flow with the same source MAC is already present, replace
           it in place rather than adding a duplicate entry. */
        hlist_for_each_entry_rcu (f, pos, bucket, u.hnode) {
                if (!memcmp(f->key.dl_src, flow->key.dl_src, 6)
                    && flow_del(f)) {
                        hlist_replace_rcu(&f->u.hnode, &flow->u.hnode);
                        spin_unlock_irqrestore(&tm->lock, flags);
                        flow_deferred_free(f);
                        return 1;
                }
        }

        /* Table overflow? */
        if (atomic_read(&tm->n_flows) >= tm->max_flows) {
                spin_unlock_irqrestore(&tm->lock, flags);
                return 0;
        }
        atomic_inc(&tm->n_flows);

        hlist_add_head_rcu(&flow->u.hnode, bucket);
        spin_unlock_irqrestore(&tm->lock, flags);
        return 1;
}

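/* Removes 'flow' from its bucket if flow_del() marks it dead, deferring
   the actual free until after an RCU grace period.  Returns the number
   of flows deleted (0 or 1). */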
static int do_delete(struct sw_table *swt, struct sw_flow *flow)
{
        if (flow_del(flow)) {
                hlist_del_rcu(&flow->u.hnode);
                flow_deferred_free(flow);
                return 1;
        }
        return 0;
}

/* Returns the number of deleted flows. */
static int table_mac_delete(struct sw_table *swt,
                            const struct sw_flow_key *key, int strict)
{
        struct sw_table_mac *tm = (struct sw_table_mac *) swt;

        if (key->wildcards == (OFPFW_ALL & ~OFPFW_DL_SRC)) {
                /* The key has exactly this table's match pattern, so a
                   single hash lookup suffices. */
                struct sw_flow *flow = table_mac_lookup(swt, key);
                if (flow && do_delete(swt, flow)) {
                        atomic_dec(&tm->n_flows);
                        return 1;
                }
                return 0;
        } else {
                /* Any other pattern requires a walk over every bucket. */
                unsigned int i;
                int count = 0;
                for (i = 0; i <= tm->bucket_mask; i++) {
                        struct hlist_head *bucket = &tm->buckets[i];
                        struct hlist_node *pos;
                        struct sw_flow *flow;
                        hlist_for_each_entry_rcu (flow, pos, bucket, u.hnode)
                                if (flow_del_matches(&flow->key, key, strict))
                                        count += do_delete(swt, flow);
                }
                if (count)
                        atomic_sub(count, &tm->n_flows);
                return count;
        }
}

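/* Expires flows whose timeouts have elapsed, notifying the controller if
   it asked for flow expiration messages.  Returns the number of flows
   deleted. */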
static int table_mac_timeout(struct datapath *dp, struct sw_table *swt)
{
        struct sw_table_mac *tm = (struct sw_table_mac *) swt;
        unsigned int i;
        int count = 0;

        for (i = 0; i <= tm->bucket_mask; i++) {
                struct hlist_head *bucket = &tm->buckets[i];
                struct hlist_node *pos;
                struct sw_flow *flow;
                hlist_for_each_entry_rcu (flow, pos, bucket, u.hnode) {
                        if (flow_timeout(flow)) {
                                count += do_delete(swt, flow);
                                if (dp->hello_flags & OFP_CHELLO_SEND_FLOW_EXP)
                                        dp_send_flow_expired(dp, flow);
                        }
                }
        }
        if (count)
                atomic_sub(count, &tm->n_flows);
        return count;
}

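/* Frees the table and every flow still in it.  By this point no other
   references to the table may remain, which is why plain (non-RCU) list
   removal and immediate freeing are safe here. */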
static void table_mac_destroy(struct sw_table *swt)
{
        struct sw_table_mac *tm = (struct sw_table_mac *) swt;
        unsigned int i;
        for (i = 0; i <= tm->bucket_mask; i++) {
                struct hlist_head *hlist = &tm->buckets[i];
                while (!hlist_empty(hlist)) {
                        struct sw_flow *flow = hlist_entry(hlist->first,
                                                           struct sw_flow,
                                                           u.hnode);
                        hlist_del(&flow->u.hnode);
                        flow_free(flow);
                }
        }
        kfree(tm->buckets);
        kfree(tm);
}

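/* Iterator state: buckets are walked in index order, and each bucket's
   chain is followed to the end before moving on to the next nonempty
   bucket. */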
struct swt_iterator_mac {
        struct sw_table_mac *tm;
        unsigned int bucket_i;
};

static struct sw_flow *next_head_flow(struct swt_iterator_mac *im)
{
        /* Advance bucket_i to the first nonempty bucket at or after its
           current position and return that bucket's first flow, or NULL
           if no nonempty bucket remains. */
        for (; im->bucket_i <= im->tm->bucket_mask; im->bucket_i++) {
                struct hlist_node *first = im->tm->buckets[im->bucket_i].first;
                if (first != NULL)
                        return hlist_entry(first, struct sw_flow, u.hnode);
        }
        return NULL;
}

static int table_mac_iterator(struct sw_table *swt,
                              struct swt_iterator *swt_iter)
{
        struct swt_iterator_mac *im;

        swt_iter->private = im = kmalloc(sizeof *im, GFP_KERNEL);
        if (im == NULL)
                return 0;

        im->tm = (struct sw_table_mac *) swt;

        if (atomic_read(&im->tm->n_flows) == 0)
                swt_iter->flow = NULL;
        else {
                im->bucket_i = 0;
                swt_iter->flow = next_head_flow(im);
        }

        return 1;
}

static void table_mac_next(struct swt_iterator *swt_iter)
{
        struct swt_iterator_mac *im;
        struct hlist_node *next;

        if (swt_iter->flow == NULL)
                return;

        im = (struct swt_iterator_mac *) swt_iter->private;

        /* Stay within the current bucket's chain if possible; otherwise
           move on to the next nonempty bucket. */
        next = swt_iter->flow->u.hnode.next;
        if (next != NULL) {
                swt_iter->flow = hlist_entry(next, struct sw_flow, u.hnode);
        } else {
                im->bucket_i++;
                swt_iter->flow = next_head_flow(im);
        }
}

static void table_mac_iterator_destroy(struct swt_iterator *swt_iter)
{
        kfree(swt_iter->private);
}

static void table_mac_stats(struct sw_table *swt, struct sw_table_stats *stats)
{
        struct sw_table_mac *tm = (struct sw_table_mac *) swt;
        stats->name = "mac";
        stats->n_flows = atomic_read(&tm->n_flows);
        stats->max_flows = tm->max_flows;
}

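/* Creates a MAC table with 'n_buckets' hash buckets holding at most
   'max_flows' flows.  n_buckets must be a power of 2 because bucket
   selection masks the hash with n_buckets - 1. */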
struct sw_table *table_mac_create(unsigned int n_buckets,
                                  unsigned int max_flows)
{
        struct sw_table_mac *tm;
        struct sw_table *swt;

        tm = kzalloc(sizeof *tm, GFP_KERNEL);
        if (tm == NULL)
                return NULL;

        /* bucket_mask below only works for a power-of-2 bucket count. */
        BUG_ON(n_buckets & (n_buckets - 1));

        tm->buckets = kzalloc(n_buckets * sizeof *tm->buckets, GFP_KERNEL);
        if (tm->buckets == NULL) {
                printk(KERN_ERR "failed to allocate %u buckets\n", n_buckets);
                kfree(tm);
                return NULL;
        }
        tm->bucket_mask = n_buckets - 1;

        swt = &tm->swt;
        swt->lookup = table_mac_lookup;
        swt->insert = table_mac_insert;
        swt->delete = table_mac_delete;
        swt->timeout = table_mac_timeout;
        swt->destroy = table_mac_destroy;
        swt->stats = table_mac_stats;

        swt->iterator = table_mac_iterator;
        swt->iterator_next = table_mac_next;
        swt->iterator_destroy = table_mac_iterator_destroy;

        crc32_init(&tm->crc32, 0x04C11DB7); /* Ethernet CRC polynomial. */
        atomic_set(&tm->n_flows, 0);
        tm->max_flows = max_flows;
        spin_lock_init(&tm->lock);

        return swt;
}
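
/* Usage sketch (illustrative only, not part of the original file): the
   datapath is expected to create this table once and then drive it
   entirely through the generic sw_table function pointers, e.g.:

        struct sw_table *swt = table_mac_create(1024, 1024);
        if (swt != NULL) {
                struct sw_flow *flow = swt->lookup(swt, &key);
                ...
                swt->destroy(swt);
        }

   The 1024/1024 sizing and the 'key' variable here are hypothetical
   placeholders; real values come from the datapath configuration. */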