Implement subnet mask matching in OpenFlow.
[sliver-openvswitch.git] / datapath / table-hash.c
/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
 * Stanford Junior University
 */

#include "table.h"
#include "crc32.h"
#include "flow.h"
#include "datapath.h"

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/pgtable.h>

static void *kmem_alloc(size_t);
static void *kmem_zalloc(size_t);
static void kmem_free(void *, size_t);

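/* A single exact-match hash table.  Each bucket holds at most one flow and
 * there is no chaining, so a flow whose key hashes to an occupied bucket
 * cannot be stored in this table. */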
struct sw_table_hash {
        struct sw_table swt;
        struct crc32 crc32;
        unsigned int n_flows;
        unsigned int bucket_mask; /* Number of buckets minus 1. */
        struct sw_flow **buckets;
};

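/* Returns the bucket that 'key' hashes to: the whole key is run through the
 * table's CRC32 and the result is masked into the bucket array. */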
static struct sw_flow **find_bucket(struct sw_table *swt,
                                    const struct sw_flow_key *key)
{
        struct sw_table_hash *th = (struct sw_table_hash *) swt;
        unsigned int crc = crc32_calculate(&th->crc32, key, sizeof *key);
        return &th->buckets[crc & th->bucket_mask];
}

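/* Exact-match lookup: returns the flow stored in 'key''s bucket if its key is
 * identical to 'key', otherwise NULL. */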
static struct sw_flow *table_hash_lookup(struct sw_table *swt,
                                         const struct sw_flow_key *key)
{
        struct sw_flow *flow = *find_bucket(swt, key);
        return flow && !memcmp(&flow->key, key, sizeof *key) ? flow : NULL;
}

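/* Inserts 'flow', which must be an exact-match flow (no wildcards).  If the
 * bucket already holds a flow with the same key, the old flow is replaced and
 * released with flow_deferred_free(); if it holds a different flow (a hash
 * collision), the insertion fails.  Returns nonzero on success. */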
static int table_hash_insert(struct sw_table *swt, struct sw_flow *flow)
{
        struct sw_table_hash *th = (struct sw_table_hash *) swt;
        struct sw_flow **bucket;
        int retval;

        if (flow->key.wildcards != 0)
                return 0;

        bucket = find_bucket(swt, &flow->key);
        if (*bucket == NULL) {
                th->n_flows++;
                rcu_assign_pointer(*bucket, flow);
                retval = 1;
        } else {
                struct sw_flow *old_flow = *bucket;
                if (!memcmp(&old_flow->key, &flow->key, sizeof flow->key)) {
                        rcu_assign_pointer(*bucket, flow);
                        flow_deferred_free(old_flow);
                        retval = 1;
                } else {
                        retval = 0;
                }
        }
        return retval;
}

/* Caller must update n_flows. */
static int do_delete(struct sw_flow **bucket, struct sw_flow *flow)
{
        rcu_assign_pointer(*bucket, NULL);
        flow_deferred_free(flow);
        return 1;
}

/* Returns number of deleted flows.  We can ignore the priority
 * argument, since all exact-match entries are the same (highest)
 * priority. */
static int table_hash_delete(struct sw_table *swt,
                             const struct sw_flow_key *key,
                             uint16_t priority, int strict)
{
        struct sw_table_hash *th = (struct sw_table_hash *) swt;
        unsigned int count = 0;

        if (key->wildcards == 0) {
                struct sw_flow **bucket = find_bucket(swt, key);
                struct sw_flow *flow = *bucket;
                if (flow && !memcmp(&flow->key, key, sizeof *key))
                        count = do_delete(bucket, flow);
        } else {
                unsigned int i;

                for (i = 0; i <= th->bucket_mask; i++) {
                        struct sw_flow **bucket = &th->buckets[i];
                        struct sw_flow *flow = *bucket;
                        if (flow && flow_del_matches(&flow->key, key, strict))
                                count += do_delete(bucket, flow);
                }
        }
        th->n_flows -= count;
        return count;
}

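/* Expires flows whose timeouts have passed, reporting each expired flow via
 * dp_send_flow_expired().  Returns the number of flows deleted. */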
static int table_hash_timeout(struct datapath *dp, struct sw_table *swt)
{
        struct sw_table_hash *th = (struct sw_table_hash *) swt;
        unsigned int i;
        int count = 0;

        mutex_lock(&dp_mutex);
        for (i = 0; i <= th->bucket_mask; i++) {
                struct sw_flow **bucket = &th->buckets[i];
                struct sw_flow *flow = *bucket;
                if (flow) {
                        int reason = flow_timeout(flow);
                        if (reason >= 0) {
                                count += do_delete(bucket, flow);
                                dp_send_flow_expired(dp, flow, reason);
                        }
                }
        }
        th->n_flows -= count;
        mutex_unlock(&dp_mutex);

        return count;
}

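/* Frees every remaining flow, the bucket array, and the table itself. */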
static void table_hash_destroy(struct sw_table *swt)
{
        struct sw_table_hash *th = (struct sw_table_hash *) swt;
        unsigned int i;
        for (i = 0; i <= th->bucket_mask; i++)
                if (th->buckets[i])
                        flow_free(th->buckets[i]);
        kmem_free(th->buckets, (th->bucket_mask + 1) * sizeof *th->buckets);
        kfree(th);
}

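/* Visits the flows that match 'key', invoking 'callback' on each one until it
 * returns nonzero.  position->private[0] records the next bucket to visit so
 * that iteration can be resumed; an exact-match key degenerates to a single
 * lookup. */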
static int table_hash_iterate(struct sw_table *swt,
                              const struct sw_flow_key *key,
                              struct sw_table_position *position,
                              int (*callback)(struct sw_flow *, void *private),
                              void *private)
{
        struct sw_table_hash *th = (struct sw_table_hash *) swt;

        if (position->private[0] > th->bucket_mask)
                return 0;

        if (key->wildcards == 0) {
                struct sw_flow *flow;
                int error;

                flow = table_hash_lookup(swt, key);
                if (!flow)
                        return 0;

                error = callback(flow, private);
                if (!error)
                        position->private[0] = -1;
                return error;
        } else {
                int i;

                for (i = position->private[0]; i <= th->bucket_mask; i++) {
                        struct sw_flow *flow = th->buckets[i];
                        if (flow && flow_matches_1wild(&flow->key, key)) {
                                int error = callback(flow, private);
                                if (error) {
                                        position->private[0] = i;
                                        return error;
                                }
                        }
                }
                return 0;
        }
}

static void table_hash_stats(struct sw_table *swt,
                             struct sw_table_stats *stats)
{
        struct sw_table_hash *th = (struct sw_table_hash *) swt;
        stats->name = "hash";
        stats->n_flows = th->n_flows;
        stats->max_flows = th->bucket_mask + 1;
        stats->n_matched = swt->n_matched;
}

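/* Creates and returns a hash table that hashes keys with a CRC32 based on
 * 'polynomial' and has 'n_buckets' buckets, which must be a power of 2.
 * Returns NULL if memory cannot be allocated. */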
struct sw_table *table_hash_create(unsigned int polynomial,
                                   unsigned int n_buckets)
{
        struct sw_table_hash *th;
        struct sw_table *swt;

        th = kzalloc(sizeof *th, GFP_KERNEL);
        if (th == NULL)
                return NULL;

        BUG_ON(n_buckets & (n_buckets - 1));
        th->buckets = kmem_zalloc(n_buckets * sizeof *th->buckets);
        if (th->buckets == NULL) {
                printk(KERN_ERR "failed to allocate %u buckets\n", n_buckets);
                kfree(th);
                return NULL;
        }
        th->bucket_mask = n_buckets - 1;

        swt = &th->swt;
        swt->lookup = table_hash_lookup;
        swt->insert = table_hash_insert;
        swt->delete = table_hash_delete;
        swt->timeout = table_hash_timeout;
        swt->destroy = table_hash_destroy;
        swt->iterate = table_hash_iterate;
        swt->stats = table_hash_stats;

        crc32_init(&th->crc32, polynomial);
        th->n_flows = 0;

        return swt;
}

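/*
 * Illustration only (not from the original source): given some exact-match
 * "struct sw_flow *flow", a hypothetical caller could build and use a table
 * roughly like this, with made-up polynomial and bucket-count values:
 *
 *     struct sw_table *t = table_hash_create(0x04c11db7, 1024);
 *     if (t && !t->insert(t, flow))
 *             printk("hash collision, flow not inserted\n");
 */
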
/* Double-hashing table: two independent exact-match hash tables with
 * different CRC32 polynomials.  A flow that collides in the first subtable
 * can still be stored in the second, reducing the number of insertions lost
 * to collisions. */

struct sw_table_hash2 {
        struct sw_table swt;
        struct sw_table *subtable[2];
};

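/* Looks 'key' up in each subtable in turn and returns the first exact match,
 * or NULL if neither subtable holds it. */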
static struct sw_flow *table_hash2_lookup(struct sw_table *swt,
                                          const struct sw_flow_key *key)
{
        struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
        int i;

        for (i = 0; i < 2; i++) {
                struct sw_flow *flow = *find_bucket(t2->subtable[i], key);
                if (flow && !memcmp(&flow->key, key, sizeof *key))
                        return flow;
        }
        return NULL;
}

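/* Tries to insert 'flow' into the first subtable, falling back to the second
 * if the first already holds a different flow in the target bucket. */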
static int table_hash2_insert(struct sw_table *swt, struct sw_flow *flow)
{
        struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;

        if (table_hash_insert(t2->subtable[0], flow))
                return 1;
        return table_hash_insert(t2->subtable[1], flow);
}

static int table_hash2_delete(struct sw_table *swt,
                              const struct sw_flow_key *key,
                              uint16_t priority, int strict)
{
        struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
        return (table_hash_delete(t2->subtable[0], key, priority, strict)
                + table_hash_delete(t2->subtable[1], key, priority, strict));
}

static int table_hash2_timeout(struct datapath *dp, struct sw_table *swt)
{
        struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
        return (table_hash_timeout(dp, t2->subtable[0])
                + table_hash_timeout(dp, t2->subtable[1]));
}

static void table_hash2_destroy(struct sw_table *swt)
{
        struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
        table_hash_destroy(t2->subtable[0]);
        table_hash_destroy(t2->subtable[1]);
        kfree(t2);
}

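/* Iterates over both subtables.  position->private[1] records the subtable to
 * resume from; private[0] is the bucket index within that subtable. */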
static int table_hash2_iterate(struct sw_table *swt,
                               const struct sw_flow_key *key,
                               struct sw_table_position *position,
                               int (*callback)(struct sw_flow *, void *),
                               void *private)
{
        struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
        int i;

        for (i = position->private[1]; i < 2; i++) {
                int error = table_hash_iterate(t2->subtable[i], key, position,
                                               callback, private);
                if (error) {
                        return error;
                }
                position->private[0] = 0;
                position->private[1]++;
        }
        return 0;
}

static void table_hash2_stats(struct sw_table *swt,
                              struct sw_table_stats *stats)
{
        struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
        struct sw_table_stats substats[2];
        int i;

        for (i = 0; i < 2; i++)
                table_hash_stats(t2->subtable[i], &substats[i]);
        stats->name = "hash2";
        stats->n_flows = substats[0].n_flows + substats[1].n_flows;
        stats->max_flows = substats[0].max_flows + substats[1].max_flows;
        stats->n_matched = swt->n_matched;
}

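/* Creates a double-hashing table whose two subtables use 'poly0' and 'poly1'
 * as CRC32 polynomials and have 'buckets0' and 'buckets1' buckets (each a
 * power of 2).  Returns NULL if memory cannot be allocated. */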
struct sw_table *table_hash2_create(unsigned int poly0, unsigned int buckets0,
                                    unsigned int poly1, unsigned int buckets1)
{
        struct sw_table_hash2 *t2;
        struct sw_table *swt;

        t2 = kzalloc(sizeof *t2, GFP_KERNEL);
        if (t2 == NULL)
                return NULL;

        t2->subtable[0] = table_hash_create(poly0, buckets0);
        if (t2->subtable[0] == NULL)
                goto out_free_t2;

        t2->subtable[1] = table_hash_create(poly1, buckets1);
        if (t2->subtable[1] == NULL)
                goto out_free_subtable0;

        swt = &t2->swt;
        swt->lookup = table_hash2_lookup;
        swt->insert = table_hash2_insert;
        swt->delete = table_hash2_delete;
        swt->timeout = table_hash2_timeout;
        swt->destroy = table_hash2_destroy;
        swt->iterate = table_hash2_iterate;
        swt->stats = table_hash2_stats;

        return swt;

out_free_subtable0:
        table_hash_destroy(t2->subtable[0]);
out_free_t2:
        kfree(t2);
        return NULL;
}

/* From fs/xfs/linux-2.4/kmem.c. */

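/* Allocates 'size' bytes, preferring kmalloc but falling back to vmalloc when
 * kmalloc fails; sizes above KMALLOC_MAX_SIZE are refused outright. */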
static void *
kmem_alloc(size_t size)
{
        void *ptr;

#ifdef KMALLOC_MAX_SIZE
        if (size > KMALLOC_MAX_SIZE)
                return NULL;
#endif
        ptr = kmalloc(size, GFP_KERNEL);
        if (!ptr) {
                ptr = vmalloc(size);
                if (ptr)
                        printk(KERN_NOTICE "openflow: used vmalloc for %lu bytes\n",
                               (unsigned long)size);
        }
        return ptr;
}

static void *
kmem_zalloc(size_t size)
{
        void *ptr = kmem_alloc(size);
        if (ptr)
                memset(ptr, 0, size);
        return ptr;
}

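/* Frees memory obtained from kmem_alloc() or kmem_zalloc(), using the
 * pointer's address to decide whether it came from kmalloc or vmalloc. */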
static void
kmem_free(void *ptr, size_t size)
{
        if (((unsigned long)ptr < VMALLOC_START) ||
            ((unsigned long)ptr >= VMALLOC_END)) {
                kfree(ptr);
        } else {
                vfree(ptr);
        }
}