2 * Distributed under the terms of the GNU GPL version 2.
3 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
4 * Stanford Junior University
12 #include <linux/slab.h>
13 #include <linux/vmalloc.h>
15 #include <linux/highmem.h>
16 #include <asm/pgtable.h>
/* Allocators that fall back to vmalloc() for requests too large for
 * kmalloc(); definitions are at the bottom of this file. */
static void *kmem_alloc(size_t);
static void *kmem_zalloc(size_t);
static void kmem_free(void *, size_t);
22 struct sw_table_hash {
27 unsigned int bucket_mask; /* Number of buckets minus 1. */
28 struct sw_flow **buckets;
31 static struct sw_flow **find_bucket(struct sw_table *swt,
32 const struct sw_flow_key *key)
34 struct sw_table_hash *th = (struct sw_table_hash *) swt;
35 unsigned int crc = crc32_calculate(&th->crc32, key, sizeof *key);
36 return &th->buckets[crc & th->bucket_mask];
39 static struct sw_flow *table_hash_lookup(struct sw_table *swt,
40 const struct sw_flow_key *key)
42 struct sw_flow *flow = *find_bucket(swt, key);
43 return flow && !memcmp(&flow->key, key, sizeof *key) ? flow : NULL;
46 static int table_hash_insert(struct sw_table *swt, struct sw_flow *flow)
48 struct sw_table_hash *th = (struct sw_table_hash *) swt;
49 struct sw_flow **bucket;
50 unsigned long int flags;
53 if (flow->key.wildcards != 0)
56 spin_lock_irqsave(&th->lock, flags);
57 bucket = find_bucket(swt, &flow->key);
58 if (*bucket == NULL) {
59 atomic_inc(&th->n_flows);
60 rcu_assign_pointer(*bucket, flow);
63 struct sw_flow *old_flow = *bucket;
64 if (!memcmp(&old_flow->key, &flow->key, sizeof flow->key)
65 && flow_del(old_flow)) {
66 rcu_assign_pointer(*bucket, flow);
67 flow_deferred_free(old_flow);
73 spin_unlock_irqrestore(&th->lock, flags);
77 /* Caller must update n_flows. */
78 static int do_delete(struct sw_flow **bucket, struct sw_flow *flow)
81 rcu_assign_pointer(*bucket, NULL);
82 flow_deferred_free(flow);
88 /* Returns number of deleted flows. We can ignore the priority
89 * argument, since all exact-match entries are the same (highest)
91 static int table_hash_delete(struct sw_table *swt,
92 const struct sw_flow_key *key,
93 uint16_t priority, int strict)
95 struct sw_table_hash *th = (struct sw_table_hash *) swt;
96 unsigned int count = 0;
98 if (key->wildcards == 0) {
99 struct sw_flow **bucket = find_bucket(swt, key);
100 struct sw_flow *flow = *bucket;
101 if (flow && !memcmp(&flow->key, key, sizeof *key))
102 count = do_delete(bucket, flow);
106 for (i = 0; i <= th->bucket_mask; i++) {
107 struct sw_flow **bucket = &th->buckets[i];
108 struct sw_flow *flow = *bucket;
109 if (flow && flow_del_matches(&flow->key, key, strict))
110 count += do_delete(bucket, flow);
114 atomic_sub(count, &th->n_flows);
118 static int table_hash_timeout(struct datapath *dp, struct sw_table *swt)
120 struct sw_table_hash *th = (struct sw_table_hash *) swt;
124 for (i = 0; i <= th->bucket_mask; i++) {
125 struct sw_flow **bucket = &th->buckets[i];
126 struct sw_flow *flow = *bucket;
127 if (flow && flow_timeout(flow)) {
128 count += do_delete(bucket, flow);
129 if (dp->flags & OFPC_SEND_FLOW_EXP)
130 dp_send_flow_expired(dp, flow);
135 atomic_sub(count, &th->n_flows);
139 static void table_hash_destroy(struct sw_table *swt)
141 struct sw_table_hash *th = (struct sw_table_hash *) swt;
143 for (i = 0; i <= th->bucket_mask; i++)
145 flow_free(th->buckets[i]);
146 kmem_free(th->buckets, (th->bucket_mask + 1) * sizeof *th->buckets);
150 static int table_hash_iterate(struct sw_table *swt,
151 const struct sw_flow_key *key,
152 struct sw_table_position *position,
153 int (*callback)(struct sw_flow *, void *private),
156 struct sw_table_hash *th = (struct sw_table_hash *) swt;
158 if (position->private[0] > th->bucket_mask)
161 if (key->wildcards == 0) {
162 struct sw_flow *flow = table_hash_lookup(swt, key);
163 position->private[0] = -1;
164 return flow ? callback(flow, private) : 0;
168 for (i = position->private[0]; i <= th->bucket_mask; i++) {
169 struct sw_flow *flow = th->buckets[i];
170 if (flow && flow_matches(key, &flow->key)) {
171 int error = callback(flow, private);
173 position->private[0] = i + 1;
181 static void table_hash_stats(struct sw_table *swt,
182 struct sw_table_stats *stats)
184 struct sw_table_hash *th = (struct sw_table_hash *) swt;
185 stats->name = "hash";
186 stats->n_flows = atomic_read(&th->n_flows);
187 stats->max_flows = th->bucket_mask + 1;
190 struct sw_table *table_hash_create(unsigned int polynomial,
191 unsigned int n_buckets)
193 struct sw_table_hash *th;
194 struct sw_table *swt;
196 th = kmalloc(sizeof *th, GFP_KERNEL);
200 BUG_ON(n_buckets & (n_buckets - 1));
201 th->buckets = kmem_zalloc(n_buckets * sizeof *th->buckets);
202 if (th->buckets == NULL) {
203 printk("failed to allocate %u buckets\n", n_buckets);
207 th->bucket_mask = n_buckets - 1;
210 swt->lookup = table_hash_lookup;
211 swt->insert = table_hash_insert;
212 swt->delete = table_hash_delete;
213 swt->timeout = table_hash_timeout;
214 swt->destroy = table_hash_destroy;
215 swt->iterate = table_hash_iterate;
216 swt->stats = table_hash_stats;
218 spin_lock_init(&th->lock);
219 crc32_init(&th->crc32, polynomial);
220 atomic_set(&th->n_flows, 0);
225 /* Double-hashing table. */
227 struct sw_table_hash2 {
229 struct sw_table *subtable[2];
232 static struct sw_flow *table_hash2_lookup(struct sw_table *swt,
233 const struct sw_flow_key *key)
235 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
238 for (i = 0; i < 2; i++) {
239 struct sw_flow *flow = *find_bucket(t2->subtable[i], key);
240 if (flow && !memcmp(&flow->key, key, sizeof *key))
246 static int table_hash2_insert(struct sw_table *swt, struct sw_flow *flow)
248 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
250 if (table_hash_insert(t2->subtable[0], flow))
252 return table_hash_insert(t2->subtable[1], flow);
255 static int table_hash2_delete(struct sw_table *swt,
256 const struct sw_flow_key *key,
257 uint16_t priority, int strict)
259 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
260 return (table_hash_delete(t2->subtable[0], key, priority, strict)
261 + table_hash_delete(t2->subtable[1], key, priority, strict));
264 static int table_hash2_timeout(struct datapath *dp, struct sw_table *swt)
266 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
267 return (table_hash_timeout(dp, t2->subtable[0])
268 + table_hash_timeout(dp, t2->subtable[1]));
271 static void table_hash2_destroy(struct sw_table *swt)
273 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
274 table_hash_destroy(t2->subtable[0]);
275 table_hash_destroy(t2->subtable[1]);
279 static int table_hash2_iterate(struct sw_table *swt,
280 const struct sw_flow_key *key,
281 struct sw_table_position *position,
282 int (*callback)(struct sw_flow *, void *),
285 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
288 for (i = position->private[1]; i < 2; i++) {
289 int error = table_hash_iterate(t2->subtable[i], key, position,
294 position->private[0] = 0;
295 position->private[1]++;
300 static void table_hash2_stats(struct sw_table *swt,
301 struct sw_table_stats *stats)
303 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
304 struct sw_table_stats substats[2];
307 for (i = 0; i < 2; i++)
308 table_hash_stats(t2->subtable[i], &substats[i]);
309 stats->name = "hash2";
310 stats->n_flows = substats[0].n_flows + substats[1].n_flows;
311 stats->max_flows = substats[0].max_flows + substats[1].max_flows;
314 struct sw_table *table_hash2_create(unsigned int poly0, unsigned int buckets0,
315 unsigned int poly1, unsigned int buckets1)
318 struct sw_table_hash2 *t2;
319 struct sw_table *swt;
321 t2 = kmalloc(sizeof *t2, GFP_KERNEL);
325 t2->subtable[0] = table_hash_create(poly0, buckets0);
326 if (t2->subtable[0] == NULL)
329 t2->subtable[1] = table_hash_create(poly1, buckets1);
330 if (t2->subtable[1] == NULL)
331 goto out_free_subtable0;
334 swt->lookup = table_hash2_lookup;
335 swt->insert = table_hash2_insert;
336 swt->delete = table_hash2_delete;
337 swt->timeout = table_hash2_timeout;
338 swt->destroy = table_hash2_destroy;
339 swt->iterate = table_hash2_iterate;
340 swt->stats = table_hash2_stats;
345 table_hash_destroy(t2->subtable[0]);
351 /* From fs/xfs/linux-2.4/kmem.c. */
354 kmem_alloc(size_t size)
358 #ifdef KMALLOC_MAX_SIZE
359 if (size > KMALLOC_MAX_SIZE)
362 ptr = kmalloc(size, GFP_KERNEL);
366 printk("openflow: used vmalloc for %lu bytes\n",
367 (unsigned long)size);
/* Allocates 'size' zeroed bytes via kmem_alloc().  Returns NULL on
 * failure. */
static void *
kmem_zalloc(size_t size)
{
	void *ptr = kmem_alloc(size);
	if (ptr)
		memset(ptr, 0, size);
	return ptr;
}
/* Frees memory obtained from kmem_alloc()/kmem_zalloc(), choosing the
 * matching deallocator by testing whether 'ptr' falls inside the
 * vmalloc address range.  NOTE(review): the body continues past this
 * excerpt — presumably a kfree() branch here and a vfree() branch for
 * vmalloc-range pointers; confirm against the full file. */
382 kmem_free(void *ptr, size_t size)
384 if (((unsigned long)ptr < VMALLOC_START) ||
385 ((unsigned long)ptr >= VMALLOC_END)) {