2 * Distributed under the terms of the GNU GPL version 2.
3 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
4 * Stanford Junior University
12 #include <linux/slab.h>
13 #include <linux/vmalloc.h>
15 #include <linux/highmem.h>
16 #include <asm/pgtable.h>
/* Forward declarations for the allocation helpers defined at the bottom of
 * this file (adapted from fs/xfs): kmalloc with a vmalloc fallback for
 * large sizes, plus zeroing and matching-free wrappers. */
18 static void *kmem_alloc(size_t);
19 static void *kmem_zalloc(size_t);
20 static void kmem_free(void *, size_t);
/* Exact-match hash table: an array of buckets, each holding at most ONE
 * flow pointer (no chaining — a colliding insert replaces the occupant,
 * see table_hash_insert).  The bucket count is a power of two, so
 * (crc & bucket_mask) selects a bucket directly.
 * NOTE(review): additional members (the embedded struct sw_table header,
 * the crc32 state, and n_flows used elsewhere in this file) appear elided
 * from this listing — confirm against the full source. */
22 struct sw_table_hash {
26 	unsigned int bucket_mask; /* Number of buckets minus 1. */
27 	struct sw_flow **buckets;
/* Hash the exact-match prefix of *key — every field before 'wildcards',
 * via offsetof() — with this table's CRC-32 and return a pointer to the
 * bucket slot it maps to.  Callers dereference or rcu_assign_pointer()
 * through the returned slot. */
30 static struct sw_flow **find_bucket(struct sw_table *swt,
31 		const struct sw_flow_key *key)
33 	struct sw_table_hash *th = (struct sw_table_hash *) swt;
34 	unsigned int crc = crc32_calculate(&th->crc32, key,
35 				offsetof(struct sw_flow_key, wildcards));
36 	return &th->buckets[crc & th->bucket_mask];
/* Exact-match lookup: return the flow occupying *key's bucket iff its key
 * compares equal to *key, else NULL.  Because each bucket holds at most
 * one flow, a single key comparison suffices to reject hash collisions. */
39 static struct sw_flow *table_hash_lookup(struct sw_table *swt,
40 					 const struct sw_flow_key *key)
42 	struct sw_flow *flow = *find_bucket(swt, key);
43 	return flow && flow_keys_equal(&flow->key, key) ? flow : NULL;
/* Insert an exact-match flow.  Flows with any wildcard bits are rejected
 * (this table only supports exact matches).  An empty bucket is claimed
 * with rcu_assign_pointer(); a bucket occupied by a flow with an EQUAL key
 * is replaced, with the old flow freed after an RCU grace period.
 * NOTE(review): the return statements, the hash-collision (unequal key)
 * branch, and the n_flows bookkeeping appear elided from this listing —
 * confirm the reject/retain behavior against the full source. */
46 static int table_hash_insert(struct sw_table *swt, struct sw_flow *flow)
48 	struct sw_table_hash *th = (struct sw_table_hash *) swt;
49 	struct sw_flow **bucket;
/* Exact-match table: refuse wildcarded flows. */
52 	if (flow->key.wildcards != 0)
55 	bucket = find_bucket(swt, &flow->key);
56 	if (*bucket == NULL) {
/* Publish the new flow to concurrent RCU readers. */
58 		rcu_assign_pointer(*bucket, flow);
61 		struct sw_flow *old_flow = *bucket;
62 		if (flow_keys_equal(&old_flow->key, &flow->key)) {
/* Same key: atomically replace, deferred-free the displaced flow. */
63 			rcu_assign_pointer(*bucket, flow);
64 			flow_deferred_free(old_flow);
/* Replace the action list of every flow matching *key (and, when 'strict',
 * also matching 'priority').  Exact-match keys (wildcards == 0) take the
 * O(1) single-bucket fast path; wildcarded queries scan every bucket.
 * Presumably returns the number of flows modified via 'count' — the
 * count increments and return statement appear elided from this listing. */
73 static int table_hash_modify(struct sw_table *swt,
74 		const struct sw_flow_key *key, uint16_t priority, int strict,
75 		const struct ofp_action_header *actions, size_t actions_len)
77 	struct sw_table_hash *th = (struct sw_table_hash *) swt;
78 	unsigned int count = 0;
/* Fast path: an exact-match query can touch at most one bucket. */
80 	if (key->wildcards == 0) {
81 		struct sw_flow **bucket = find_bucket(swt, key);
82 		struct sw_flow *flow = *bucket;
83 		if (flow && flow_matches_desc(&flow->key, key, strict)
84 				&& (!strict || (flow->priority == priority))) {
85 			flow_replace_acts(flow, actions, actions_len);
/* Slow path: wildcarded query — scan the whole bucket array. */
91 		for (i = 0; i <= th->bucket_mask; i++) {
92 			struct sw_flow **bucket = &th->buckets[i];
93 			struct sw_flow *flow = *bucket;
94 			if (flow && flow_matches_desc(&flow->key, key, strict)
95 					&& (!strict || (flow->priority == priority))) {
96 				flow_replace_acts(flow, actions, actions_len);
104 /* Caller must update n_flows. */
/* Remove 'flow' from its bucket: notify the controller of the flow's end
 * (with 'reason'), unpublish the bucket slot for RCU readers, and free the
 * flow after a grace period.  Presumably returns 1 (the number of flows
 * deleted) — the return statement appears elided from this listing. */
105 static int do_delete(struct datapath *dp, struct sw_flow **bucket,
106 		struct sw_flow *flow, enum nx_flow_end_reason reason)
108 	dp_send_flow_end(dp, flow, reason);
109 	rcu_assign_pointer(*bucket, NULL);
110 	flow_deferred_free(flow);
114 /* Returns number of deleted flows.  We can ignore the priority
115  * argument, since all exact-match entries are the same (highest)
/* Exact-match keys delete at most the single flow in their bucket (note
 * the exact path uses flow_keys_equal, ignoring 'strict'); wildcarded
 * keys scan every bucket with flow_matches_desc.  Deletions are further
 * filtered by out_port via flow_has_out_port. */
117 static int table_hash_delete(struct datapath *dp, struct sw_table *swt,
118 			const struct sw_flow_key *key, uint16_t out_port,
119 			uint16_t priority, int strict)
121 	struct sw_table_hash *th = (struct sw_table_hash *) swt;
122 	unsigned int count = 0;
124 	if (key->wildcards == 0) {
125 		struct sw_flow **bucket = find_bucket(swt, key);
126 		struct sw_flow *flow = *bucket;
127 		if (flow && flow_keys_equal(&flow->key, key)
128 				&& flow_has_out_port(flow, out_port))
129 			count = do_delete(dp, bucket, flow, NXFER_DELETE);
133 		for (i = 0; i <= th->bucket_mask; i++) {
134 			struct sw_flow **bucket = &th->buckets[i];
135 			struct sw_flow *flow = *bucket;
136 			if (flow && flow_matches_desc(&flow->key, key, strict)
137 					&& flow_has_out_port(flow, out_port))
138 				count += do_delete(dp, bucket, flow, NXFER_DELETE);
/* Per do_delete's contract, the caller maintains n_flows. */
141 	th->n_flows -= count;
/* Scan every bucket under dp_mutex, expiring flows whose timeout has
 * elapsed, and subtract the expired count from n_flows.
 * NOTE(review): this listing omits lines between the bucket read and the
 * do_delete call — presumably a non-NULL flow check and a test that
 * flow_timeout() reported an actual expiry before deleting.  Confirm
 * against the full source; as shown, 'reason' is passed to do_delete
 * unconditionally. */
145 static int table_hash_timeout(struct datapath *dp, struct sw_table *swt)
147 	struct sw_table_hash *th = (struct sw_table_hash *) swt;
/* dp_mutex serializes expiry against other datapath mutators. */
151 	mutex_lock(&dp_mutex);
152 	for (i = 0; i <= th->bucket_mask; i++) {
153 		struct sw_flow **bucket = &th->buckets[i];
154 		struct sw_flow *flow = *bucket;
156 			int reason = flow_timeout(flow);
158 				count += do_delete(dp, bucket, flow, reason);
162 	th->n_flows -= count;
163 	mutex_unlock(&dp_mutex);
/* Tear the table down: free every resident flow, then release the bucket
 * array through kmem_free (which needs the original allocation size to
 * pick kfree vs vfree).  NOTE(review): freeing of the sw_table_hash
 * structure itself is not visible in this listing — confirm it happens in
 * the elided tail of this function. */
168 static void table_hash_destroy(struct sw_table *swt)
170 	struct sw_table_hash *th = (struct sw_table_hash *) swt;
172 	for (i = 0; i <= th->bucket_mask; i++)
174 			flow_free(th->buckets[i]);
175 	kmem_free(th->buckets, (th->bucket_mask + 1) * sizeof *th->buckets);
/* Invoke 'callback' on each flow matching 'key' (filtered by out_port),
 * resuming from position->private[0] so a caller can iterate in chunks.
 * Exact-match keys visit at most the one matching flow and then mark the
 * position exhausted (private[0] = -1, which compares above bucket_mask
 * as unsigned on the next call); wildcarded keys walk the bucket array,
 * recording the current bucket index in private[0] whenever the callback
 * asks to stop.  NOTE(review): the returns propagating 'error' appear
 * elided from this listing. */
179 static int table_hash_iterate(struct sw_table *swt,
180 			      const struct sw_flow_key *key, uint16_t out_port,
181 			      struct sw_table_position *position,
182 			      int (*callback)(struct sw_flow *, void *private),
185 	struct sw_table_hash *th = (struct sw_table_hash *) swt;
/* Position already past the table (including the -1 exhausted marker). */
187 	if (position->private[0] > th->bucket_mask)
190 	if (key->wildcards == 0) {
191 		struct sw_flow *flow;
194 		flow = table_hash_lookup(swt, key);
195 		if (!flow || !flow_has_out_port(flow, out_port))
198 		error = callback(flow, private);
/* Exact-match iteration yields one flow at most; mark as finished. */
200 			position->private[0] = -1;
205 		for (i = position->private[0]; i <= th->bucket_mask; i++) {
206 			struct sw_flow *flow = th->buckets[i];
207 			if (flow && flow_matches_1wild(&flow->key, key)
208 			    && flow_has_out_port(flow, out_port)) {
209 				int error = callback(flow, private);
/* Remember where to resume if the callback stopped the walk. */
211 					position->private[0] = i;
/* Fill *stats for this table.  Capacity equals the bucket count since
 * each bucket holds at most one flow; lookup/match counters come from the
 * generic sw_table header. */
219 static void table_hash_stats(struct sw_table *swt,
220 			     struct sw_table_stats *stats)
222 	struct sw_table_hash *th = (struct sw_table_hash *) swt;
223 	stats->name = "hash";
224 	stats->wildcards = 0;          /* No wildcards are supported. */
225 	stats->n_flows   = th->n_flows;
226 	stats->max_flows = th->bucket_mask + 1;
227 	stats->n_lookup  = swt->n_lookup;
228 	stats->n_matched = swt->n_matched;
/* Allocate and initialize an exact-match hash table with 'n_buckets'
 * buckets (MUST be a power of two — enforced by the BUG_ON) and a CRC-32
 * parameterized by 'polynomial'.  The bucket array goes through
 * kmem_zalloc so very large tables can fall back to vmalloc.
 * NOTE(review): the NULL-check/cleanup paths after kzalloc and the final
 * return are elided from this listing; presumably returns &th->swt on
 * success and NULL on allocation failure — confirm. */
231 struct sw_table *table_hash_create(unsigned int polynomial,
232 				   unsigned int n_buckets)
234 	struct sw_table_hash *th;
235 	struct sw_table *swt;
237 	th = kzalloc(sizeof *th, GFP_KERNEL);
/* Power-of-two bucket count is required for the bucket_mask trick. */
241 	BUG_ON(n_buckets & (n_buckets - 1));
242 	th->buckets = kmem_zalloc(n_buckets * sizeof *th->buckets);
243 	if (th->buckets == NULL) {
244 		printk("failed to allocate %u buckets\n", n_buckets);
248 	th->bucket_mask = n_buckets - 1;
/* Wire up the sw_table vtable with this implementation's operations. */
251 	swt->lookup	= table_hash_lookup;
252 	swt->insert	= table_hash_insert;
253 	swt->delete	= table_hash_delete;
254 	swt->timeout	= table_hash_timeout;
255 	swt->destroy	= table_hash_destroy;
256 	swt->iterate	= table_hash_iterate;
257 	swt->stats	= table_hash_stats;
259 	crc32_init(&th->crc32, polynomial);
265 /* Double-hashing table. */
/* Composes two independently-hashed sw_table_hash subtables (different
 * CRC polynomials/sizes) so a key colliding in one table usually finds a
 * free bucket in the other.  NOTE(review): the embedded struct sw_table
 * header and closing brace appear elided from this listing. */
267 struct sw_table_hash2 {
269 	struct sw_table *subtable[2];
/* Probe both subtables' buckets for an exact key match; presumably
 * returns the first hit, or NULL after both miss (the return statements
 * are elided from this listing). */
272 static struct sw_flow *table_hash2_lookup(struct sw_table *swt,
273 					  const struct sw_flow_key *key)
275 	struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
278 	for (i = 0; i < 2; i++) {
279 		struct sw_flow *flow = *find_bucket(t2->subtable[i], key);
280 		if (flow && flow_keys_equal(&flow->key, key))
/* Try subtable 0 first; only on its refusal fall through to subtable 1.
 * NOTE(review): the success-return between the two attempts is elided
 * from this listing — presumably 'return 1;' when subtable 0 accepts. */
286 static int table_hash2_insert(struct sw_table *swt, struct sw_flow *flow)
288 	struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
290 	if (table_hash_insert(t2->subtable[0], flow))
292 	return table_hash_insert(t2->subtable[1], flow);
/* Apply the modification to both subtables and return the total number of
 * flows changed (a given exact-match flow lives in only one subtable, so
 * the sum does not double-count). */
295 static int table_hash2_modify(struct sw_table *swt,
296 		const struct sw_flow_key *key, uint16_t priority, int strict,
297 		const struct ofp_action_header *actions, size_t actions_len)
299 	struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
300 	return (table_hash_modify(t2->subtable[0], key, priority, strict,
301 					actions, actions_len)
302 		+ table_hash_modify(t2->subtable[1], key, priority, strict,
303 					actions, actions_len));
/* Delete from both subtables and return the summed deletion count.  Some
 * argument lines (out_port parameter, priority/strict forwarding) are
 * elided from this listing. */
306 static int table_hash2_delete(struct datapath *dp, struct sw_table *swt,
307 			      const struct sw_flow_key *key,
309 			      uint16_t priority, int strict)
311 	struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
312 	return (table_hash_delete(dp, t2->subtable[0], key, out_port,
314 		+ table_hash_delete(dp, t2->subtable[1], key, out_port,
/* Expire timed-out flows in both subtables; return the total expired. */
318 static int table_hash2_timeout(struct datapath *dp, struct sw_table *swt)
320 	struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
321 	return (table_hash_timeout(dp, t2->subtable[0])
322 		+ table_hash_timeout(dp, t2->subtable[1]));
/* Destroy both subtables.  NOTE(review): freeing of the sw_table_hash2
 * wrapper itself is not visible here — confirm it happens in the elided
 * tail of this function. */
325 static void table_hash2_destroy(struct sw_table *swt)
327 	struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
328 	table_hash_destroy(t2->subtable[0]);
329 	table_hash_destroy(t2->subtable[1]);
/* Iterate the subtables in order, using position->private[1] as the
 * subtable index and private[0] as the bucket cursor within it (reset to
 * 0 when advancing to the next subtable).  Presumably propagates a
 * nonzero callback error immediately — that return is elided from this
 * listing. */
333 static int table_hash2_iterate(struct sw_table *swt,
334 			       const struct sw_flow_key *key, uint16_t out_port,
335 			       struct sw_table_position *position,
336 			       int (*callback)(struct sw_flow *, void *),
339 	struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
342 	for (i = position->private[1]; i < 2; i++) {
343 		int error = table_hash_iterate(t2->subtable[i], key, out_port,
344 					       position, callback, private);
/* Finished this subtable: rewind the bucket cursor, move to the next. */
348 		position->private[0] = 0;
349 		position->private[1]++;
/* Aggregate stats from both subtables: flow and capacity counts are
 * summed; lookup/match counters come from this table's own sw_table
 * header, not the subtables'. */
354 static void table_hash2_stats(struct sw_table *swt,
355 			      struct sw_table_stats *stats)
357 	struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
358 	struct sw_table_stats substats[2];
361 	for (i = 0; i < 2; i++)
362 		table_hash_stats(t2->subtable[i], &substats[i]);
363 	stats->name = "hash2";
364 	stats->wildcards = 0;          /* No wildcards are supported. */
365 	stats->n_flows   = substats[0].n_flows + substats[1].n_flows;
366 	stats->max_flows = substats[0].max_flows + substats[1].max_flows;
367 	stats->n_lookup = swt->n_lookup;
368 	stats->n_matched = swt->n_matched;
/* Build a double-hash table from two independently parameterized
 * subtables (polynomial + bucket count each).  Cleanup unwinds in reverse:
 * the code at the bottom (reached via the elided out_free_subtable0 label)
 * destroys subtable[0] when creating subtable[1] fails.
 * NOTE(review): the kzalloc NULL check, the goto labels, and the returns
 * are elided from this listing — presumably returns &t2->swt on success
 * and NULL on failure; confirm against the full source. */
371 struct sw_table *table_hash2_create(unsigned int poly0, unsigned int buckets0,
372 				    unsigned int poly1, unsigned int buckets1)
375 	struct sw_table_hash2 *t2;
376 	struct sw_table *swt;
378 	t2 = kzalloc(sizeof *t2, GFP_KERNEL);
382 	t2->subtable[0] = table_hash_create(poly0, buckets0);
383 	if (t2->subtable[0] == NULL)
386 	t2->subtable[1] = table_hash_create(poly1, buckets1);
387 	if (t2->subtable[1] == NULL)
388 		goto out_free_subtable0;
/* Wire up the sw_table vtable with the hash2 dispatch operations. */
391 	swt->lookup	= table_hash2_lookup;
392 	swt->insert	= table_hash2_insert;
393 	swt->modify	= table_hash2_modify;
394 	swt->delete	= table_hash2_delete;
395 	swt->timeout	= table_hash2_timeout;
396 	swt->destroy	= table_hash2_destroy;
397 	swt->iterate	= table_hash2_iterate;
398 	swt->stats	= table_hash2_stats;
/* Error path: undo subtable[0] before bailing out. */
403 	table_hash_destroy(t2->subtable[0]);
409 /* From fs/xfs/linux-2.4/kmem.c. */
/* Allocate 'size' bytes: kmalloc for sizes the slab allocator can serve,
 * with a vmalloc fallback (and a diagnostic printk) for larger requests
 * when KMALLOC_MAX_SIZE is defined.  The elided lines presumably perform
 * the vmalloc call and return — confirm against the full source.  Pair
 * every allocation with kmem_free(ptr, size), which needs the size to
 * choose kfree vs vfree. */
412 kmem_alloc(size_t size)
416 #ifdef KMALLOC_MAX_SIZE
417 	if (size > KMALLOC_MAX_SIZE)
420 	ptr = kmalloc(size, GFP_KERNEL);
424 		printk("openflow: used vmalloc for %lu bytes\n",
425 		       (unsigned long)size);
/* kmem_alloc plus zero-initialization.
 * NOTE(review): a NULL guard before the memset appears elided from this
 * listing (line 434 of the original) — as shown, an allocation failure
 * would dereference NULL.  Confirm the guard exists in the full source. */
431 kmem_zalloc(size_t size)
433 	void *ptr = kmem_alloc(size);
435 		memset(ptr, 0, size);
440 kmem_free(void *ptr, size_t size)
442 if (((unsigned long)ptr < VMALLOC_START) ||
443 ((unsigned long)ptr >= VMALLOC_END)) {