/*
- * Copyright (c) 2009, 2010 Nicira Networks.
+ * Copyright (c) 2009, 2010, 2011 Nicira Networks.
* Distributed under the terms of the GNU GPL version 2.
*
* Significant portions of this file may be copied from parts of the Linux
#include "datapath.h"
#include "table.h"
+#include <linux/genetlink.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/mm.h>
struct tbl_node *objs[];
};
+/* Reader-side accessor for an RCU-protected bucket pointer.  The
+ * rcu_dereference_check() condition spells out the two contexts in which
+ * dereferencing is legal here: inside an RCU read-side critical section
+ * (rcu_read_lock_held()) or with the genl mutex held
+ * (lockdep_genl_is_held()). */
+static struct tbl_bucket *get_bucket(struct tbl_bucket __rcu *bucket)
+{
+ return rcu_dereference_check(bucket, rcu_read_lock_held() ||
+ lockdep_genl_is_held());
+}
+
+/* Writer-side accessor for an RCU-protected bucket pointer: requires the
+ * genl mutex (checked via lockdep_genl_is_held()) instead of an RCU
+ * read-side critical section, since the caller may go on to modify the
+ * bucket. */
+static struct tbl_bucket *get_bucket_protected(struct tbl_bucket __rcu *bucket)
+{
+ return rcu_dereference_protected(bucket, lockdep_genl_is_held());
+}
+
static inline int bucket_size(int n_objs)
{
return sizeof(struct tbl_bucket) + sizeof(struct tbl_node *) * n_objs;
return kmalloc(bucket_size(n_objs), GFP_KERNEL);
}
-static void free_buckets(struct tbl_bucket ***l1, unsigned int n_buckets,
+static void free_buckets(struct tbl_bucket __rcu ***l1,
+ unsigned int n_buckets,
void (*free_obj)(struct tbl_node *))
{
unsigned int i;
- for (i = 0; i < n_buckets >> TBL_L1_BITS; i++) {
- struct tbl_bucket **l2 = l1[i];
+ for (i = 0; i < n_buckets >> TBL_L1_SHIFT; i++) {
+ struct tbl_bucket __rcu **l2 = l1[i];
unsigned int j;
- for (j = 0; j < TBL_L1_SIZE; j++) {
- struct tbl_bucket *bucket = l2[j];
+ for (j = 0; j < TBL_L2_SIZE; j++) {
+ struct tbl_bucket *bucket = (struct tbl_bucket __force *)l2[j];
if (!bucket)
continue;
kfree(l1);
}
-static struct tbl_bucket ***alloc_buckets(unsigned int n_buckets)
+static struct tbl_bucket __rcu ***alloc_buckets(unsigned int n_buckets)
{
- struct tbl_bucket ***l1;
+ struct tbl_bucket __rcu ***l1;
unsigned int i;
- l1 = kmalloc((n_buckets >> TBL_L1_BITS) * sizeof(struct tbl_bucket **),
+ l1 = kmalloc((n_buckets >> TBL_L1_SHIFT) * sizeof(struct tbl_bucket **),
GFP_KERNEL);
if (!l1)
return NULL;
- for (i = 0; i < n_buckets >> TBL_L1_BITS; i++) {
- l1[i] = (struct tbl_bucket **)get_zeroed_page(GFP_KERNEL);
+ for (i = 0; i < n_buckets >> TBL_L1_SHIFT; i++) {
+ l1[i] = (struct tbl_bucket __rcu **)get_zeroed_page(GFP_KERNEL);
if (!l1[i]) {
- free_buckets(l1, i << TBL_L1_BITS, NULL);
+ free_buckets(l1, i << TBL_L1_SHIFT, NULL);
return NULL;
}
}
* @n_buckets: number of buckets in the new table
*
* Creates and returns a new hash table, or %NULL if memory cannot be
- * allocated. @n_buckets must be a power of 2 in the range %TBL_L1_SIZE to
+ * allocated. @n_buckets must be a power of 2 in the range %TBL_MIN_BUCKETS to
* %TBL_MAX_BUCKETS.
*/
struct tbl *tbl_create(unsigned int n_buckets)
{
struct tbl *table;
- if (!n_buckets)
- n_buckets = TBL_L1_SIZE;
-
- table = kzalloc(sizeof *table, GFP_KERNEL);
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table)
goto err;
call_rcu(&table->rcu, destroy_table_rcu);
}
-static struct tbl_bucket **find_bucket(struct tbl *table, u32 hash)
+static struct tbl_bucket __rcu **find_bucket(struct tbl *table, u32 hash)
{
unsigned int l1 = (hash & (table->n_buckets - 1)) >> TBL_L1_SHIFT;
unsigned int l2 = hash & ((1 << TBL_L2_BITS) - 1);
struct tbl_node *tbl_lookup(struct tbl *table, void *target, u32 hash,
int (*cmp)(const struct tbl_node *, void *))
{
- struct tbl_bucket **bucketp = find_bucket(table, hash);
- struct tbl_bucket *bucket = rcu_dereference(*bucketp);
+ struct tbl_bucket __rcu **bucketp = find_bucket(table, hash);
+ struct tbl_bucket *bucket = get_bucket(*bucketp);
int index;
if (!bucket)
int tbl_foreach(struct tbl *table,
int (*callback)(struct tbl_node *, void *aux), void *aux)
{
- unsigned int i, j, k;
- for (i = 0; i < table->n_buckets >> TBL_L1_BITS; i++) {
- struct tbl_bucket **l2 = table->buckets[i];
- for (j = 0; j < TBL_L1_SIZE; j++) {
- struct tbl_bucket *bucket = rcu_dereference(l2[j]);
+ unsigned int n_l1 = table->n_buckets >> TBL_L1_SHIFT;
+ unsigned int l1_idx;
+
+ for (l1_idx = 0; l1_idx < n_l1; l1_idx++) {
+ struct tbl_bucket __rcu **l2 = table->buckets[l1_idx];
+ unsigned int l2_idx;
+
+ for (l2_idx = 0; l2_idx < TBL_L2_SIZE; l2_idx++) {
+ struct tbl_bucket *bucket;
+ unsigned int i;
+
+ bucket = get_bucket(l2[l2_idx]);
if (!bucket)
continue;
- for (k = 0; k < bucket->n_objs; k++) {
- int error = (*callback)(bucket->objs[k], aux);
+ for (i = 0; i < bucket->n_objs; i++) {
+ int error = (*callback)(bucket->objs[i], aux);
if (error)
return error;
}
return 0;
}
+/**
+ * tbl_next - find next node in hash table
+ * @table: table to iterate
+ * @bucketp: On entry, hash value of bucket to start from. On exit, updated
+ * to bucket to start from on next call.
+ * @objp: On entry, index to start from within first bucket. On exit, updated
+ * to index to start from on next call.
+ *
+ * Returns the next node in @table in hash order, or %NULL when no nodes remain
+ * in the hash table.
+ *
+ * On entry, uses the values that @bucketp and @objp reference to determine
+ * where to begin iteration. Use 0 for both values to begin a new iteration.
+ * On exit, stores the values to pass on the next iteration into @bucketp and
+ * @objp's referents.
+ */
+struct tbl_node *tbl_next(struct tbl *table, u32 *bucketp, u32 *objp)
+{
+ unsigned int n_l1 = table->n_buckets >> TBL_L1_SHIFT;
+ /* Split the saved bucket number into its L1/L2 indexes; the bucket
+ * number was packed the same way on the previous call (below). */
+ u32 s_l1_idx = *bucketp >> TBL_L1_SHIFT;
+ u32 s_l2_idx = *bucketp & (TBL_L2_SIZE - 1);
+ u32 s_obj = *objp;
+ unsigned int l1_idx;
+
+ for (l1_idx = s_l1_idx; l1_idx < n_l1; l1_idx++) {
+ struct tbl_bucket __rcu **l2 = table->buckets[l1_idx];
+ unsigned int l2_idx;
+
+ for (l2_idx = s_l2_idx; l2_idx < TBL_L2_SIZE; l2_idx++) {
+ struct tbl_bucket *bucket;
+
+ /* Writer-side deref: caller is expected to hold the
+ * genl mutex (see get_bucket_protected()). */
+ bucket = get_bucket_protected(l2[l2_idx]);
+ if (bucket && s_obj < bucket->n_objs) {
+ *bucketp = (l1_idx << TBL_L1_SHIFT) + l2_idx;
+ *objp = s_obj + 1;
+ return bucket->objs[s_obj];
+ }
+
+ /* Only the very first bucket visited resumes at a
+ * nonzero object index. */
+ s_obj = 0;
+ }
+ /* Likewise, only the first L1 slot resumes mid-row. */
+ s_l2_idx = 0;
+ }
+ /* Iteration exhausted: reset so the next call starts over. */
+ *bucketp = 0;
+ *objp = 0;
+ return NULL;
+}
+
static int insert_table_flow(struct tbl_node *node, void *new_table_)
{
struct tbl *new_table = new_table_;
}
err = -ENOMEM;
- new_table = tbl_create(n_buckets);
+ new_table = tbl_create(TBL_MIN_BUCKETS);
if (!new_table)
goto error;
*/
int tbl_insert(struct tbl *table, struct tbl_node *target, u32 hash)
{
- struct tbl_bucket **oldp = find_bucket(table, hash);
- struct tbl_bucket *old = rcu_dereference(*oldp);
+ struct tbl_bucket __rcu **oldp = find_bucket(table, hash);
+ struct tbl_bucket *old = get_bucket_protected(*oldp);
unsigned int n = old ? old->n_objs : 0;
struct tbl_bucket *new = bucket_alloc(n + 1);
*/
int tbl_remove(struct tbl *table, struct tbl_node *target)
{
- struct tbl_bucket **oldp = find_bucket(table, target->hash);
- struct tbl_bucket *old = rcu_dereference(*oldp);
+ struct tbl_bucket __rcu **oldp = find_bucket(table, target->hash);
+ struct tbl_bucket *old = get_bucket_protected(*oldp);
unsigned int n = old->n_objs;
struct tbl_bucket *new;