struct sw_table_hash {
struct sw_table swt;
- spinlock_t lock;
struct crc32 crc32;
- atomic_t n_flows;
+ unsigned int n_flows;
unsigned int bucket_mask; /* Number of buckets minus 1. */
struct sw_flow **buckets;
};
{
struct sw_table_hash *th = (struct sw_table_hash *) swt;
struct sw_flow **bucket;
- unsigned long int flags;
int retval;
if (flow->key.wildcards != 0)
return 0;
- spin_lock_irqsave(&th->lock, flags);
bucket = find_bucket(swt, &flow->key);
if (*bucket == NULL) {
- atomic_inc(&th->n_flows);
+ th->n_flows++;
rcu_assign_pointer(*bucket, flow);
retval = 1;
} else {
struct sw_flow *old_flow = *bucket;
- if (!memcmp(&old_flow->key, &flow->key, sizeof flow->key)
- && flow_del(old_flow)) {
+ if (!memcmp(&old_flow->key, &flow->key, sizeof flow->key)) {
rcu_assign_pointer(*bucket, flow);
flow_deferred_free(old_flow);
			retval = 1;
		} else {
			retval = 0;
}
}
- spin_unlock_irqrestore(&th->lock, flags);
return retval;
}
/* Caller must update n_flows. */
static int do_delete(struct sw_flow **bucket, struct sw_flow *flow)
{
- if (flow_del(flow)) {
- rcu_assign_pointer(*bucket, NULL);
- flow_deferred_free(flow);
- return 1;
- }
- return 0;
+ rcu_assign_pointer(*bucket, NULL);
+ flow_deferred_free(flow);
+ return 1;
}
/* Returns number of deleted flows. We can ignore the priority
count += do_delete(bucket, flow);
}
}
- if (count)
- atomic_sub(count, &th->n_flows);
+ th->n_flows -= count;
return count;
}
unsigned int i;
int count = 0;
+ mutex_lock(&dp_mutex);
for (i = 0; i <= th->bucket_mask; i++) {
struct sw_flow **bucket = &th->buckets[i];
struct sw_flow *flow = *bucket;
- if (flow && flow_timeout(flow)) {
- count += do_delete(bucket, flow);
- if (dp->flags & OFPC_SEND_FLOW_EXP)
- dp_send_flow_expired(dp, flow);
+ if (flow) {
+ int reason = flow_timeout(flow);
+ if (reason >= 0) {
+ count += do_delete(bucket, flow);
+ dp_send_flow_expired(dp, flow, reason);
+ }
}
}
+ th->n_flows -= count;
+ mutex_unlock(&dp_mutex);
- if (count)
- atomic_sub(count, &th->n_flows);
return count;
}
return 0;
if (key->wildcards == 0) {
- struct sw_flow *flow = table_hash_lookup(swt, key);
- position->private[0] = -1;
- return flow ? callback(flow, private) : 0;
+ struct sw_flow *flow;
+ int error;
+
+ flow = table_hash_lookup(swt, key);
+ if (!flow)
+ return 0;
+
+ error = callback(flow, private);
+ if (!error)
+ position->private[0] = -1;
+ return error;
} else {
int i;
for (i = position->private[0]; i <= th->bucket_mask; i++) {
struct sw_flow *flow = th->buckets[i];
- if (flow && flow_matches(key, &flow->key)) {
+ if (flow && flow_matches_1wild(&flow->key, key)) {
int error = callback(flow, private);
if (error) {
- position->private[0] = i + 1;
+ position->private[0] = i;
return error;
}
}
{
struct sw_table_hash *th = (struct sw_table_hash *) swt;
stats->name = "hash";
- stats->n_flows = atomic_read(&th->n_flows);
+ stats->n_flows = th->n_flows;
stats->max_flows = th->bucket_mask + 1;
+ stats->n_matched = swt->n_matched;
}
struct sw_table *table_hash_create(unsigned int polynomial,
struct sw_table_hash *th;
struct sw_table *swt;
- th = kmalloc(sizeof *th, GFP_KERNEL);
+ th = kzalloc(sizeof *th, GFP_KERNEL);
if (th == NULL)
return NULL;
swt->iterate = table_hash_iterate;
swt->stats = table_hash_stats;
- spin_lock_init(&th->lock);
crc32_init(&th->crc32, polynomial);
- atomic_set(&th->n_flows, 0);
+ th->n_flows = 0;
return swt;
}
stats->name = "hash2";
stats->n_flows = substats[0].n_flows + substats[1].n_flows;
stats->max_flows = substats[0].max_flows + substats[1].max_flows;
+ stats->n_matched = swt->n_matched;
}
struct sw_table *table_hash2_create(unsigned int poly0, unsigned int buckets0,
struct sw_table_hash2 *t2;
struct sw_table *swt;
- t2 = kmalloc(sizeof *t2, GFP_KERNEL);
+ t2 = kzalloc(sizeof *t2, GFP_KERNEL);
if (t2 == NULL)
return NULL;