#include <linux/rcupdate.h>
#include <linux/slab.h>
-#include <linux/list.h>
+#include <linux/rculist.h>
struct sw_table_linear {
struct sw_table swt;
- spinlock_t lock;
unsigned int max_flows;
- atomic_t n_flows;
+ /* Flow count as a plain integer; the private spinlock and atomic are
+  * gone, so writers are presumably serialized externally (timeout takes
+  * dp_mutex) — NOTE(review): confirm the locking rule for insert/delete. */
+ unsigned int n_flows;
struct list_head flows;
+ /* Secondary list in insertion order, walked only by iterate(); lets a
+  * position (serial number) stay meaningful across partial iterations. */
+ struct list_head iter_flows;
+ /* Monotonically increasing serial handed to each newly inserted flow. */
+ unsigned long int next_serial;
};
static struct sw_flow *table_linear_lookup(struct sw_table *swt,
{
struct sw_table_linear *tl = (struct sw_table_linear *) swt;
struct sw_flow *flow;
- list_for_each_entry_rcu (flow, &tl->flows, u.node) {
- if (flow_matches(&flow->key, key))
+ /* RCU reader walk; flows are kept sorted by descending priority (see
+  * insert), so the first wildcard match is the highest-priority match. */
+ list_for_each_entry_rcu (flow, &tl->flows, node) {
+ if (flow_matches_1wild(key, &flow->key))
return flow;
}
return NULL;
static int table_linear_insert(struct sw_table *swt, struct sw_flow *flow)
{
struct sw_table_linear *tl = (struct sw_table_linear *) swt;
- unsigned long int flags;
struct sw_flow *f;
- /* Replace flows that match exactly. */
- spin_lock_irqsave(&tl->lock, flags);
- list_for_each_entry_rcu (f, &tl->flows, u.node) {
- if (f->key.wildcards == flow->key.wildcards
- && flow_matches(&f->key, &flow->key)
- && flow_del(f)) {
- list_replace_rcu(&f->u.node, &flow->u.node);
- spin_unlock_irqrestore(&tl->lock, flags);
+
+ /* Loop through the existing list of entries. New entries will
+ * always be placed behind those with equal priority. Just replace
+ * any flows that match exactly.
+ */
+ list_for_each_entry (f, &tl->flows, node) {
+ if (f->priority == flow->priority
+ && f->key.wildcards == flow->key.wildcards
+ && flow_matches_2wild(&f->key, &flow->key)) {
+ /* Inherit the old serial so an in-progress iterate()
+  * does not see the replacement as a new entry. */
+ flow->serial = f->serial;
+ list_replace_rcu(&f->node, &flow->node);
+ list_replace_rcu(&f->iter_node, &flow->iter_node);
flow_deferred_free(f);
return 1;
}
+
+ if (f->priority < flow->priority)
+ break;
}
- /* Table overflow? */
- if (atomic_read(&tl->n_flows) >= tl->max_flows) {
- spin_unlock_irqrestore(&tl->lock, flags);
+ /* Make sure there's room in the table. */
+ if (tl->n_flows >= tl->max_flows) {
return 0;
}
- atomic_inc(&tl->n_flows);
+ tl->n_flows++;
- /* FIXME: need to order rules from most to least specific. */
- list_add_rcu(&flow->u.node, &tl->flows);
- spin_unlock_irqrestore(&tl->lock, flags);
+ /* Insert the entry immediately in front of where we're pointing. */
+ flow->serial = tl->next_serial++;
+ /* If the loop above ran to completion, &f->node is the list head
+  * itself (list_for_each_entry's termination state), so this appends
+  * at the tail — keeping the priority ordering invariant. */
+ list_add_tail_rcu(&flow->node, &f->node);
+ list_add_rcu(&flow->iter_node, &tl->iter_flows);
return 1;
}
-static int do_delete(struct sw_table *swt, struct sw_flow *flow)
+/* Replace the action list of every flow whose key matches KEY (exact
+ * priority match additionally required when STRICT).  Returns the number
+ * of flows modified. */
+static int table_linear_modify(struct sw_table *swt,
+ const struct sw_flow_key *key, uint16_t priority, int strict,
+ const struct ofp_action_header *actions, size_t actions_len)
{
- if (flow_del(flow)) {
- list_del_rcu(&flow->u.node);
- flow_deferred_free(flow);
- return 1;
+ struct sw_table_linear *tl = (struct sw_table_linear *) swt;
+ struct sw_flow *flow;
+ unsigned int count = 0;
+
+ list_for_each_entry (flow, &tl->flows, node) {
+ if (flow_matches_desc(&flow->key, key, strict)
+ && (!strict || (flow->priority == priority))) {
+ flow_replace_acts(flow, actions, actions_len);
+ count++;
+ }
}
- return 0;
+ return count;
+}
+
+/* Unlink FLOW from both the lookup and iteration lists and schedule an
+ * RCU-deferred free.  The caller is responsible for adjusting n_flows.
+ * Always returns 1 (one flow deleted); swt is currently unused. */
+static int do_delete(struct sw_table *swt, struct sw_flow *flow)
+{
+ list_del_rcu(&flow->node);
+ list_del_rcu(&flow->iter_node);
+ flow_deferred_free(flow);
+ return 1;
+}
static int table_linear_delete(struct sw_table *swt,
- const struct sw_flow_key *key, int strict)
+ const struct sw_flow_key *key, uint16_t priority, int strict)
{
struct sw_table_linear *tl = (struct sw_table_linear *) swt;
- struct list_head *pos, *n;
+ struct sw_flow *flow;
unsigned int count = 0;
- list_for_each_safe_rcu (pos, n, &tl->flows) {
- struct sw_flow *flow = list_entry(pos, struct sw_flow, u.node);
- if (flow_del_matches(&flow->key, key, strict))
+ /* NOTE(review): do_delete() unlinks the entry we are standing on while
+  * using the non-_safe iterator; this relies on list_del_rcu() leaving
+  * node.next intact so the walk can continue.  Confirm writers are
+  * serialized so no concurrent relinking can occur mid-walk. */
+ list_for_each_entry (flow, &tl->flows, node) {
+ if (flow_matches_desc(&flow->key, key, strict)
+ && (!strict || (flow->priority == priority)))
count += do_delete(swt, flow);
}
- if (count)
- atomic_sub(count, &tl->n_flows);
+ tl->n_flows -= count;
return count;
}
static int table_linear_timeout(struct datapath *dp, struct sw_table *swt)
{
struct sw_table_linear *tl = (struct sw_table_linear *) swt;
- struct list_head *pos, *n;
+ struct sw_flow *flow;
int count = 0;
- list_for_each_safe_rcu (pos, n, &tl->flows) {
- struct sw_flow *flow = list_entry(pos, struct sw_flow, u.node);
- if (flow_timeout(flow)) {
+ /* The whole expiry scan runs under dp_mutex, serializing against
+  * other writers that honor the same lock. */
+ mutex_lock(&dp_mutex);
+ list_for_each_entry (flow, &tl->flows, node) {
+ /* flow_timeout() now reports an expiry reason; negative means
+  * the flow has not expired. */
+ int reason = flow_timeout(flow);
+ if (reason >= 0) {
count += do_delete(swt, flow);
- if (dp->flags & OFPC_SEND_FLOW_EXP)
- dp_send_flow_expired(dp, flow);
+ /* flow is unlinked but not yet freed (RCU-deferred),
+  * so it is still valid to report here. */
+ dp_send_flow_expired(dp, flow, reason);
}
}
- if (count)
- atomic_sub(count, &tl->n_flows);
+ tl->n_flows -= count;
+ mutex_unlock(&dp_mutex);
return count;
}
while (!list_empty(&tl->flows)) {
struct sw_flow *flow = list_entry(tl->flows.next,
- struct sw_flow, u.node);
- list_del(&flow->u.node);
+ struct sw_flow, node);
+ /* Table teardown: plain list_del and immediate free, no RCU grace
+  * period — assumes no readers remain at this point. TODO confirm.
+  * iter_node is not unlinked, but the whole table is freed anyway. */
+ list_del(&flow->node);
flow_free(flow);
}
kfree(tl);
}
-/* Linear table's private data is just a pointer to the table */
-
-static int table_linear_iterator(struct sw_table *swt,
- struct swt_iterator *swt_iter)
+/* Invoke CALLBACK(flow, PRIVATE) for each flow matching KEY, resuming a
+ * previously interrupted run via POSITION: private[0] holds the serial of
+ * the flow to restart from (inclusive), so the flow whose callback failed
+ * is retried on the next call.  Returns 0 on completion or the first
+ * nonzero value returned by CALLBACK. */
+static int table_linear_iterate(struct sw_table *swt,
+ const struct sw_flow_key *key,
+ struct sw_table_position *position,
+ int (*callback)(struct sw_flow *, void *),
+ void *private)
{
struct sw_table_linear *tl = (struct sw_table_linear *) swt;
-
- swt_iter->private = tl;
-
- if (atomic_read(&tl->n_flows) == 0)
- swt_iter->flow = NULL;
- else
- swt_iter->flow = list_entry(tl->flows.next,
- struct sw_flow, u.node);
-
- return 1;
-}
-
-static void table_linear_next(struct swt_iterator *swt_iter)
-{
- struct sw_table_linear *tl;
- struct list_head *next;
-
- if (swt_iter->flow == NULL)
- return;
-
- tl = (struct sw_table_linear *) swt_iter->private;
-
- next = swt_iter->flow->u.node.next;
- if (next == &tl->flows)
- swt_iter->flow = NULL;
- else
- swt_iter->flow = list_entry(next, struct sw_flow, u.node);
+ struct sw_flow *flow;
+ unsigned long start;
+
+ start = position->private[0];
+ /* iter_flows is maintained in insertion order, so serials increase
+  * along the walk and the >= start filter skips already-visited flows. */
+ list_for_each_entry (flow, &tl->iter_flows, iter_node) {
+ if (flow->serial >= start
+ && flow_matches_2wild(key, &flow->key)) {
+ int error = callback(flow, private);
+ if (error) {
+ position->private[0] = flow->serial;
+ return error;
+ }
+ }
+ }
+ return 0;
}
-static void table_linear_iterator_destroy(struct swt_iterator *swt_iter)
-{}
-
+/* Fill STATS with this table's name, capacity, occupancy and the lookup
+ * counters accumulated on the generic sw_table. */
static void table_linear_stats(struct sw_table *swt,
struct sw_table_stats *stats)
{
struct sw_table_linear *tl = (struct sw_table_linear *) swt;
stats->name = "linear";
- stats->n_flows = atomic_read(&tl->n_flows);
+ /* The linear table accepts any combination of wildcard bits. */
+ stats->wildcards = OFPFW_ALL;
+ stats->n_flows = tl->n_flows;
stats->max_flows = tl->max_flows;
+ stats->n_lookup = swt->n_lookup;
+ stats->n_matched = swt->n_matched;
}
swt = &tl->swt;
+ /* Wire up the sw_table vtable; iterator/iterator_next/iterator_destroy
+  * are replaced by the single resumable iterate() entry point. */
swt->lookup = table_linear_lookup;
swt->insert = table_linear_insert;
+ swt->modify = table_linear_modify;
swt->delete = table_linear_delete;
swt->timeout = table_linear_timeout;
swt->destroy = table_linear_destroy;
+ swt->iterate = table_linear_iterate;
swt->stats = table_linear_stats;
- swt->iterator = table_linear_iterator;
- swt->iterator_next = table_linear_next;
- swt->iterator_destroy = table_linear_iterator_destroy;
-
tl->max_flows = max_flows;
- atomic_set(&tl->n_flows, 0);
+ tl->n_flows = 0;
INIT_LIST_HEAD(&tl->flows);
- spin_lock_init(&tl->lock);
+ /* iter_flows mirrors flows in insertion order for iterate(). */
+ INIT_LIST_HEAD(&tl->iter_flows);
+ tl->next_serial = 0;
return swt;
}