- On flow entries with wildcards, match the priority field when doing a "strict" delete.
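In other words, a "strict" delete now removes only entries whose priority matches exactly, while a non-strict delete continues to ignore priority. A minimal userspace sketch of the corrected predicate (the fake_* names are hypothetical stand-ins; the real check lives in table_linear_delete() below):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for struct sw_flow: only the fields the
 * predicate looks at. */
struct fake_flow {
        uint32_t wildcards;
        uint16_t priority;
};

/* Stand-in for flow_del_matches(); assume the key comparison passed. */
static bool fake_del_matches(const struct fake_flow *flow, int strict)
{
        (void) flow;
        (void) strict;
        return true;
}

static bool should_delete(const struct fake_flow *flow, uint16_t priority,
                          int strict)
{
        /* The fixed condition: priority only constrains strict deletes. */
        return fake_del_matches(flow, strict)
                && (!strict || flow->priority == priority);
}

int main(void)
{
        struct fake_flow f = { .wildcards = 0x3f, .priority = 100 };

        printf("non-strict, priority 200: %d\n", should_delete(&f, 200, 0)); /* 1 */
        printf("strict, priority 200: %d\n", should_delete(&f, 200, 1));     /* 0 */
        printf("strict, priority 100: %d\n", should_delete(&f, 100, 1));     /* 1 */
        return 0;
}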
[sliver-openvswitch.git] / datapath / table-linear.c
/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
 * Stanford Junior University
 */

#include "table.h"
#include "flow.h"
#include "datapath.h"

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/list.h>

struct sw_table_linear {
        struct sw_table swt;

        spinlock_t lock;               /* Serializes modifications. */
        unsigned int max_flows;        /* Capacity of the table. */
        atomic_t n_flows;              /* Number of flows currently in it. */
        struct list_head flows;        /* Sorted by descending priority. */
        struct list_head iter_flows;   /* Newest first, for iteration. */
        unsigned long int next_serial; /* Serial of the next new flow. */
};

static struct sw_flow *table_linear_lookup(struct sw_table *swt,
                                         const struct sw_flow_key *key)
{
        struct sw_table_linear *tl = (struct sw_table_linear *) swt;
        struct sw_flow *flow;

        /* The list is sorted by descending priority, so the first match is
         * also the highest-priority match. */
        list_for_each_entry_rcu (flow, &tl->flows, node) {
                if (flow_matches(&flow->key, key))
                        return flow;
        }
        return NULL;
}

static int table_linear_insert(struct sw_table *swt, struct sw_flow *flow)
{
        struct sw_table_linear *tl = (struct sw_table_linear *) swt;
        unsigned long int flags;
        struct sw_flow *f;

        /* Loop through the existing list of entries.  New entries will
         * always be placed behind those with equal priority.  Just replace
         * any flows that match exactly.
         */
        spin_lock_irqsave(&tl->lock, flags);
        list_for_each_entry_rcu (f, &tl->flows, node) {
                if (f->priority == flow->priority
                                && f->key.wildcards == flow->key.wildcards
                                && flow_matches(&f->key, &flow->key)
                                && flow_del(f)) {
                        flow->serial = f->serial;
                        list_replace_rcu(&f->node, &flow->node);
                        list_replace_rcu(&f->iter_node, &flow->iter_node);
                        spin_unlock_irqrestore(&tl->lock, flags);
                        flow_deferred_free(f);
                        return 1;
                }

                if (f->priority < flow->priority)
                        break;
        }

        /* Make sure there's room in the table. */
        if (atomic_read(&tl->n_flows) >= tl->max_flows) {
                spin_unlock_irqrestore(&tl->lock, flags);
                return 0;
        }
        atomic_inc(&tl->n_flows);

        /* Insert the entry immediately in front of where we're pointing,
         * giving it the next serial number. */
        flow->serial = tl->next_serial++;
        list_add_tail_rcu(&flow->node, &f->node);
        list_add_rcu(&flow->iter_node, &tl->iter_flows);
        spin_unlock_irqrestore(&tl->lock, flags);
        return 1;
}

static int do_delete(struct sw_table *swt, struct sw_flow *flow)
{
        if (flow_del(flow)) {
                list_del_rcu(&flow->node);
                list_del_rcu(&flow->iter_node);
                flow_deferred_free(flow);
                return 1;
        }
        return 0;
}

static int table_linear_delete(struct sw_table *swt,
                                const struct sw_flow_key *key, uint16_t priority, int strict)
{
        struct sw_table_linear *tl = (struct sw_table_linear *) swt;
        struct list_head *pos, *n;
        unsigned int count = 0;

        list_for_each_safe_rcu (pos, n, &tl->flows) {
                struct sw_flow *flow = list_entry(pos, struct sw_flow, node);
                /* A strict delete must also match on priority; a non-strict
                 * delete ignores it. */
                if (flow_del_matches(&flow->key, key, strict)
                                && (!strict || (flow->priority == priority)))
                        count += do_delete(swt, flow);
        }
        if (count)
                atomic_sub(count, &tl->n_flows);
        return count;
}

static int table_linear_timeout(struct datapath *dp, struct sw_table *swt)
{
        struct sw_table_linear *tl = (struct sw_table_linear *) swt;
        struct list_head *pos, *n;
        int count = 0;

        list_for_each_safe_rcu (pos, n, &tl->flows) {
                struct sw_flow *flow = list_entry(pos, struct sw_flow, node);
                if (flow_timeout(flow)) {
                        count += do_delete(swt, flow);
                        if (dp->flags & OFPC_SEND_FLOW_EXP)
                                dp_send_flow_expired(dp, flow);
                }
        }
        if (count)
                atomic_sub(count, &tl->n_flows);
        return count;
}

static void table_linear_destroy(struct sw_table *swt)
{
        struct sw_table_linear *tl = (struct sw_table_linear *) swt;

        while (!list_empty(&tl->flows)) {
                struct sw_flow *flow = list_entry(tl->flows.next,
                                                  struct sw_flow, node);
                list_del(&flow->node);
                flow_free(flow);
        }
        kfree(tl);
}

static int table_linear_iterate(struct sw_table *swt,
                                const struct sw_flow_key *key,
                                struct sw_table_position *position,
                                int (*callback)(struct sw_flow *, void *),
                                void *private)
{
        struct sw_table_linear *tl = (struct sw_table_linear *) swt;
        struct sw_flow *flow;
        unsigned long start;

        /* position->private[0] holds the complement of the newest serial
         * still to visit, so a zeroed position (a fresh iteration) starts
         * with the newest flow and a resumed one skips flows already seen. */
        start = ~position->private[0];
        list_for_each_entry_rcu (flow, &tl->iter_flows, iter_node) {
                if (flow->serial <= start && flow_matches(key, &flow->key)) {
                        int error = callback(flow, private);
                        if (error) {
                                position->private[0] = ~(flow->serial - 1);
                                return error;
                        }
                }
        }
        return 0;
}

static void table_linear_stats(struct sw_table *swt,
                                struct sw_table_stats *stats)
{
        struct sw_table_linear *tl = (struct sw_table_linear *) swt;
        stats->name = "linear";
        stats->n_flows = atomic_read(&tl->n_flows);
        stats->max_flows = tl->max_flows;
}

struct sw_table *table_linear_create(unsigned int max_flows)
{
        struct sw_table_linear *tl;
        struct sw_table *swt;

        tl = kzalloc(sizeof *tl, GFP_KERNEL);
        if (tl == NULL)
                return NULL;

        swt = &tl->swt;
        swt->lookup = table_linear_lookup;
        swt->insert = table_linear_insert;
        swt->delete = table_linear_delete;
        swt->timeout = table_linear_timeout;
        swt->destroy = table_linear_destroy;
        swt->iterate = table_linear_iterate;
        swt->stats = table_linear_stats;

        tl->max_flows = max_flows;
        atomic_set(&tl->n_flows, 0);
        INIT_LIST_HEAD(&tl->flows);
        INIT_LIST_HEAD(&tl->iter_flows);
        spin_lock_init(&tl->lock);
        tl->next_serial = 0;

        return swt;
}
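
As an aside, table_linear_insert() maintains the invariant that the flows list stays sorted by descending priority, with new entries placed behind existing entries of equal priority, so the oldest entry wins a priority tie at lookup time. A standalone userspace sketch of that invariant (hypothetical demo_* types, using a plain singly linked list instead of the kernel's list_head):

#include <stdio.h>
#include <stdint.h>

struct demo_flow {
        uint16_t priority;
        int id;
        struct demo_flow *next;
};

static void demo_insert(struct demo_flow **head, struct demo_flow *flow)
{
        struct demo_flow **p = head;

        /* Walk past every entry whose priority is >= the new flow's, so
         * the new entry lands behind equal-priority entries. */
        while (*p != NULL && (*p)->priority >= flow->priority)
                p = &(*p)->next;
        flow->next = *p;
        *p = flow;
}

int main(void)
{
        struct demo_flow a = { .priority = 100, .id = 1 };
        struct demo_flow b = { .priority = 200, .id = 2 };
        struct demo_flow c = { .priority = 100, .id = 3 };
        struct demo_flow *head = NULL, *f;

        demo_insert(&head, &a);
        demo_insert(&head, &b);
        demo_insert(&head, &c);

        /* Prints id 2 (prio 200), then ids 1 and 3 (prio 100, in
         * insertion order). */
        for (f = head; f != NULL; f = f->next)
                printf("id %d prio %d\n", f->id, f->priority);
        return 0;
}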