a82b63ebf6a4ee444374b364df0f344d66efba6f
[sliver-openvswitch.git] / datapath / table-linear.c
1 /*
2  * Distributed under the terms of the GNU GPL version 2.
3  * Copyright (c) 2007, 2008 The Board of Trustees of The Leland 
4  * Stanford Junior University
5  */
6
7 #include "table.h"
8 #include "flow.h"
9 #include "datapath.h"
10
11 #include <linux/rcupdate.h>
12 #include <linux/slab.h>
13 #include <linux/list.h>
14
/* A "linear" flow table: all flows kept on one list, sorted by
 * decreasing priority, and searched sequentially on lookup. */
struct sw_table_linear {
        struct sw_table swt;            /* Base table; kept first so the
                                         * (struct sw_table_linear *) casts
                                         * throughout this file are valid. */

        spinlock_t lock;                /* Serializes the insert path. */
        unsigned int max_flows;         /* Capacity limit enforced on insert. */
        atomic_t n_flows;               /* Current number of flows in 'flows'. */
        struct list_head flows;         /* struct sw_flow list, linked via u.node. */
};
23
24 static struct sw_flow *table_linear_lookup(struct sw_table *swt,
25                                          const struct sw_flow_key *key)
26 {
27         struct sw_table_linear *tl = (struct sw_table_linear *) swt;
28         struct sw_flow *flow;
29         list_for_each_entry_rcu (flow, &tl->flows, u.node) {
30                 if (flow_matches(&flow->key, key))
31                         return flow;
32         }
33         return NULL;
34 }
35
/* Inserts 'flow' into 'swt', keeping the list sorted by decreasing
 * priority.  If an existing flow has the same priority and wildcards
 * and a matching key, it is replaced in place and freed after an RCU
 * grace period.  Returns 1 on success, 0 if the table is full.
 * Takes tl->lock to serialize against concurrent inserts. */
static int table_linear_insert(struct sw_table *swt, struct sw_flow *flow)
{
        struct sw_table_linear *tl = (struct sw_table_linear *) swt;
        unsigned long int flags;
        struct sw_flow *f;


        /* Loop through the existing list of entries.  New entries will
         * always be placed behind those with equal priority.  Just replace
         * any flows that match exactly.
         */
        spin_lock_irqsave(&tl->lock, flags);
        list_for_each_entry_rcu (f, &tl->flows, u.node) {
                if (f->priority == flow->priority
                                && f->key.wildcards == flow->key.wildcards
                                && flow_matches(&f->key, &flow->key)
                                && flow_del(f)) {
                        /* Atomically publish the replacement so concurrent
                         * readers always see exactly one of the two flows;
                         * the old one is reclaimed only after a grace
                         * period.  n_flows is unchanged: one out, one in. */
                        list_replace_rcu(&f->u.node, &flow->u.node);
                        spin_unlock_irqrestore(&tl->lock, flags);
                        flow_deferred_free(f);
                        return 1;
                }

                if (f->priority < flow->priority)
                        break;
        }

        /* Make sure there's room in the table. */
        if (atomic_read(&tl->n_flows) >= tl->max_flows) {
                spin_unlock_irqrestore(&tl->lock, flags);
                return 0;
        }
        atomic_inc(&tl->n_flows);

        /* Insert the entry immediately in front of where we're pointing.
         * If the loop above ran off the end without breaking, 'f' is
         * positioned so that &f->u.node == &tl->flows, making this an
         * append at the tail. */
        list_add_tail_rcu(&flow->u.node, &f->u.node);
        spin_unlock_irqrestore(&tl->lock, flags);
        return 1;
}
75
76 static int do_delete(struct sw_table *swt, struct sw_flow *flow) 
77 {
78         if (flow_del(flow)) {
79                 list_del_rcu(&flow->u.node);
80                 flow_deferred_free(flow);
81                 return 1;
82         }
83         return 0;
84 }
85
/* Deletes from 'swt' every flow whose key matches 'key' (with 'strict'
 * controlling exact-vs-wildcard matching in flow_del_matches()) and
 * returns the number of flows removed.  Uses the _safe_rcu iterator so
 * the current node may be unlinked while walking.
 *
 * NOTE(review): unlike the insert path, tl->lock is not taken here;
 * presumably some higher-level lock serializes writers — confirm
 * against the callers in datapath.c. */
static int table_linear_delete(struct sw_table *swt,
                                const struct sw_flow_key *key, int strict)
{
        struct sw_table_linear *tl = (struct sw_table_linear *) swt;
        struct list_head *pos, *n;
        unsigned int count = 0;

        list_for_each_safe_rcu (pos, n, &tl->flows) {
                struct sw_flow *flow = list_entry(pos, struct sw_flow, u.node);
                if (flow_del_matches(&flow->key, key, strict))
                        count += do_delete(swt, flow);
        }
        /* Adjust the flow count once, after the walk. */
        if (count)
                atomic_sub(count, &tl->n_flows);
        return count;
}
102
/* Expires flows in 'swt' whose timeout has elapsed (per flow_timeout())
 * and returns the number of flows actually removed.  If the datapath
 * has OFPC_SEND_FLOW_EXP set, a flow-expired message is sent for each
 * expired flow; the flow's memory remains valid here because
 * do_delete() defers the free past an RCU grace period.
 *
 * NOTE(review): the expiration message is sent even when do_delete()
 * returns 0 (flow_del() refused the removal) — verify that is the
 * intended behavior. */
static int table_linear_timeout(struct datapath *dp, struct sw_table *swt)
{
        struct sw_table_linear *tl = (struct sw_table_linear *) swt;
        struct list_head *pos, *n;
        int count = 0;

        list_for_each_safe_rcu (pos, n, &tl->flows) {
                struct sw_flow *flow = list_entry(pos, struct sw_flow, u.node);
                if (flow_timeout(flow)) {
                        count += do_delete(swt, flow);
                        if (dp->flags & OFPC_SEND_FLOW_EXP)
                                dp_send_flow_expired(dp, flow);
                }
        }
        /* Adjust the flow count once, after the walk. */
        if (count)
                atomic_sub(count, &tl->n_flows);
        return count;
}
121
122 static void table_linear_destroy(struct sw_table *swt)
123 {
124         struct sw_table_linear *tl = (struct sw_table_linear *) swt;
125
126         while (!list_empty(&tl->flows)) {
127                 struct sw_flow *flow = list_entry(tl->flows.next,
128                                                   struct sw_flow, u.node);
129                 list_del(&flow->u.node);
130                 flow_free(flow);
131         }
132         kfree(tl);
133 }
134
135 /* Linear table's private data is just a pointer to the table */
136
137 static int table_linear_iterator(struct sw_table *swt,
138                                  struct swt_iterator *swt_iter) 
139 {
140         struct sw_table_linear *tl = (struct sw_table_linear *) swt;
141
142         swt_iter->private = tl;
143
144         if (atomic_read(&tl->n_flows) == 0)
145                 swt_iter->flow = NULL;
146         else
147                 swt_iter->flow = list_entry(tl->flows.next,
148                                 struct sw_flow, u.node);
149
150         return 1;
151 }
152
153 static void table_linear_next(struct swt_iterator *swt_iter)
154 {
155         struct sw_table_linear *tl;
156         struct list_head *next;
157
158         if (swt_iter->flow == NULL)
159                 return;
160
161         tl = (struct sw_table_linear *) swt_iter->private;
162
163         next = swt_iter->flow->u.node.next;
164         if (next == &tl->flows)
165                 swt_iter->flow = NULL;
166         else
167                 swt_iter->flow = list_entry(next, struct sw_flow, u.node);
168 }
169
/* table_linear_iterator() allocates no per-iterator state, so there is
 * nothing to release here. */
static void table_linear_iterator_destroy(struct swt_iterator *swt_iter)
{}
172
173 static void table_linear_stats(struct sw_table *swt,
174                                 struct sw_table_stats *stats)
175 {
176         struct sw_table_linear *tl = (struct sw_table_linear *) swt;
177         stats->name = "linear";
178         stats->n_flows = atomic_read(&tl->n_flows);
179         stats->max_flows = tl->max_flows;
180 }
181
182
183 struct sw_table *table_linear_create(unsigned int max_flows)
184 {
185         struct sw_table_linear *tl;
186         struct sw_table *swt;
187
188         tl = kzalloc(sizeof *tl, GFP_KERNEL);
189         if (tl == NULL)
190                 return NULL;
191
192         swt = &tl->swt;
193         swt->lookup = table_linear_lookup;
194         swt->insert = table_linear_insert;
195         swt->delete = table_linear_delete;
196         swt->timeout = table_linear_timeout;
197         swt->destroy = table_linear_destroy;
198         swt->stats = table_linear_stats;
199
200                 swt->iterator = table_linear_iterator;
201         swt->iterator_next = table_linear_next;
202         swt->iterator_destroy = table_linear_iterator_destroy;
203
204         tl->max_flows = max_flows;
205         atomic_set(&tl->n_flows, 0);
206         INIT_LIST_HEAD(&tl->flows);
207         spin_lock_init(&tl->lock);
208
209         return swt;
210 }