/* datapath/table-linear.c -- linear flow table for the OpenFlow datapath.
 * (From sliver-openvswitch.git; snapshot taken at commit
 * "Fix openflow.h compilation with GCC 2.95.")
 */
1 /*
2  * Distributed under the terms of the GNU GPL version 2.
3  * Copyright (c) 2007, 2008 The Board of Trustees of The Leland 
4  * Stanford Junior University
5  */
6
7 #include "table.h"
8 #include "flow.h"
9 #include "datapath.h"
10
11 #include <linux/rcupdate.h>
12 #include <linux/slab.h>
13 #include <linux/list.h>
14
/* A flow table kept as a simple linked list, searched linearly.
 *
 * Writers (insert/delete) serialize on 'lock'; lookups walk the lists with
 * RCU list primitives, so readers run locklessly.  NOTE(review): readers are
 * presumably called under rcu_read_lock() -- confirm against callers. */
struct sw_table_linear {
        struct sw_table swt;            /* Base table; must be first member so
                                         * that (struct sw_table_linear *)swt
                                         * casts are valid. */

        spinlock_t lock;                /* Protects both lists against
                                         * concurrent modification. */
        unsigned int max_flows;         /* Capacity limit for this table. */
        atomic_t n_flows;               /* Number of flows currently stored. */
        struct list_head flows;         /* Flows ordered by decreasing
                                         * priority (lookup order). */
        struct list_head iter_flows;    /* Flows with the most recently added
                                         * at the head (iteration order). */
        unsigned long int next_serial;  /* Serial number to assign to the next
                                         * inserted flow. */
};
25
26 static struct sw_flow *table_linear_lookup(struct sw_table *swt,
27                                          const struct sw_flow_key *key)
28 {
29         struct sw_table_linear *tl = (struct sw_table_linear *) swt;
30         struct sw_flow *flow;
31         list_for_each_entry_rcu (flow, &tl->flows, node) {
32                 if (flow_matches(&flow->key, key))
33                         return flow;
34         }
35         return NULL;
36 }
37
38 static int table_linear_insert(struct sw_table *swt, struct sw_flow *flow)
39 {
40         struct sw_table_linear *tl = (struct sw_table_linear *) swt;
41         unsigned long int flags;
42         struct sw_flow *f;
43
44
45         /* Loop through the existing list of entries.  New entries will
46          * always be placed behind those with equal priority.  Just replace 
47          * any flows that match exactly.
48          */
49         spin_lock_irqsave(&tl->lock, flags);
50         list_for_each_entry_rcu (f, &tl->flows, node) {
51                 if (f->priority == flow->priority
52                                 && f->key.wildcards == flow->key.wildcards
53                                 && flow_matches(&f->key, &flow->key)
54                                 && flow_del(f)) {
55                         flow->serial = f->serial;
56                         list_replace_rcu(&f->node, &flow->node);
57                         list_replace_rcu(&f->iter_node, &flow->iter_node);
58                         spin_unlock_irqrestore(&tl->lock, flags);
59                         flow_deferred_free(f);
60                         return 1;
61                 }
62
63                 if (f->priority < flow->priority)
64                         break;
65         }
66
67         /* Make sure there's room in the table. */
68         if (atomic_read(&tl->n_flows) >= tl->max_flows) {
69                 spin_unlock_irqrestore(&tl->lock, flags);
70                 return 0;
71         }
72         atomic_inc(&tl->n_flows);
73
74         /* Insert the entry immediately in front of where we're pointing. */
75         list_add_tail_rcu(&flow->node, &f->node);
76         list_add_rcu(&flow->iter_node, &tl->iter_flows);
77         spin_unlock_irqrestore(&tl->lock, flags);
78         return 1;
79 }
80
81 static int do_delete(struct sw_table *swt, struct sw_flow *flow) 
82 {
83         if (flow_del(flow)) {
84                 list_del_rcu(&flow->node);
85                 list_del_rcu(&flow->iter_node);
86                 flow_deferred_free(flow);
87                 return 1;
88         }
89         return 0;
90 }
91
92 static int table_linear_delete(struct sw_table *swt,
93                                 const struct sw_flow_key *key, int strict)
94 {
95         struct sw_table_linear *tl = (struct sw_table_linear *) swt;
96         struct list_head *pos, *n;
97         unsigned int count = 0;
98
99         list_for_each_safe_rcu (pos, n, &tl->flows) {
100                 struct sw_flow *flow = list_entry(pos, struct sw_flow, node);
101                 if (flow_del_matches(&flow->key, key, strict))
102                         count += do_delete(swt, flow);
103         }
104         if (count)
105                 atomic_sub(count, &tl->n_flows);
106         return count;
107 }
108
109 static int table_linear_timeout(struct datapath *dp, struct sw_table *swt)
110 {
111         struct sw_table_linear *tl = (struct sw_table_linear *) swt;
112         struct list_head *pos, *n;
113         int count = 0;
114
115         list_for_each_safe_rcu (pos, n, &tl->flows) {
116                 struct sw_flow *flow = list_entry(pos, struct sw_flow, node);
117                 if (flow_timeout(flow)) {
118                         count += do_delete(swt, flow);
119                         if (dp->flags & OFPC_SEND_FLOW_EXP)
120                                 dp_send_flow_expired(dp, flow);
121                 }
122         }
123         if (count)
124                 atomic_sub(count, &tl->n_flows);
125         return count;
126 }
127
128 static void table_linear_destroy(struct sw_table *swt)
129 {
130         struct sw_table_linear *tl = (struct sw_table_linear *) swt;
131
132         while (!list_empty(&tl->flows)) {
133                 struct sw_flow *flow = list_entry(tl->flows.next,
134                                                   struct sw_flow, node);
135                 list_del(&flow->node);
136                 flow_free(flow);
137         }
138         kfree(tl);
139 }
140
/* Invokes 'callback(flow, private)' on each flow in 'swt' that matches 'key',
 * in most-recently-inserted-first order (iter_flows order).  Stops and
 * returns the callback's value on the first nonzero return, saving a resume
 * point in 'position'; returns 0 when the whole table has been visited.
 *
 * Resume encoding: position->private[0] holds the bitwise complement of the
 * next serial to visit.  On a fresh iteration private[0] is presumably 0
 * (NOTE(review): confirm callers zero the position), so 'start' becomes
 * ~0UL and every flow passes the 'serial <= start' test.  After an
 * interruption at a flow with serial S, private[0] = ~(S - 1), so the next
 * call recomputes start = S - 1 and skips both the already-visited flows and
 * any flows inserted since (which received larger serials). */
static int table_linear_iterate(struct sw_table *swt,
                                const struct sw_flow_key *key,
                                struct sw_table_position *position,
                                int (*callback)(struct sw_flow *, void *),
                                void *private)
{
        struct sw_table_linear *tl = (struct sw_table_linear *) swt;
        struct sw_flow *flow;
        unsigned long start;

        /* Decode the resume point (see comment above). */
        start = ~position->private[0];
        list_for_each_entry_rcu (flow, &tl->iter_flows, iter_node) {
                if (flow->serial <= start && flow_matches(key, &flow->key)) {
                        int error = callback(flow, private);
                        if (error) {
                                /* Save where to pick up next time. */
                                position->private[0] = ~(flow->serial - 1);
                                return error;
                        }
                }
        }
        return 0;
}
163
164 static void table_linear_stats(struct sw_table *swt,
165                                 struct sw_table_stats *stats)
166 {
167         struct sw_table_linear *tl = (struct sw_table_linear *) swt;
168         stats->name = "linear";
169         stats->n_flows = atomic_read(&tl->n_flows);
170         stats->max_flows = tl->max_flows;
171 }
172
173
174 struct sw_table *table_linear_create(unsigned int max_flows)
175 {
176         struct sw_table_linear *tl;
177         struct sw_table *swt;
178
179         tl = kzalloc(sizeof *tl, GFP_KERNEL);
180         if (tl == NULL)
181                 return NULL;
182
183         swt = &tl->swt;
184         swt->lookup = table_linear_lookup;
185         swt->insert = table_linear_insert;
186         swt->delete = table_linear_delete;
187         swt->timeout = table_linear_timeout;
188         swt->destroy = table_linear_destroy;
189         swt->iterate = table_linear_iterate;
190         swt->stats = table_linear_stats;
191
192         tl->max_flows = max_flows;
193         atomic_set(&tl->n_flows, 0);
194         INIT_LIST_HEAD(&tl->flows);
195         INIT_LIST_HEAD(&tl->iter_flows);
196         spin_lock_init(&tl->lock);
197         tl->next_serial = 0;
198
199         return swt;
200 }