Implement 802.1D Spanning Tree Protocol.
[sliver-openvswitch.git] / datapath / flow.c
/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
 * Stanford Junior University
 */

#include "flow.h"
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/ip.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <net/ip.h>

#include "openflow.h"
#include "compat.h"
#include "snap.h"

struct kmem_cache *flow_cache;

/* Internal function used to compare fields in flow. */
static inline
int flow_fields_match(const struct sw_flow_key *a, const struct sw_flow_key *b,
                      uint32_t w, uint32_t src_mask, uint32_t dst_mask)
{
        return ((w & OFPFW_IN_PORT || a->in_port == b->in_port)
                && (w & OFPFW_DL_VLAN || a->dl_vlan == b->dl_vlan)
                && (w & OFPFW_DL_SRC || !memcmp(a->dl_src, b->dl_src, ETH_ALEN))
                && (w & OFPFW_DL_DST || !memcmp(a->dl_dst, b->dl_dst, ETH_ALEN))
                && (w & OFPFW_DL_TYPE || a->dl_type == b->dl_type)
                && !((a->nw_src ^ b->nw_src) & src_mask)
                && !((a->nw_dst ^ b->nw_dst) & dst_mask)
                && (w & OFPFW_NW_PROTO || a->nw_proto == b->nw_proto)
                && (w & OFPFW_TP_SRC || a->tp_src == b->tp_src)
                && (w & OFPFW_TP_DST || a->tp_dst == b->tp_dst));
}

/* Returns nonzero if 'a' and 'b' match, that is, if their fields are equal
 * modulo wildcards in 'b', zero otherwise. */
int flow_matches_1wild(const struct sw_flow_key *a,
                       const struct sw_flow_key *b)
{
        return flow_fields_match(a, b, b->wildcards,
                                 b->nw_src_mask, b->nw_dst_mask);
}
EXPORT_SYMBOL(flow_matches_1wild);

/* Returns nonzero if 'a' and 'b' match, that is, if their fields are equal
 * modulo wildcards in 'a' or 'b', zero otherwise. */
int flow_matches_2wild(const struct sw_flow_key *a,
                       const struct sw_flow_key *b)
{
        return flow_fields_match(a, b,
                                 a->wildcards | b->wildcards,
                                 a->nw_src_mask & b->nw_src_mask,
                                 a->nw_dst_mask & b->nw_dst_mask);
}
EXPORT_SYMBOL(flow_matches_2wild);

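/*
 * Illustrative sketch (not part of the original file): how the two matchers
 * differ in typical use.  flow_matches_1wild() compares an exact key (for
 * example one filled in by flow_extract() from a packet, so its wildcards
 * are zero) against a single wildcarded table entry, while
 * flow_matches_2wild() compares two wildcarded keys, as when a new entry is
 * checked for overlap with ones already installed.  The helper names below
 * are hypothetical.
 */
static int example_packet_hits_entry(const struct sw_flow_key *packet_key,
                                      const struct sw_flow_key *entry_key)
{
        /* 'entry_key' supplies the wildcards and netmasks. */
        return flow_matches_1wild(packet_key, entry_key);
}

static int example_entries_overlap(const struct sw_flow_key *a,
                                   const struct sw_flow_key *b)
{
        /* A field is compared only if it is exact-match in both keys. */
        return flow_matches_2wild(a, b);
}
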
/* Returns nonzero if 't' (the table entry's key) and 'd' (the key
 * describing the deletion) match, that is, if their fields are
 * equal modulo wildcards, zero otherwise.  If 'strict' is nonzero, the
 * wildcards must also be identical in 't' and 'd'.  Note that the table
 * entry's wildcards are ignored unless 'strict' is set. */
int flow_del_matches(const struct sw_flow_key *t, const struct sw_flow_key *d, int strict)
{
        if (strict && d->wildcards != t->wildcards)
                return 0;
        return flow_matches_1wild(t, d);
}
EXPORT_SYMBOL(flow_del_matches);

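/*
 * Illustrative sketch (not part of the original file): strict versus
 * non-strict deletion.  A non-strict request removes every table entry the
 * description matches; a strict request additionally requires identical
 * wildcards, so it normally pins down a single rule.  The helper name is
 * hypothetical.
 */
static int example_should_delete(const struct sw_flow_key *entry_key,
                                 const struct sw_flow_key *request_key,
                                 int strict)
{
        return flow_del_matches(entry_key, request_key, strict);
}
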
static uint32_t make_nw_mask(int n_wild_bits)
{
        n_wild_bits &= (1u << OFPFW_NW_SRC_BITS) - 1;
        return n_wild_bits < 32 ? htonl(~((1u << n_wild_bits) - 1)) : 0;
}

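/*
 * Worked examples (not part of the original file) of what make_nw_mask()
 * returns for a few wildcard bit counts, assuming OFPFW_NW_SRC_BITS == 6:
 *
 *     make_nw_mask(0)  == htonl(0xffffffff)    exact-match /32
 *     make_nw_mask(8)  == htonl(0xffffff00)    /24
 *     make_nw_mask(32) == 0                    address fully wildcarded
 */
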
void flow_extract_match(struct sw_flow_key* to, const struct ofp_match* from)
{
        to->wildcards = ntohl(from->wildcards) & OFPFW_ALL;
        to->pad = 0;
        to->in_port = from->in_port;
        to->dl_vlan = from->dl_vlan;
        memcpy(to->dl_src, from->dl_src, ETH_ALEN);
        memcpy(to->dl_dst, from->dl_dst, ETH_ALEN);
        to->dl_type = from->dl_type;

        to->nw_src = to->nw_dst = to->nw_proto = 0;
        to->tp_src = to->tp_dst = 0;

#define OFPFW_TP (OFPFW_TP_SRC | OFPFW_TP_DST)
#define OFPFW_NW (OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK | OFPFW_NW_PROTO)
        if (to->wildcards & OFPFW_DL_TYPE) {
                /* Can't sensibly match on network or transport headers if the
                 * data link type is unknown. */
                to->wildcards |= OFPFW_NW | OFPFW_TP;
        } else if (from->dl_type == htons(ETH_P_IP)) {
                to->nw_src   = from->nw_src;
                to->nw_dst   = from->nw_dst;
                to->nw_proto = from->nw_proto;

                if (to->wildcards & OFPFW_NW_PROTO) {
                        /* Can't sensibly match on transport headers if the
                         * network protocol is unknown. */
                        to->wildcards |= OFPFW_TP;
                } else if (from->nw_proto == IPPROTO_TCP
                           || from->nw_proto == IPPROTO_UDP) {
                        to->tp_src = from->tp_src;
                        to->tp_dst = from->tp_dst;
                } else {
                        /* Transport layer fields are undefined.  Mark them as
                         * exact-match to allow such flows to reside in
                         * table-hash, instead of falling into table-linear. */
                        to->wildcards &= ~OFPFW_TP;
                }
        } else {
                /* Network and transport layer fields are undefined.  Mark them
                 * as exact-match to allow such flows to reside in table-hash,
                 * instead of falling into table-linear. */
                to->wildcards &= ~(OFPFW_NW | OFPFW_TP);
        }

        /* We set these late because code above adjusts to->wildcards. */
        to->nw_src_mask = make_nw_mask(to->wildcards >> OFPFW_NW_SRC_SHIFT);
        to->nw_dst_mask = make_nw_mask(to->wildcards >> OFPFW_NW_DST_SHIFT);
}

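/*
 * Illustrative sketch (not part of the original file): building a table key
 * from the ofp_match carried in a flow-setup request.  'ofm' stands in for a
 * received flow_mod message (struct ofp_flow_mod from openflow.h is assumed
 * to carry an ofp_match member named 'match'); the helper name is
 * hypothetical.
 */
static void example_key_from_flow_mod(const struct ofp_flow_mod *ofm,
                                      struct sw_flow_key *key)
{
        /* Normalizes the wildcard bits and derives nw_src_mask/nw_dst_mask,
         * leaving 'key' ready for flow_matches_2wild() against entries that
         * are already installed. */
        flow_extract_match(key, &ofm->match);
}
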
void flow_fill_match(struct ofp_match* to, const struct sw_flow_key* from)
{
        to->wildcards = htonl(from->wildcards);
        to->in_port   = from->in_port;
        to->dl_vlan   = from->dl_vlan;
        memcpy(to->dl_src, from->dl_src, ETH_ALEN);
        memcpy(to->dl_dst, from->dl_dst, ETH_ALEN);
        to->dl_type   = from->dl_type;
        to->nw_src    = from->nw_src;
        to->nw_dst    = from->nw_dst;
        to->nw_proto  = from->nw_proto;
        to->tp_src    = from->tp_src;
        to->tp_dst    = from->tp_dst;
        to->pad       = 0;
}

/* Returns a reason for the flow's expiration (OFPER_IDLE_TIMEOUT or
 * OFPER_HARD_TIMEOUT) if 'flow' has expired, otherwise -1. */
int flow_timeout(struct sw_flow *flow)
{
        if (flow->idle_timeout != OFP_FLOW_PERMANENT
            && time_after(jiffies, flow->used + flow->idle_timeout * HZ))
                return OFPER_IDLE_TIMEOUT;
        else if (flow->hard_timeout != OFP_FLOW_PERMANENT
                 && time_after(jiffies,
                               flow->init_time + flow->hard_timeout * HZ))
                return OFPER_HARD_TIMEOUT;
        else
                return -1;
}
EXPORT_SYMBOL(flow_timeout);

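/*
 * Illustrative sketch (not part of the original file): how a periodic
 * expiration sweep might use flow_timeout().  Table iteration, unlinking,
 * and the controller notification are only described in comments; the
 * helper name is hypothetical.
 */
static void example_expire_flow(struct sw_flow *flow)
{
        int reason = flow_timeout(flow);

        if (reason >= 0) {
                /* The flow idled out (OFPER_IDLE_TIMEOUT) or reached its hard
                 * timeout (OFPER_HARD_TIMEOUT).  A real datapath would unlink
                 * it from its table, report the expiration and 'reason' to
                 * the controller, and only then reclaim it. */
                flow_deferred_free(flow);
        }
}
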
/* Allocates and returns a new flow with room for 'n_actions' actions, using
 * allocation flags 'flags'.  Returns the new flow or a null pointer on
 * failure. */
struct sw_flow *flow_alloc(int n_actions, gfp_t flags)
{
        struct sw_flow *flow = kmem_cache_alloc(flow_cache, flags);
        if (unlikely(!flow))
                return NULL;

        flow->n_actions = n_actions;
        flow->actions = kmalloc(n_actions * sizeof *flow->actions,
                                flags);
        if (unlikely(!flow->actions) && n_actions > 0) {
                kmem_cache_free(flow_cache, flow);
                return NULL;
        }
        return flow;
}

/* Frees 'flow' immediately. */
void flow_free(struct sw_flow *flow)
{
        if (unlikely(!flow))
                return;
        kfree(flow->actions);
        kmem_cache_free(flow_cache, flow);
}
EXPORT_SYMBOL(flow_free);

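/*
 * Illustrative sketch (not part of the original file): the usual
 * allocate/initialize/free-on-error pattern around flow_alloc().  The action
 * count, GFP flags, and helper name are placeholders.
 */
static struct sw_flow *example_make_flow(int n_actions)
{
        struct sw_flow *flow = flow_alloc(n_actions, GFP_ATOMIC);
        if (!flow)
                return NULL;

        /* ... fill in flow->key, flow->actions[], timeouts, counters ...
         * If a later step fails before the flow is published in a table,
         * release it with flow_free(); once readers can see it, use
         * flow_deferred_free() instead. */
        return flow;
}
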
/* RCU callback used by flow_deferred_free. */
static void rcu_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
        flow_free(flow);
}

/* Schedules 'flow' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void flow_deferred_free(struct sw_flow *flow)
{
        call_rcu(&flow->rcu, rcu_callback);
}
EXPORT_SYMBOL(flow_deferred_free);

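/*
 * Illustrative sketch (not part of the original file): the deletion side of
 * the RCU pattern these helpers support.  The list primitive and field name
 * are illustrative only; the point is that the flow is unlinked first, so no
 * new lookup can find it, and its memory is reclaimed only after concurrent
 * readers leave their RCU read-side critical sections.
 */
static void example_delete_flow(struct sw_flow *flow)
{
        /* 1. Unlink the flow under the table's write-side lock, e.g.
         *    hlist_del_rcu(&flow->node) for a hash-chained table (the field
         *    name is hypothetical). */

        /* 2. Defer the actual kfree()/kmem_cache_free() past the next grace
         *    period. */
        flow_deferred_free(flow);
}
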
/* Prints a representation of 'key' to the kernel log. */
void print_flow(const struct sw_flow_key *key)
{
        printk("wild%08x port%04x:vlan%04x mac%02x:%02x:%02x:%02x:%02x:%02x"
               "->%02x:%02x:%02x:%02x:%02x:%02x "
               "proto%04x ip%u.%u.%u.%u->%u.%u.%u.%u port%d->%d\n",
               key->wildcards, ntohs(key->in_port), ntohs(key->dl_vlan),
               key->dl_src[0], key->dl_src[1], key->dl_src[2],
               key->dl_src[3], key->dl_src[4], key->dl_src[5],
               key->dl_dst[0], key->dl_dst[1], key->dl_dst[2],
               key->dl_dst[3], key->dl_dst[4], key->dl_dst[5],
               ntohs(key->dl_type),
               ((unsigned char *)&key->nw_src)[0],
               ((unsigned char *)&key->nw_src)[1],
               ((unsigned char *)&key->nw_src)[2],
               ((unsigned char *)&key->nw_src)[3],
               ((unsigned char *)&key->nw_dst)[0],
               ((unsigned char *)&key->nw_dst)[1],
               ((unsigned char *)&key->nw_dst)[2],
               ((unsigned char *)&key->nw_dst)[3],
               ntohs(key->tp_src), ntohs(key->tp_dst));
}
EXPORT_SYMBOL(print_flow);

/* Returns nonzero if 'skb' is long enough to contain the complete TCP header
 * indicated by its data offset field, zero otherwise. */
static int tcphdr_ok(struct sk_buff *skb)
{
        int th_ofs = skb_transport_offset(skb);
        if (skb->len >= th_ofs + sizeof(struct tcphdr)) {
                int tcp_len = tcp_hdrlen(skb);
                return (tcp_len >= sizeof(struct tcphdr)
                        && skb->len >= th_ofs + tcp_len);
        }
        return 0;
}

/* Returns nonzero if 'skb' is long enough to contain a complete UDP header,
 * zero otherwise. */
static int udphdr_ok(struct sk_buff *skb)
{
        int th_ofs = skb_transport_offset(skb);
        return skb->len >= th_ofs + sizeof(struct udphdr);
}

/* Parses the Ethernet frame in 'skb', which was received on 'in_port',
 * and initializes 'key' to match.  Returns 1 if 'skb' contains an IP
 * fragment, 0 otherwise. */
int flow_extract(struct sk_buff *skb, uint16_t in_port,
                 struct sw_flow_key *key)
{
        struct ethhdr *mac;
        int nh_ofs, th_ofs;
        int retval = 0;

        key->in_port = htons(in_port);
        key->pad = 0;
        key->wildcards = 0;
        key->nw_src_mask = 0;
        key->nw_dst_mask = 0;

        /* This code doesn't check that skb->len is long enough to contain the
         * MAC or network header.  With a 46-byte minimum length frame this
         * assumption is always correct. */

        /* Doesn't verify checksums.  Should it? */

        /* Data link layer.  We only support Ethernet. */
        mac = eth_hdr(skb);
        nh_ofs = sizeof(struct ethhdr);
        if (likely(ntohs(mac->h_proto) >= OFP_DL_TYPE_ETH2_CUTOFF)) {
                /* This is an Ethernet II frame */
                key->dl_type = mac->h_proto;
        } else {
                /* This is an 802.2 frame */
                if (snap_get_ethertype(skb, &key->dl_type) != -EINVAL) {
                        nh_ofs += sizeof(struct snap_hdr);
                } else {
                        key->dl_type = htons(OFP_DL_TYPE_NOT_ETH_TYPE);
                        nh_ofs += sizeof(struct llc_pdu_un);
                }
        }

        /* Check for a VLAN tag */
        if (likely(key->dl_type != htons(ETH_P_8021Q))) {
                key->dl_vlan = htons(OFP_VLAN_NONE);
        } else {
                struct vlan_hdr *vh = (struct vlan_hdr *)(skb_mac_header(skb) + nh_ofs);
                key->dl_type = vh->h_vlan_encapsulated_proto;
                key->dl_vlan = vh->h_vlan_TCI & htons(VLAN_VID_MASK);
                nh_ofs += sizeof(*vh);
        }
        memcpy(key->dl_src, mac->h_source, ETH_ALEN);
        memcpy(key->dl_dst, mac->h_dest, ETH_ALEN);
        skb_set_network_header(skb, nh_ofs);

        /* Network layer. */
        if (likely(key->dl_type == htons(ETH_P_IP))) {
                struct iphdr *nh = ip_hdr(skb);
                key->nw_src = nh->saddr;
                key->nw_dst = nh->daddr;
                key->nw_proto = nh->protocol;
                th_ofs = nh_ofs + nh->ihl * 4;
                skb_set_transport_header(skb, th_ofs);

                /* Transport layer. */
                if (!(nh->frag_off & htons(IP_MF | IP_OFFSET))) {
                        if (key->nw_proto == IPPROTO_TCP) {
                                if (tcphdr_ok(skb)) {
                                        struct tcphdr *tcp = tcp_hdr(skb);
                                        key->tp_src = tcp->source;
                                        key->tp_dst = tcp->dest;
                                } else {
                                        /* Avoid tricking other code into
                                         * thinking that this packet has an L4
                                         * header. */
                                        goto no_proto;
                                }
                        } else if (key->nw_proto == IPPROTO_UDP) {
                                if (udphdr_ok(skb)) {
                                        struct udphdr *udp = udp_hdr(skb);
                                        key->tp_src = udp->source;
                                        key->tp_dst = udp->dest;
                                } else {
                                        /* Avoid tricking other code into
                                         * thinking that this packet has an L4
                                         * header. */
                                        goto no_proto;
                                }
                        } else {
                                goto no_th;
                        }
                } else {
                        retval = 1;
                        goto no_th;
                }

                return 0;
        }

        key->nw_src = 0;
        key->nw_dst = 0;

no_proto:
        key->nw_proto = 0;

no_th:
        key->tp_src = 0;
        key->tp_dst = 0;
        return retval;
}

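/*
 * Illustrative sketch (not part of the original file): how a receive path
 * might combine flow_extract() with a wildcard-aware lookup.  The helper
 * name is hypothetical and table lookup is reduced to a single wildcarded
 * entry.
 */
static int example_classify(struct sk_buff *skb, uint16_t in_port,
                            const struct sw_flow_key *entry_key)
{
        struct sw_flow_key key;
        int is_frag;

        /* Fills in every field of 'key' with wildcards == 0.  A nonzero
         * return means 'skb' is an IP fragment, in which case the transport
         * ports were zeroed rather than read from a possibly absent or
         * partial L4 header. */
        is_frag = flow_extract(skb, in_port, &key);
        (void) is_frag;

        return flow_matches_1wild(&key, entry_key);
}
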
/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int flow_init(void)
{
        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
                                       0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        return 0;
}

/* Uninitializes the flow module. */
void flow_exit(void)
{
        kmem_cache_destroy(flow_cache);
}

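/*
 * Illustrative sketch (not part of the original file): where flow_init() and
 * flow_exit() fit in a module's lifetime.  The function names are
 * placeholders; the real datapath module has its own init and exit paths,
 * and every flow must already have been freed before flow_exit() runs.
 */
static int __init example_module_init(void)
{
        int err = flow_init();
        if (err)
                return err;
        /* ... register the rest of the datapath here ... */
        return 0;
}

static void __exit example_module_exit(void)
{
        /* ... tear down everything that can still create or hold flows ... */
        flow_exit();
}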