effd0c83ec41b5f133144bfdd5776aa9238d590e
[sliver-openvswitch.git] / datapath / flow.c
1 /*
2  * Distributed under the terms of the GNU GPL version 2.
3  * Copyright (c) 2007, 2008 The Board of Trustees of The Leland 
4  * Stanford Junior University
5  */
6
7 #include "flow.h"
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/if_ether.h>
11 #include <linux/if_vlan.h>
12 #include <net/llc_pdu.h>
13 #include <linux/ip.h>
14 #include <linux/jiffies.h>
15 #include <linux/kernel.h>
16 #include <linux/llc.h>
17 #include <linux/module.h>
18 #include <linux/tcp.h>
19 #include <linux/udp.h>
20 #include <linux/icmp.h>
21 #include <linux/in.h>
22 #include <linux/rcupdate.h>
23 #include <net/ip.h>
24
25 #include "openflow/openflow.h"
26 #include "compat.h"
27
/* Slab cache from which struct sw_flow is allocated; created in
 * flow_init() and destroyed in flow_exit(). */
struct kmem_cache *flow_cache;
29
30 /* Internal function used to compare fields in flow. */
31 static inline
32 int flow_fields_match(const struct sw_flow_key *a, const struct sw_flow_key *b,
33                       uint32_t w, uint32_t src_mask, uint32_t dst_mask)
34 {
35         return ((w & OFPFW_IN_PORT || a->in_port == b->in_port)
36                 && (w & OFPFW_DL_VLAN || a->dl_vlan == b->dl_vlan)
37                 && (w & OFPFW_DL_SRC || !memcmp(a->dl_src, b->dl_src, ETH_ALEN))
38                 && (w & OFPFW_DL_DST || !memcmp(a->dl_dst, b->dl_dst, ETH_ALEN))
39                 && (w & OFPFW_DL_TYPE || a->dl_type == b->dl_type)
40                 && !((a->nw_src ^ b->nw_src) & src_mask)
41                 && !((a->nw_dst ^ b->nw_dst) & dst_mask)
42                 && (w & OFPFW_NW_PROTO || a->nw_proto == b->nw_proto)
43                 && (w & OFPFW_TP_SRC || a->tp_src == b->tp_src)
44                 && (w & OFPFW_TP_DST || a->tp_dst == b->tp_dst));
45 }
46
/* Returns nonzero if 'a' and 'b' match, that is, if their fields are equal
 * modulo wildcards in 'b', zero otherwise.  Only 'b''s wildcards and IP
 * netmasks are consulted; 'a' is treated as a fully-specified key (e.g.
 * one extracted from a packet by flow_extract()). */
int flow_matches_1wild(const struct sw_flow_key *a,
                       const struct sw_flow_key *b)
{
        return flow_fields_match(a, b, b->wildcards,
                                 b->nw_src_mask, b->nw_dst_mask);
}
EXPORT_SYMBOL(flow_matches_1wild);
56
/* Returns nonzero if 'a' and 'b' match, that is, if their fields are equal
 * modulo wildcards in 'a' or 'b', zero otherwise. */
int flow_matches_2wild(const struct sw_flow_key *a,
                       const struct sw_flow_key *b)
{
        /* Union the wildcard sets and intersect the netmasks so that a
         * field wildcarded by either key is ignored by the comparison. */
        return flow_fields_match(a, b,
                                 a->wildcards | b->wildcards,
                                 a->nw_src_mask & b->nw_src_mask,
                                 a->nw_dst_mask & b->nw_dst_mask);
}
EXPORT_SYMBOL(flow_matches_2wild);
68
69 /* Returns nonzero if 't' (the table entry's key) and 'd' (the key
70  * describing the match) match, that is, if their fields are
71  * equal modulo wildcards, zero otherwise.  If 'strict' is nonzero, the
72  * wildcards must match in both 't_key' and 'd_key'.  Note that the
73  * table's wildcards are ignored unless 'strict' is set. */
74 int flow_matches_desc(const struct sw_flow_key *t, const struct sw_flow_key *d, 
75                 int strict)
76 {
77         if (strict && d->wildcards != t->wildcards)
78                 return 0;
79         return flow_matches_1wild(t, d);
80 }
81 EXPORT_SYMBOL(flow_matches_desc);
82
83 static uint32_t make_nw_mask(int n_wild_bits)
84 {
85         n_wild_bits &= (1u << OFPFW_NW_SRC_BITS) - 1;
86         return n_wild_bits < 32 ? htonl(~((1u << n_wild_bits) - 1)) : 0;
87 }
88
/* Fills in the flow key 'to' from the wire-format match 'from',
 * normalizing the wildcard bits (fields that cannot be sensibly matched
 * are forced to wildcard or exact-match as appropriate) and computing
 * the IP netmasks consumed by the match functions. */
void flow_extract_match(struct sw_flow_key* to, const struct ofp_match* from)
{
        /* Wildcards arrive in network byte order; keep only defined bits. */
        to->wildcards = ntohl(from->wildcards) & OFPFW_ALL;
        to->pad = 0;
        to->in_port = from->in_port;
        to->dl_vlan = from->dl_vlan;
        memcpy(to->dl_src, from->dl_src, ETH_ALEN);
        memcpy(to->dl_dst, from->dl_dst, ETH_ALEN);
        to->dl_type = from->dl_type;

        /* Zero the L3/L4 fields; they are filled in below only when the
         * data link type makes them meaningful. */
        to->nw_src = to->nw_dst = to->nw_proto = 0;
        to->tp_src = to->tp_dst = 0;

#define OFPFW_TP (OFPFW_TP_SRC | OFPFW_TP_DST)
#define OFPFW_NW (OFPFW_NW_SRC_MASK | OFPFW_NW_DST_MASK | OFPFW_NW_PROTO)
        if (to->wildcards & OFPFW_DL_TYPE) {
                /* Can't sensibly match on network or transport headers if the
                 * data link type is unknown. */
                to->wildcards |= OFPFW_NW | OFPFW_TP;
        } else if (from->dl_type == htons(ETH_P_IP)) {
                to->nw_src   = from->nw_src;
                to->nw_dst   = from->nw_dst;
                to->nw_proto = from->nw_proto;

                if (to->wildcards & OFPFW_NW_PROTO) {
                        /* Can't sensibly match on transport headers if the
                         * network protocol is unknown. */
                        to->wildcards |= OFPFW_TP;
                } else if (from->nw_proto == IPPROTO_TCP
                                || from->nw_proto == IPPROTO_UDP
                                || from->nw_proto == IPPROTO_ICMP) {
                        to->tp_src = from->tp_src;
                        to->tp_dst = from->tp_dst;
                } else {
                        /* Transport layer fields are undefined.  Mark them as
                         * exact-match to allow such flows to reside in
                         * table-hash, instead of falling into table-linear. */
                        to->wildcards &= ~OFPFW_TP;
                }
        } else {
                /* Network and transport layer fields are undefined.  Mark them
                 * as exact-match to allow such flows to reside in table-hash,
                 * instead of falling into table-linear. */
                to->wildcards &= ~(OFPFW_NW | OFPFW_TP);
        }

        /* We set these late because code above adjusts to->wildcards. */
        to->nw_src_mask = make_nw_mask(to->wildcards >> OFPFW_NW_SRC_SHIFT);
        to->nw_dst_mask = make_nw_mask(to->wildcards >> OFPFW_NW_DST_SHIFT);
}
139
140 void flow_fill_match(struct ofp_match* to, const struct sw_flow_key* from)
141 {
142         to->wildcards = htonl(from->wildcards);
143         to->in_port   = from->in_port;
144         to->dl_vlan   = from->dl_vlan;
145         memcpy(to->dl_src, from->dl_src, ETH_ALEN);
146         memcpy(to->dl_dst, from->dl_dst, ETH_ALEN);
147         to->dl_type   = from->dl_type;
148         to->nw_src    = from->nw_src;
149         to->nw_dst    = from->nw_dst;
150         to->nw_proto  = from->nw_proto;
151         to->tp_src    = from->tp_src;
152         to->tp_dst    = from->tp_dst;
153         to->pad       = 0;
154 }
155
156 int flow_timeout(struct sw_flow *flow)
157 {
158         if (flow->idle_timeout != OFP_FLOW_PERMANENT
159             && time_after(jiffies, flow->used + flow->idle_timeout * HZ))
160                 return OFPER_IDLE_TIMEOUT;
161         else if (flow->hard_timeout != OFP_FLOW_PERMANENT
162                  && time_after(jiffies,
163                                flow->init_time + flow->hard_timeout * HZ))
164                 return OFPER_HARD_TIMEOUT;
165         else
166                 return -1;
167 }
168 EXPORT_SYMBOL(flow_timeout);
169
170 /* Allocates and returns a new flow with room for 'actions_len' actions, 
171  * using allocation flags 'flags'.  Returns the new flow or a null pointer 
172  * on failure. */
173 struct sw_flow *flow_alloc(size_t actions_len, gfp_t flags)
174 {
175         struct sw_flow_actions *sfa;
176         size_t size = sizeof *sfa + actions_len;
177         struct sw_flow *flow = kmem_cache_alloc(flow_cache, flags);
178         if (unlikely(!flow))
179                 return NULL;
180
181         sfa = kmalloc(size, flags);
182         if (unlikely(!sfa)) {
183                 kmem_cache_free(flow_cache, flow);
184                 return NULL;
185         }
186         sfa->actions_len = actions_len;
187         flow->sf_acts = sfa;
188
189         return flow;
190 }
191
192 /* Frees 'flow' immediately. */
193 void flow_free(struct sw_flow *flow)
194 {
195         if (unlikely(!flow))
196                 return;
197         kfree(flow->sf_acts);
198         kmem_cache_free(flow_cache, flow);
199 }
200 EXPORT_SYMBOL(flow_free);
201
/* RCU callback used by flow_deferred_free: recovers the enclosing flow
 * from its embedded rcu_head and frees it once the grace period has
 * elapsed. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
        flow_free(flow);
}
208
/* Schedules 'flow' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void flow_deferred_free(struct sw_flow *flow)
{
        call_rcu(&flow->rcu, rcu_free_flow_callback);
}
EXPORT_SYMBOL(flow_deferred_free);
216
/* RCU callback used by flow_deferred_free_acts: frees a replaced action
 * list once no RCU reader can still be traversing it. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
        struct sw_flow_actions *sf_acts = container_of(rcu,
                        struct sw_flow_actions, rcu);
        kfree(sf_acts);
}
224
/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
        call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}
EXPORT_SYMBOL(flow_deferred_free_acts);
232
233 /* Copies 'actions' into a newly allocated structure for use by 'flow'
234  * and safely frees the structure that defined the previous actions. */
235 void flow_replace_acts(struct sw_flow *flow, 
236                 const struct ofp_action_header *actions, size_t actions_len)
237 {
238         struct sw_flow_actions *sfa;
239         struct sw_flow_actions *orig_sfa = flow->sf_acts;
240         size_t size = sizeof *sfa + actions_len;
241
242         sfa = kmalloc(size, GFP_ATOMIC);
243         if (unlikely(!sfa))
244                 return;
245
246         sfa->actions_len = actions_len;
247         memcpy(sfa->actions, actions, actions_len);
248
249         rcu_assign_pointer(flow->sf_acts, sfa);
250         flow_deferred_free_acts(orig_sfa);
251
252         return;
253 }
254 EXPORT_SYMBOL(flow_replace_acts);
255
/* Prints a representation of 'key' to the kernel log.  Multi-byte fields
 * are stored in network byte order in the key, hence the ntohs() calls;
 * the IP addresses are printed as dotted quads by walking their bytes in
 * memory order.
 *
 * NOTE(review): printk() is called without a KERN_* level prefix, so the
 * message goes out at the default log level — confirm that is intended
 * for this debug helper. */
void print_flow(const struct sw_flow_key *key)
{
        printk("wild%08x port%04x:vlan%04x mac%02x:%02x:%02x:%02x:%02x:%02x"
                        "->%02x:%02x:%02x:%02x:%02x:%02x "
                        "proto%04x ip%u.%u.%u.%u->%u.%u.%u.%u port%d->%d\n",
                        key->wildcards, ntohs(key->in_port), ntohs(key->dl_vlan),
                        key->dl_src[0], key->dl_src[1], key->dl_src[2],
                        key->dl_src[3], key->dl_src[4], key->dl_src[5],
                        key->dl_dst[0], key->dl_dst[1], key->dl_dst[2],
                        key->dl_dst[3], key->dl_dst[4], key->dl_dst[5],
                        ntohs(key->dl_type),
                        ((unsigned char *)&key->nw_src)[0],
                        ((unsigned char *)&key->nw_src)[1],
                        ((unsigned char *)&key->nw_src)[2],
                        ((unsigned char *)&key->nw_src)[3],
                        ((unsigned char *)&key->nw_dst)[0],
                        ((unsigned char *)&key->nw_dst)[1],
                        ((unsigned char *)&key->nw_dst)[2],
                        ((unsigned char *)&key->nw_dst)[3],
                        ntohs(key->tp_src), ntohs(key->tp_dst));
}
EXPORT_SYMBOL(print_flow);
279
#define SNAP_OUI_LEN 3

/* Layout of an Ethernet frame carrying an 802.2 LLC/SNAP header, used by
 * flow_extract() (via is_snap()) to recover the encapsulated Ethernet
 * type.  Packed because it is overlaid directly onto raw packet bytes. */
struct eth_snap_hdr
{
        struct ethhdr eth;
        uint8_t  dsap;  /* Always 0xAA */
        uint8_t  ssap;  /* Always 0xAA */
        uint8_t  ctrl;
        uint8_t  oui[SNAP_OUI_LEN];     /* Organizationally unique identifier. */
        uint16_t ethertype;             /* Encapsulated Ethernet type. */
} __attribute__ ((packed));
291
292 static int is_snap(const struct eth_snap_hdr *esh)
293 {
294         return (esh->dsap == LLC_SAP_SNAP
295                 && esh->ssap == LLC_SAP_SNAP
296                 && !memcmp(esh->oui, "\0\0\0", 3));
297 }
298
/* Returns nonzero if 'skb' carries a complete IPv4 header at its network
 * offset: the fixed 20-byte header fits within the packet, the
 * options-inclusive length (ihl) is at least the minimum, and the whole
 * header can be pulled into linear data. */
static int iphdr_ok(struct sk_buff *skb)
{
        int nh_ofs = skb_network_offset(skb);
        if (skb->len >= nh_ofs + sizeof(struct iphdr)) {
                /* ip_hdrlen() reads ihl from the header; assumes the caller
                 * (flow_extract) already pulled it into linear data via its
                 * 64-byte pskb_may_pull() — TODO confirm for deeply
                 * encapsulated headers. */
                int ip_len = ip_hdrlen(skb);
                return (ip_len >= sizeof(struct iphdr)
                        && pskb_may_pull(skb, nh_ofs + ip_len));
        }
        return 0;
}
309
/* Returns nonzero if 'skb' holds a complete TCP header — including any
 * options indicated by the data-offset field — at its transport offset. */
static int tcphdr_ok(struct sk_buff *skb)
{
        int th_ofs = skb_transport_offset(skb);
        if (pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))) {
                /* tcp_hdrlen() reads doff, valid only after the pull above. */
                int tcp_len = tcp_hdrlen(skb);
                return (tcp_len >= sizeof(struct tcphdr)
                        && skb->len >= th_ofs + tcp_len);
        }
        return 0;
}
320
321 static int udphdr_ok(struct sk_buff *skb)
322 {
323         int th_ofs = skb_transport_offset(skb);
324         return pskb_may_pull(skb, th_ofs + sizeof(struct udphdr));
325 }
326
327 static int icmphdr_ok(struct sk_buff *skb)
328 {
329         int th_ofs = skb_transport_offset(skb);
330         return pskb_may_pull(skb, th_ofs + sizeof(struct icmphdr));
331 }
332
/* Parses the Ethernet frame in 'skb', which was received on 'in_port',
 * and initializes 'key' to match.  Returns 1 if 'skb' contains an IP
 * fragment, 0 otherwise.  As a side effect, sets the skb's mac, network,
 * and transport header offsets for the layers it recognizes. */
int flow_extract(struct sk_buff *skb, uint16_t in_port,
                 struct sw_flow_key *key)
{
        struct ethhdr *eth;
        struct eth_snap_hdr *esh;
        int retval = 0;
        int nh_ofs;

        /* Start from an all-exact-match key with no VLAN tag. */
        memset(key, 0, sizeof *key);
        key->dl_vlan = htons(OFP_VLAN_NONE);
        key->in_port = htons(in_port);

        if (skb->len < sizeof *eth)
                return 0;
        /* Pull up to the first 64 bytes into linear data so the header
         * parsing below can use direct pointer access. */
        if (!pskb_may_pull(skb, skb->len >= 64 ? 64 : skb->len)) {
                return 0;
        }

        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);
        esh = (struct eth_snap_hdr *) eth;
        nh_ofs = sizeof *eth;
        /* Distinguish Ethernet II (h_proto is a type) from 802.3
         * (h_proto is a length) using the OpenFlow cutoff value. */
        if (likely(ntohs(eth->h_proto) >= OFP_DL_TYPE_ETH2_CUTOFF))
                key->dl_type = eth->h_proto;
        else if (skb->len >= sizeof *esh && is_snap(esh)) {
                /* 802.3 + LLC/SNAP: the real type follows the SNAP header. */
                key->dl_type = esh->ethertype;
                nh_ofs = sizeof *esh;
        } else {
                /* Plain 802.3/LLC: no Ethernet type is available. */
                key->dl_type = htons(OFP_DL_TYPE_NOT_ETH_TYPE);
                if (skb->len >= nh_ofs + sizeof(struct llc_pdu_un)) {
                        nh_ofs += sizeof(struct llc_pdu_un);
                }
        }

        /* Check for a VLAN tag */
        if (key->dl_type == htons(ETH_P_8021Q) &&
            skb->len >= nh_ofs + sizeof(struct vlan_hdr)) {
                struct vlan_hdr *vh = (struct vlan_hdr*)(skb->data + nh_ofs);
                /* Replace the 802.1Q type with the encapsulated one and
                 * keep only the VLAN ID bits of the TCI. */
                key->dl_type = vh->h_vlan_encapsulated_proto;
                key->dl_vlan = vh->h_vlan_TCI & htons(VLAN_VID_MASK);
                nh_ofs += sizeof(struct vlan_hdr);
        }
        memcpy(key->dl_src, eth->h_source, ETH_ALEN);
        memcpy(key->dl_dst, eth->h_dest, ETH_ALEN);
        skb_set_network_header(skb, nh_ofs);

        /* Network layer. */
        if (key->dl_type == htons(ETH_P_IP) && iphdr_ok(skb)) {
                struct iphdr *nh = ip_hdr(skb);
                int th_ofs = nh_ofs + nh->ihl * 4;
                key->nw_src = nh->saddr;
                key->nw_dst = nh->daddr;
                key->nw_proto = nh->protocol;
                skb_set_transport_header(skb, th_ofs);

                /* Transport layer.  Only the first fragment (offset 0, no
                 * more-fragments handling here) carries L4 headers. */
                if (!(nh->frag_off & htons(IP_MF | IP_OFFSET))) {
                        if (key->nw_proto == IPPROTO_TCP) {
                                if (tcphdr_ok(skb)) {
                                        struct tcphdr *tcp = tcp_hdr(skb);
                                        key->tp_src = tcp->source;
                                        key->tp_dst = tcp->dest;
                                } else {
                                        /* Avoid tricking other code into
                                         * thinking that this packet has an L4
                                         * header. */
                                        key->nw_proto = 0;
                                }
                        } else if (key->nw_proto == IPPROTO_UDP) {
                                if (udphdr_ok(skb)) {
                                        struct udphdr *udp = udp_hdr(skb);
                                        key->tp_src = udp->source;
                                        key->tp_dst = udp->dest;
                                } else {
                                        /* Avoid tricking other code into
                                         * thinking that this packet has an L4
                                         * header. */
                                        key->nw_proto = 0;
                                }
                        } else if (key->nw_proto == IPPROTO_ICMP) {
                                if (icmphdr_ok(skb)) {
                                        struct icmphdr *icmp = icmp_hdr(skb);
                                        /* The ICMP type and code fields use the 16-bit
                                         * transport port fields, so we need to store them
                                         * in 16-bit network byte order. */
                                        key->icmp_type = htons(icmp->type);
                                        key->icmp_code = htons(icmp->code);
                                } else {
                                        /* Avoid tricking other code into
                                         * thinking that this packet has an L4
                                         * header. */
                                        key->nw_proto = 0;
                                }
                        }
                } else {
                        retval = 1;
                }
        } else {
                skb_reset_transport_header(skb);
        }
        return retval;
}
438
/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int flow_init(void)
{
        /* Slab cache for struct sw_flow; the variable-length action list
         * is allocated separately (see flow_alloc()). */
        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
                                        0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        return 0;
}
450
/* Uninitializes the flow module.  Destroys the slab cache created by
 * flow_init(); all flows must already have been freed. */
void flow_exit(void)
{
        kmem_cache_destroy(flow_cache);
}
456