datapath: Use vlan acceleration for vlan operations.
[sliver-openvswitch.git] / datapath / actions.c
/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for executing flow actions. */

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/checksum.h>

#include "actions.h"
#include "checksum.h"
#include "datapath.h"
#include "openvswitch/datapath-protocol.h"
#include "vlan.h"
#include "vport.h"

static int do_execute_actions(struct datapath *, struct sk_buff *,
                              const struct sw_flow_key *,
                              const struct nlattr *actions, u32 actions_len);

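/* Returns an skb that is safe to modify in place: a cloned 'skb' is replaced
 * by a writable copy with at least 'min_headroom' bytes of headroom (the
 * checksum state is carried over and the original is freed); an uncloned skb
 * simply has its headers pulled into the linear data area.  On failure 'skb'
 * is freed and NULL is returned. */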
static struct sk_buff *make_writable(struct sk_buff *skb, unsigned min_headroom)
{
        if (skb_cloned(skb)) {
                struct sk_buff *nskb;
                unsigned headroom = max(min_headroom, skb_headroom(skb));

                nskb = skb_copy_expand(skb, headroom, skb_tailroom(skb), GFP_ATOMIC);
                if (nskb) {
                        set_skb_csum_bits(skb, nskb);
                        kfree_skb(skb);
                        return nskb;
                }
        } else {
                unsigned int hdr_len = (skb_transport_offset(skb)
                                        + sizeof(struct tcphdr));
                if (pskb_may_pull(skb, min(hdr_len, skb->len)))
                        return skb;
        }
        kfree_skb(skb);
        return NULL;
}

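/* Removes the outermost VLAN tag from 'skb'.  A tag held out of band for
 * hardware acceleration is cleared by zeroing the TCI; an in-band 802.1Q
 * header is stripped from the packet data, updating skb->protocol, the MAC
 * header offset, and any OVS_CSUM_COMPLETE checksum. */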
static struct sk_buff *strip_vlan(struct sk_buff *skb)
{
        struct ethhdr *eh;

        if (vlan_tx_tag_present(skb)) {
                vlan_set_tci(skb, 0);
                return skb;
        }

        if (unlikely(vlan_eth_hdr(skb)->h_vlan_proto != htons(ETH_P_8021Q) ||
            skb->len < VLAN_ETH_HLEN))
                return skb;

        skb = make_writable(skb, 0);
        if (unlikely(!skb))
                return NULL;

        if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum, csum_partial(skb->data
                                        + ETH_HLEN, VLAN_HLEN, 0));

        memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);

        eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);

        skb->protocol = eh->h_proto;
        skb->mac_header += VLAN_HLEN;

        return skb;
}

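/* Sets the 802.1Q TCI of 'skb' to the value carried in attribute 'a'.  If the
 * tag is already out of band or the packet has no in-band 802.1Q header, the
 * new tag is stored out of band with __vlan_hwaccel_put_tag() so that VLAN
 * acceleration can be used; otherwise the in-band TCI is rewritten in place
 * and any OVS_CSUM_COMPLETE checksum is adjusted. */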
static struct sk_buff *modify_vlan_tci(struct datapath *dp, struct sk_buff *skb,
                                       const struct sw_flow_key *key,
                                       const struct nlattr *a, u32 actions_len)
{
        __be16 tci = nla_get_be16(a);
        struct vlan_ethhdr *vh;
        __be16 old_tci;

        if (vlan_tx_tag_present(skb) || skb->protocol != htons(ETH_P_8021Q))
                return __vlan_hwaccel_put_tag(skb, ntohs(tci));

        skb = make_writable(skb, 0);
        if (unlikely(!skb))
                return NULL;

        if (unlikely(skb->len < VLAN_ETH_HLEN))
                return skb;

        vh = vlan_eth_hdr(skb);

        old_tci = vh->h_vlan_TCI;
        vh->h_vlan_TCI = tci;

        if (get_ip_summed(skb) == OVS_CSUM_COMPLETE) {
                __be16 diff[] = { ~old_tci, vh->h_vlan_TCI };
                skb->csum = ~csum_partial((char *)diff, sizeof(diff), ~skb->csum);
        }

        return skb;
}

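/* Returns true if 'skb' carries an IPv4 packet whose transport header has
 * been set past the network header, so the actions below can safely locate
 * its L3 and L4 fields. */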
static bool is_ip(struct sk_buff *skb, const struct sw_flow_key *key)
{
        return (key->dl_type == htons(ETH_P_IP) &&
                skb->transport_header > skb->network_header);
}

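/* Returns a pointer to the TCP or UDP checksum field of 'skb', or NULL if the
 * flow is neither TCP nor UDP or the packet is too short to hold a complete
 * L4 header. */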
static __sum16 *get_l4_checksum(struct sk_buff *skb, const struct sw_flow_key *key)
{
        int transport_len = skb->len - skb_transport_offset(skb);
        if (key->nw_proto == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        return &tcp_hdr(skb)->check;
        } else if (key->nw_proto == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr)))
                        return &udp_hdr(skb)->check;
        }
        return NULL;
}

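/* Sets the IPv4 source or destination address of 'skb', as selected by the
 * attribute type of 'a', updating the IP header checksum and, for TCP and
 * UDP, the L4 checksum as well. */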
static struct sk_buff *set_nw_addr(struct sk_buff *skb,
                                   const struct sw_flow_key *key,
                                   const struct nlattr *a)
{
        __be32 new_nwaddr = nla_get_be32(a);
        struct iphdr *nh;
        __sum16 *check;
        __be32 *nwaddr;

        if (unlikely(!is_ip(skb, key)))
                return skb;

        skb = make_writable(skb, 0);
        if (unlikely(!skb))
                return NULL;

        nh = ip_hdr(skb);
        nwaddr = nla_type(a) == ODP_ACTION_ATTR_SET_NW_SRC ? &nh->saddr : &nh->daddr;

        check = get_l4_checksum(skb, key);
        if (likely(check))
                inet_proto_csum_replace4(check, skb, *nwaddr, new_nwaddr, 1);
        csum_replace4(&nh->check, *nwaddr, new_nwaddr);

        *nwaddr = new_nwaddr;

        return skb;
}

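/* Replaces the DSCP bits of the IPv4 TOS field with 'nw_tos' while preserving
 * the ECN bits, and updates the IP header checksum accordingly. */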
static struct sk_buff *set_nw_tos(struct sk_buff *skb,
                                  const struct sw_flow_key *key,
                                  u8 nw_tos)
{
        if (unlikely(!is_ip(skb, key)))
                return skb;

        skb = make_writable(skb, 0);
        if (skb) {
                struct iphdr *nh = ip_hdr(skb);
                u8 *f = &nh->tos;
                u8 old = *f;
                u8 new;

                /* Set the DSCP bits and preserve the ECN bits. */
                new = nw_tos | (nh->tos & INET_ECN_MASK);
                csum_replace4(&nh->check, (__force __be32)old,
                                          (__force __be32)new);
                *f = new;
        }
        return skb;
}

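/* Sets the TCP or UDP source or destination port of 'skb', as selected by the
 * attribute type of 'a', and updates the L4 checksum. */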
static struct sk_buff *set_tp_port(struct sk_buff *skb,
                                   const struct sw_flow_key *key,
                                   const struct nlattr *a)
{
        struct udphdr *th;
        __sum16 *check;
        __be16 *port;

        if (unlikely(!is_ip(skb, key)))
                return skb;

        skb = make_writable(skb, 0);
        if (unlikely(!skb))
                return NULL;

        /* Must follow make_writable() since that can move the skb data. */
        check = get_l4_checksum(skb, key);
        if (unlikely(!check))
                return skb;

        /*
         * Update port and checksum.
         *
         * This is OK because source and destination port numbers are at the
         * same offsets in both UDP and TCP headers, and get_l4_checksum() only
         * supports those protocols.
         */
        th = udp_hdr(skb);
        port = nla_type(a) == ODP_ACTION_ATTR_SET_TP_SRC ? &th->source : &th->dest;
        inet_proto_csum_replace2(check, skb, *port, nla_get_be16(a), 0);
        *port = nla_get_be16(a);

        return skb;
}

/**
 * is_spoofed_arp - check for invalid ARP packet
 *
 * @skb: skbuff containing an Ethernet packet, with network header pointing
 * just past the Ethernet and optional 802.1Q header.
 * @key: flow key extracted from @skb by flow_extract()
 *
 * Returns true if @skb is an invalid Ethernet+IPv4 ARP packet: one with screwy
 * or truncated header fields or one whose inner and outer Ethernet address
 * differ.
 */
static bool is_spoofed_arp(struct sk_buff *skb, const struct sw_flow_key *key)
{
        struct arp_eth_header *arp;

        if (key->dl_type != htons(ETH_P_ARP))
                return false;

        if (skb_network_offset(skb) + sizeof(struct arp_eth_header) > skb->len)
                return true;

        arp = (struct arp_eth_header *)skb_network_header(skb);
        return (arp->ar_hrd != htons(ARPHRD_ETHER) ||
                arp->ar_pro != htons(ETH_P_IP) ||
                arp->ar_hln != ETH_ALEN ||
                arp->ar_pln != 4 ||
                compare_ether_addr(arp->ar_sha, eth_hdr(skb)->h_source));
}

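/* Sends 'skb' out of the vport with port number 'out_port', or frees it if
 * 'skb' is NULL or the port does not exist. */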
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
        struct vport *p;

        if (!skb)
                goto error;

        p = rcu_dereference(dp->ports[out_port]);
        if (!p)
                goto error;

        vport_send(p, skb);
        return;

error:
        kfree_skb(skb);
}

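/* Passes a clone of 'skb' up to userspace as an ODP_PACKET_CMD_ACTION upcall,
 * carrying 'arg' as userdata. */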
static int output_control(struct datapath *dp, struct sk_buff *skb, u64 arg,
                          const struct sw_flow_key *key)
{
        struct dp_upcall_info upcall;

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        upcall.cmd = ODP_PACKET_CMD_ACTION;
        upcall.key = key;
        upcall.userdata = arg;
        upcall.sample_pool = 0;
        upcall.actions = NULL;
        upcall.actions_len = 0;
        return dp_upcall(dp, skb, &upcall);
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              const struct sw_flow_key *key,
                              const struct nlattr *actions, u32 actions_len)
{
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, so that doing a clone and
         * then freeing the original skbuff is wasteful.  So the following code
         * is slightly obscure just to avoid that. */
        int prev_port = -1;
        u32 priority = skb->priority;
        const struct nlattr *a;
        int rem, err;

        for (a = actions, rem = actions_len; rem > 0; a = nla_next(a, &rem)) {
                if (prev_port != -1) {
                        do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
                        prev_port = -1;
                }

                switch (nla_type(a)) {
                case ODP_ACTION_ATTR_OUTPUT:
                        prev_port = nla_get_u32(a);
                        break;

                case ODP_ACTION_ATTR_CONTROLLER:
                        err = output_control(dp, skb, nla_get_u64(a), key);
                        if (err) {
                                kfree_skb(skb);
                                return err;
                        }
                        break;

                case ODP_ACTION_ATTR_SET_TUNNEL:
                        OVS_CB(skb)->tun_id = nla_get_be64(a);
                        break;

                case ODP_ACTION_ATTR_SET_DL_TCI:
                        skb = modify_vlan_tci(dp, skb, key, a, rem);
                        break;

                case ODP_ACTION_ATTR_STRIP_VLAN:
                        skb = strip_vlan(skb);
                        break;

                case ODP_ACTION_ATTR_SET_DL_SRC:
                        skb = make_writable(skb, 0);
                        if (!skb)
                                return -ENOMEM;
                        memcpy(eth_hdr(skb)->h_source, nla_data(a), ETH_ALEN);
                        break;

                case ODP_ACTION_ATTR_SET_DL_DST:
                        skb = make_writable(skb, 0);
                        if (!skb)
                                return -ENOMEM;
                        memcpy(eth_hdr(skb)->h_dest, nla_data(a), ETH_ALEN);
                        break;

                case ODP_ACTION_ATTR_SET_NW_SRC:
                case ODP_ACTION_ATTR_SET_NW_DST:
                        skb = set_nw_addr(skb, key, a);
                        break;

                case ODP_ACTION_ATTR_SET_NW_TOS:
                        skb = set_nw_tos(skb, key, nla_get_u8(a));
                        break;

                case ODP_ACTION_ATTR_SET_TP_SRC:
                case ODP_ACTION_ATTR_SET_TP_DST:
                        skb = set_tp_port(skb, key, a);
                        break;

                case ODP_ACTION_ATTR_SET_PRIORITY:
                        skb->priority = nla_get_u32(a);
                        break;

                case ODP_ACTION_ATTR_POP_PRIORITY:
                        skb->priority = priority;
                        break;

                case ODP_ACTION_ATTR_DROP_SPOOFED_ARP:
                        if (unlikely(is_spoofed_arp(skb, key)))
                                goto exit;
                        break;
                }
                if (!skb)
                        return -ENOMEM;
        }
exit:
        if (prev_port != -1)
                do_output(dp, skb, prev_port);
        else
                kfree_skb(skb);
        return 0;
}

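/* Accounts 'skb' to the sFlow sample pool of its input vport and, with a
 * probability controlled by dp->sflow_probability, passes a clone up to
 * userspace as an ODP_PACKET_CMD_SAMPLE upcall together with the actions
 * being executed. */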
static void sflow_sample(struct datapath *dp, struct sk_buff *skb,
                         const struct sw_flow_key *key,
                         const struct nlattr *a, u32 actions_len)
{
        struct sk_buff *nskb;
        struct vport *p = OVS_CB(skb)->vport;
        struct dp_upcall_info upcall;

        if (unlikely(!p))
                return;

        atomic_inc(&p->sflow_pool);
        if (net_random() >= dp->sflow_probability)
                return;

        nskb = skb_clone(skb, GFP_ATOMIC);
        if (unlikely(!nskb))
                return;

        upcall.cmd = ODP_PACKET_CMD_SAMPLE;
        upcall.key = key;
        upcall.userdata = 0;
        upcall.sample_pool = atomic_read(&p->sflow_pool);
        upcall.actions = a;
        upcall.actions_len = actions_len;
        dp_upcall(dp, nskb, &upcall);
}

/* Execute a list of actions against 'skb'. */
int execute_actions(struct datapath *dp, struct sk_buff *skb,
                    const struct sw_flow_key *key,
                    const struct nlattr *actions, u32 actions_len)
{
        if (dp->sflow_probability)
                sflow_sample(dp, skb, key, actions, actions_len);

        OVS_CB(skb)->tun_id = 0;

        return do_execute_actions(dp, skb, key, actions, actions_len);
}