Remove NXAST_DROP_SPOOFED_ARP action.
[sliver-openvswitch.git] / datapath / actions.c
/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for executing flow actions. */

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/checksum.h>

#include "actions.h"
#include "checksum.h"
#include "datapath.h"
#include "loop_counter.h"
#include "openvswitch/datapath-protocol.h"
#include "vlan.h"
#include "vport.h"

static int do_execute_actions(struct datapath *, struct sk_buff *,
                              struct sw_flow_actions *acts);

/* Returns a version of 'skb' that is safe to modify: 'skb' itself if it is
 * not cloned, otherwise a private copy with at least 'min_headroom' bytes of
 * headroom.  In the uncloned case the network and transport headers are
 * pulled into the linear data area.  Frees 'skb' and returns NULL on
 * failure. */
static struct sk_buff *make_writable(struct sk_buff *skb, unsigned min_headroom)
{
        if (skb_cloned(skb)) {
                struct sk_buff *nskb;
                unsigned headroom = max(min_headroom, skb_headroom(skb));

                nskb = skb_copy_expand(skb, headroom, skb_tailroom(skb), GFP_ATOMIC);
                if (nskb) {
                        set_skb_csum_bits(skb, nskb);
                        kfree_skb(skb);
                        return nskb;
                }
        } else {
                unsigned int hdr_len = (skb_transport_offset(skb)
                                        + sizeof(struct tcphdr));
                if (pskb_may_pull(skb, min(hdr_len, skb->len)))
                        return skb;
        }
        kfree_skb(skb);
        return NULL;
}

/* Removes the outermost 802.1Q header from 'skb', or clears the out-of-band
 * (hardware-accelerated) VLAN tag if one is present.  Returns the (possibly
 * copied) skb, or NULL on failure. */
static struct sk_buff *strip_vlan(struct sk_buff *skb)
{
        struct ethhdr *eh;

        if (vlan_tx_tag_present(skb)) {
                vlan_set_tci(skb, 0);
                return skb;
        }

        if (unlikely(vlan_eth_hdr(skb)->h_vlan_proto != htons(ETH_P_8021Q) ||
            skb->len < VLAN_ETH_HLEN))
                return skb;

        skb = make_writable(skb, 0);
        if (unlikely(!skb))
                return NULL;

        if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum, csum_partial(skb->data
                                        + ETH_HLEN, VLAN_HLEN, 0));

        memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);

        eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);

        skb->protocol = eh->h_proto;
        skb->mac_header += VLAN_HLEN;

        return skb;
}

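/* Sets the 802.1Q tag control information of 'skb' to 'tci'.  When the packet
 * carries no in-band 802.1Q header, or already has an out-of-band tag, the
 * new TCI is stored out of band with __vlan_hwaccel_put_tag(); otherwise the
 * in-band tag is rewritten in place and any CHECKSUM_COMPLETE value is
 * adjusted to match. */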
static struct sk_buff *modify_vlan_tci(struct sk_buff *skb, __be16 tci)
{
        struct vlan_ethhdr *vh;
        __be16 old_tci;

        if (vlan_tx_tag_present(skb) || skb->protocol != htons(ETH_P_8021Q))
                return __vlan_hwaccel_put_tag(skb, ntohs(tci));

        skb = make_writable(skb, 0);
        if (unlikely(!skb))
                return NULL;

        if (unlikely(skb->len < VLAN_ETH_HLEN))
                return skb;

        vh = vlan_eth_hdr(skb);

        old_tci = vh->h_vlan_TCI;
        vh->h_vlan_TCI = tci;

        if (get_ip_summed(skb) == OVS_CSUM_COMPLETE) {
                /* Fold the old TCI out of, and the new TCI into, the
                 * CHECKSUM_COMPLETE value. */
                __be16 diff[] = { ~old_tci, vh->h_vlan_TCI };
                skb->csum = ~csum_partial((char *)diff, sizeof(diff), ~skb->csum);
        }

        return skb;
}

/* Returns true if the flow key says 'skb' is IPv4 and its transport header
 * has been located, false otherwise. */
static bool is_ip(struct sk_buff *skb)
{
        return (OVS_CB(skb)->flow->key.eth.type == htons(ETH_P_IP) &&
                skb->transport_header > skb->network_header);
}

/* Returns a pointer to the L4 checksum field of 'skb' if it is a TCP or UDP
 * packet with a complete transport header, otherwise NULL. */
static __sum16 *get_l4_checksum(struct sk_buff *skb)
{
        u8 nw_proto = OVS_CB(skb)->flow->key.ip.proto;
        int transport_len = skb->len - skb_transport_offset(skb);
        if (nw_proto == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        return &tcp_hdr(skb)->check;
        } else if (nw_proto == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr)))
                        return &udp_hdr(skb)->check;
        }
        return NULL;
}

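/* The helpers below rewrite header fields in place.  Rather than recomputing
 * the IPv4 and TCP/UDP checksums from scratch, they update them incrementally
 * with csum_replace4() and inet_proto_csum_replace*(), which apply the
 * RFC 1624 identity HC' = ~(~HC + ~m + m') for an old field value m and a new
 * value m'. */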
/* Implements ODP_ACTION_ATTR_SET_NW_SRC and ODP_ACTION_ATTR_SET_NW_DST:
 * rewrites the IPv4 source or destination address and updates the affected
 * checksums. */
static struct sk_buff *set_nw_addr(struct sk_buff *skb, const struct nlattr *a)
{
        __be32 new_nwaddr = nla_get_be32(a);
        struct iphdr *nh;
        __sum16 *check;
        __be32 *nwaddr;

        if (unlikely(!is_ip(skb)))
                return skb;

        skb = make_writable(skb, 0);
        if (unlikely(!skb))
                return NULL;

        nh = ip_hdr(skb);
        nwaddr = nla_type(a) == ODP_ACTION_ATTR_SET_NW_SRC ? &nh->saddr : &nh->daddr;

        check = get_l4_checksum(skb);
        if (likely(check))
                inet_proto_csum_replace4(check, skb, *nwaddr, new_nwaddr, 1);
        csum_replace4(&nh->check, *nwaddr, new_nwaddr);

        skb_clear_rxhash(skb);

        *nwaddr = new_nwaddr;

        return skb;
}

/* Implements ODP_ACTION_ATTR_SET_NW_TOS: sets the DSCP bits of the IPv4 TOS
 * field, preserving the ECN bits, and updates the IP header checksum. */
static struct sk_buff *set_nw_tos(struct sk_buff *skb, u8 nw_tos)
{
        if (unlikely(!is_ip(skb)))
                return skb;

        skb = make_writable(skb, 0);
        if (skb) {
                struct iphdr *nh = ip_hdr(skb);
                u8 *f = &nh->tos;
                u8 old = *f;
                u8 new;

                /* Set the DSCP bits and preserve the ECN bits. */
                new = nw_tos | (nh->tos & INET_ECN_MASK);
                csum_replace4(&nh->check, (__force __be32)old,
                                          (__force __be32)new);
                *f = new;
        }
        return skb;
}

/* Implements ODP_ACTION_ATTR_SET_TP_SRC and ODP_ACTION_ATTR_SET_TP_DST:
 * rewrites the TCP or UDP source or destination port and updates the L4
 * checksum. */
static struct sk_buff *set_tp_port(struct sk_buff *skb, const struct nlattr *a)
{
        struct udphdr *th;
        __sum16 *check;
        __be16 *port;

        if (unlikely(!is_ip(skb)))
                return skb;

        skb = make_writable(skb, 0);
        if (unlikely(!skb))
                return NULL;

        /* Must follow make_writable() since that can move the skb data. */
        check = get_l4_checksum(skb);
        if (unlikely(!check))
                return skb;

        /*
         * Update port and checksum.
         *
         * This is OK because source and destination port numbers are at the
         * same offsets in both UDP and TCP headers, and get_l4_checksum() only
         * supports those protocols.
         */
        th = udp_hdr(skb);
        port = nla_type(a) == ODP_ACTION_ATTR_SET_TP_SRC ? &th->source : &th->dest;
        inet_proto_csum_replace2(check, skb, *port, nla_get_be16(a), 0);
        *port = nla_get_be16(a);
        skb_clear_rxhash(skb);

        return skb;
}

/* Sends 'skb' out of datapath port 'out_port', or frees it if the port does
 * not exist or 'skb' is NULL. */
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
        struct vport *p;

        if (!skb)
                goto error;

        p = rcu_dereference(dp->ports[out_port]);
        if (!p)
                goto error;

        vport_send(p, skb);
        return;

error:
        kfree_skb(skb);
}

/* Sends a clone of 'skb' to userspace as an ODP_PACKET_CMD_ACTION upcall,
 * passing 'arg' along as the userdata value. */
static int output_control(struct datapath *dp, struct sk_buff *skb, u64 arg)
{
        struct dp_upcall_info upcall;

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        upcall.cmd = ODP_PACKET_CMD_ACTION;
        upcall.key = &OVS_CB(skb)->flow->key;
        upcall.userdata = arg;
        upcall.sample_pool = 0;
        upcall.actions = NULL;
        upcall.actions_len = 0;
        return dp_upcall(dp, skb, &upcall);
}

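/* For example, with the action list [OUTPUT(1), OUTPUT(2)] the loop below
 * first records port 1 in 'prev_port', then on the next iteration sends a
 * clone of 'skb' to port 1 before recording port 2; after the loop the
 * original 'skb' itself is sent to port 2, so the final (and, in the common
 * single-output case, only) output never pays for a clone. */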
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_actions *acts)
{
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, so cloning and then freeing
         * the original skbuff would be wasteful.  The following code is
         * slightly obscure just to avoid that. */
        int prev_port = -1;
        u32 priority = skb->priority;
        const struct nlattr *a;
        int rem, err;

        for (a = acts->actions, rem = acts->actions_len; rem > 0;
             a = nla_next(a, &rem)) {
                if (prev_port != -1) {
                        do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
                        prev_port = -1;
                }

                switch (nla_type(a)) {
                case ODP_ACTION_ATTR_OUTPUT:
                        prev_port = nla_get_u32(a);
                        break;

                case ODP_ACTION_ATTR_CONTROLLER:
                        err = output_control(dp, skb, nla_get_u64(a));
                        if (err) {
                                kfree_skb(skb);
                                return err;
                        }
                        break;

                case ODP_ACTION_ATTR_SET_TUNNEL:
                        OVS_CB(skb)->tun_id = nla_get_be64(a);
                        break;

                case ODP_ACTION_ATTR_SET_DL_TCI:
                        skb = modify_vlan_tci(skb, nla_get_be16(a));
                        break;

                case ODP_ACTION_ATTR_STRIP_VLAN:
                        skb = strip_vlan(skb);
                        break;

                case ODP_ACTION_ATTR_SET_DL_SRC:
                        skb = make_writable(skb, 0);
                        if (!skb)
                                return -ENOMEM;
                        memcpy(eth_hdr(skb)->h_source, nla_data(a), ETH_ALEN);
                        break;

                case ODP_ACTION_ATTR_SET_DL_DST:
                        skb = make_writable(skb, 0);
                        if (!skb)
                                return -ENOMEM;
                        memcpy(eth_hdr(skb)->h_dest, nla_data(a), ETH_ALEN);
                        break;

                case ODP_ACTION_ATTR_SET_NW_SRC:
                case ODP_ACTION_ATTR_SET_NW_DST:
                        skb = set_nw_addr(skb, a);
                        break;

                case ODP_ACTION_ATTR_SET_NW_TOS:
                        skb = set_nw_tos(skb, nla_get_u8(a));
                        break;

                case ODP_ACTION_ATTR_SET_TP_SRC:
                case ODP_ACTION_ATTR_SET_TP_DST:
                        skb = set_tp_port(skb, a);
                        break;

                case ODP_ACTION_ATTR_SET_PRIORITY:
                        skb->priority = nla_get_u32(a);
                        break;

                case ODP_ACTION_ATTR_POP_PRIORITY:
                        skb->priority = priority;
                        break;
                }
                if (!skb)
                        return -ENOMEM;
        }

        if (prev_port != -1)
                do_output(dp, skb, prev_port);
        else
                kfree_skb(skb);
        return 0;
}

/* Randomly samples 'skb' according to dp->sflow_probability: increments the
 * ingress port's sample pool and, when the packet is selected, sends a clone
 * to userspace as an ODP_PACKET_CMD_SAMPLE upcall along with the actions
 * being applied. */
static void sflow_sample(struct datapath *dp, struct sk_buff *skb,
                         struct sw_flow_actions *acts)
{
        struct sk_buff *nskb;
        struct vport *p = OVS_CB(skb)->vport;
        struct dp_upcall_info upcall;

        if (unlikely(!p))
                return;

        atomic_inc(&p->sflow_pool);
        if (net_random() >= dp->sflow_probability)
                return;

        nskb = skb_clone(skb, GFP_ATOMIC);
        if (unlikely(!nskb))
                return;

        upcall.cmd = ODP_PACKET_CMD_SAMPLE;
        upcall.key = &OVS_CB(skb)->flow->key;
        upcall.userdata = 0;
        upcall.sample_pool = atomic_read(&p->sflow_pool);
        upcall.actions = acts->actions;
        upcall.actions_len = acts->actions_len;
        dp_upcall(dp, nskb, &upcall);
}

/* Execute a list of actions against 'skb'. */
int execute_actions(struct datapath *dp, struct sk_buff *skb)
{
        struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
        struct loop_counter *loop;
        int error;

        /* Check whether we've looped too much. */
        loop = loop_get_counter();
        if (unlikely(++loop->count > MAX_LOOPS))
                loop->looping = true;
        if (unlikely(loop->looping)) {
                error = loop_suppress(dp, acts);
                kfree_skb(skb);
                goto out_loop;
        }

        /* Really execute actions. */
        if (dp->sflow_probability)
                sflow_sample(dp, skb, acts);
        OVS_CB(skb)->tun_id = 0;
        error = do_execute_actions(dp, skb, acts);

        /* Check whether sub-actions looped too much. */
        if (unlikely(loop->looping))
                error = loop_suppress(dp, acts);

out_loop:
        /* Decrement loop counter. */
        if (!--loop->count)
                loop->looping = false;
        loop_put_counter();

        return error;
}