datapath/actions.c [sliver-openvswitch.git]
/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "vlan.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              const struct nlattr *attr, int len, bool keep_skb);

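/* Ensure that the first 'write_len' bytes of 'skb' are writable, copying the
 * packet data if it is shared with a clone. */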
static int make_writable(struct sk_buff *skb, int write_len)
{
        if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
                return 0;

        return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

/* Remove the VLAN header from the packet and update the csum accordingly. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
        struct vlan_hdr *vhdr;
        int err;

        err = make_writable(skb, VLAN_ETH_HLEN);
        if (unlikely(err))
                return err;

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum, csum_partial(skb->data
                                        + (2 * ETH_ALEN), VLAN_HLEN, 0));

        vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
        *current_tci = vhdr->h_vlan_TCI;

        memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
        __skb_pull(skb, VLAN_HLEN);

        vlan_set_encap_proto(skb, vhdr);
        skb->mac_header += VLAN_HLEN;
        skb_reset_mac_len(skb);

        return 0;
}

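/* Pop the outermost VLAN tag: clear the hardware-accelerated tag if one is
 * present, otherwise strip the in-band 802.1Q header, then promote any
 * remaining in-band tag to the hardware-accelerated slot. */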
static int pop_vlan(struct sk_buff *skb)
{
        __be16 tci;
        int err;

        if (likely(vlan_tx_tag_present(skb))) {
                vlan_set_tci(skb, 0);
        } else {
                if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
                             skb->len < VLAN_ETH_HLEN))
                        return 0;

                err = __pop_vlan_tci(skb, &tci);
                if (err)
                        return err;
        }
        /* move next vlan tag to hw accel tag */
        if (likely(skb->protocol != htons(ETH_P_8021Q) ||
                   skb->len < VLAN_ETH_HLEN))
                return 0;

        err = __pop_vlan_tci(skb, &tci);
        if (unlikely(err))
                return err;

        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
        return 0;
}

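/* Push a new outermost VLAN tag.  An existing hardware-accelerated tag is
 * first written back into the packet data, then the new tag takes its place
 * in the hardware-accelerated slot.  On failure 'skb' is already freed. */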
static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
{
        if (unlikely(vlan_tx_tag_present(skb))) {
                u16 current_tag;

                /* push down current VLAN tag */
                current_tag = vlan_tx_tag_get(skb);

                if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
                        return -ENOMEM;

                if (skb->ip_summed == CHECKSUM_COMPLETE)
                        skb->csum = csum_add(skb->csum, csum_partial(skb->data
                                        + (2 * ETH_ALEN), VLAN_HLEN, 0));

        }
        __vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
        return 0;
}

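/* Rewrite the Ethernet source and destination addresses, keeping any
 * CHECKSUM_COMPLETE value up to date. */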
static int set_eth_addr(struct sk_buff *skb,
                        const struct ovs_key_ethernet *eth_key)
{
        int err;
        err = make_writable(skb, ETH_HLEN);
        if (unlikely(err))
                return err;

        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
        ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);

        ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        return 0;
}

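/* Rewrite one IPv4 address, updating the IP header checksum and, for TCP and
 * UDP, the transport checksum that covers the pseudo-header. */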
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
                                __be32 *addr, __be32 new_addr)
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (nh->protocol == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
                                                 *addr, new_addr, 1);
        } else if (nh->protocol == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace4(&uh->check, skb,
                                                         *addr, new_addr, 1);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        }

        csum_replace4(&nh->check, *addr, new_addr);
        skb_clear_rxhash(skb);
        *addr = new_addr;
}

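/* Update the TCP or UDP checksum after an IPv6 address change; both transport
 * checksums cover the pseudo-header.  A zero UDP checksum means "no checksum"
 * and is only touched when a checksum is in use or still to be computed. */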
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
                                 __be32 addr[4], const __be32 new_addr[4])
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (l4_proto == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
                                                  addr, new_addr, 1);
        } else if (l4_proto == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace16(&uh->check, skb,
                                                          addr, new_addr, 1);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        }
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
                          __be32 addr[4], const __be32 new_addr[4],
                          bool recalculate_csum)
{
        if (recalculate_csum)
                update_ipv6_checksum(skb, l4_proto, addr, new_addr);

        skb_clear_rxhash(skb);
        memcpy(addr, new_addr, sizeof(__be32[4]));
}

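/* The IPv6 traffic class and flow label straddle the 'priority' and
 * 'flow_lbl' fields of struct ipv6hdr, hence the shifting and masking in the
 * two helpers below. */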
static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
{
        nh->priority = tc >> 4;
        nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
{
        nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16;
        nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
        nh->flow_lbl[2] = fl & 0x000000FF;
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
        csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
        nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
{
        struct iphdr *nh;
        int err;

        err = make_writable(skb, skb_network_offset(skb) +
                                 sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        nh = ip_hdr(skb);

        if (ipv4_key->ipv4_src != nh->saddr)
                set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);

        if (ipv4_key->ipv4_dst != nh->daddr)
                set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);

        if (ipv4_key->ipv4_tos != nh->tos)
                ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);

        if (ipv4_key->ipv4_ttl != nh->ttl)
                set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);

        return 0;
}

static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
{
        struct ipv6hdr *nh;
        int err;
        __be32 *saddr;
        __be32 *daddr;

        err = make_writable(skb, skb_network_offset(skb) +
                            sizeof(struct ipv6hdr));
        if (unlikely(err))
                return err;

        nh = ipv6_hdr(skb);
        saddr = (__be32 *)&nh->saddr;
        daddr = (__be32 *)&nh->daddr;

        if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src)))
                set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
                              ipv6_key->ipv6_src, true);

        if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
                unsigned int offset = 0;
                int flags = OVS_IP6T_FH_F_SKIP_RH;
                bool recalc_csum = true;

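                /* With a routing header, the transport checksum is
                 * based on the final destination rather than 'daddr',
                 * so rewriting 'daddr' does not require updating it. */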
                if (ipv6_ext_hdr(nh->nexthdr))
                        recalc_csum = ipv6_find_hdr(skb, &offset,
                                                    NEXTHDR_ROUTING, NULL,
                                                    &flags) != NEXTHDR_ROUTING;

                set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
                              ipv6_key->ipv6_dst, recalc_csum);
        }

        set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
        set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
        nh->hop_limit = ipv6_key->ipv6_hlimit;

        return 0;
}

/* Must follow make_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
                         __be16 new_port, __sum16 *check)
{
        inet_proto_csum_replace2(check, skb, *port, new_port, 0);
        *port = new_port;
        skb_clear_rxhash(skb);
}

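/* Rewrite a UDP port.  A zero UDP checksum means "no checksum", so the
 * checksum is only adjusted when one is in use (a CHECKSUM_PARTIAL packet
 * gets its checksum computed later anyway), and a result of zero is folded
 * to CSUM_MANGLED_0. */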
static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
{
        struct udphdr *uh = udp_hdr(skb);

        if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
                set_tp_port(skb, port, new_port, &uh->check);

                if (!uh->check)
                        uh->check = CSUM_MANGLED_0;
        } else {
                *port = new_port;
                skb_clear_rxhash(skb);
        }
}

static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
{
        struct udphdr *uh;
        int err;

        err = make_writable(skb, skb_transport_offset(skb) +
                                 sizeof(struct udphdr));
        if (unlikely(err))
                return err;

        uh = udp_hdr(skb);
        if (udp_port_key->udp_src != uh->source)
                set_udp_port(skb, &uh->source, udp_port_key->udp_src);

        if (udp_port_key->udp_dst != uh->dest)
                set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);

        return 0;
}

static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
{
        struct tcphdr *th;
        int err;

        err = make_writable(skb, skb_transport_offset(skb) +
                                 sizeof(struct tcphdr));
        if (unlikely(err))
                return err;

        th = tcp_hdr(skb);
        if (tcp_port_key->tcp_src != th->source)
                set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);

        if (tcp_port_key->tcp_dst != th->dest)
                set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);

        return 0;
}

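/* Rewrite the SCTP ports.  The CRC32c checksum is recomputed from scratch,
 * then XORed with the old and old-correct values so that a packet that
 * arrived with a bad checksum also leaves with one. */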
static int set_sctp(struct sk_buff *skb,
                     const struct ovs_key_sctp *sctp_port_key)
{
        struct sctphdr *sh;
        int err;
        unsigned int sctphoff = skb_transport_offset(skb);

        err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
        if (unlikely(err))
                return err;

        sh = sctp_hdr(skb);
        if (sctp_port_key->sctp_src != sh->source ||
            sctp_port_key->sctp_dst != sh->dest) {
                __le32 old_correct_csum, new_csum, old_csum;

                old_csum = sh->checksum;
                old_correct_csum = sctp_compute_cksum(skb, sctphoff);

                sh->source = sctp_port_key->sctp_src;
                sh->dest = sctp_port_key->sctp_dst;

                new_csum = sctp_compute_cksum(skb, sctphoff);

                /* Carry any checksum errors through. */
                sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

                skb_clear_rxhash(skb);
        }

        return 0;
}

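/* Transmit 'skb' on the vport identified by 'out_port', consuming it.  The
 * skb is freed if the vport no longer exists. */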
static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
        struct vport *vport;

        if (unlikely(!skb))
                return -ENOMEM;

        vport = ovs_vport_rcu(dp, out_port);
        if (unlikely(!vport)) {
                kfree_skb(skb);
                return -ENODEV;
        }

        ovs_vport_send(vport, skb);
        return 0;
}

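/* Send 'skb' to userspace as an OVS_PACKET_CMD_ACTION upcall, applying the
 * OVS_USERSPACE_ATTR_* attributes nested in 'attr'. */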
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                            const struct nlattr *attr)
{
        struct dp_upcall_info upcall;
        const struct nlattr *a;
        int rem;

        BUG_ON(!OVS_CB(skb)->pkt_key);

        upcall.cmd = OVS_PACKET_CMD_ACTION;
        upcall.key = OVS_CB(skb)->pkt_key;
        upcall.userdata = NULL;
        upcall.portid = 0;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
                 a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_USERSPACE_ATTR_USERDATA:
                        upcall.userdata = a;
                        break;

                case OVS_USERSPACE_ATTR_PID:
                        upcall.portid = nla_get_u32(a);
                        break;
                }
        }

        return ovs_dp_upcall(dp, skb, &upcall);
}

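/* Execute the nested action list of an OVS_ACTION_ATTR_SAMPLE action with
 * the probability given by OVS_SAMPLE_ATTR_PROBABILITY. */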
static int sample(struct datapath *dp, struct sk_buff *skb,
                  const struct nlattr *attr)
{
        const struct nlattr *acts_list = NULL;
        const struct nlattr *a;
        int rem;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
                 a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_SAMPLE_ATTR_PROBABILITY:
                        if (net_random() >= nla_get_u32(a))
                                return 0;
                        break;

                case OVS_SAMPLE_ATTR_ACTIONS:
                        acts_list = a;
                        break;
                }
        }

        return do_execute_actions(dp, skb, nla_data(acts_list),
                                  nla_len(acts_list), true);
}

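/* Apply an OVS_ACTION_ATTR_SET action: dispatch on the nested
 * OVS_KEY_ATTR_* type and rewrite the corresponding field or header. */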
static int execute_set_action(struct sk_buff *skb,
                                 const struct nlattr *nested_attr)
{
        int err = 0;

        switch (nla_type(nested_attr)) {
        case OVS_KEY_ATTR_PRIORITY:
                skb->priority = nla_get_u32(nested_attr);
                break;

        case OVS_KEY_ATTR_SKB_MARK:
                skb->mark = nla_get_u32(nested_attr);
                break;

        case OVS_KEY_ATTR_IPV4_TUNNEL:
                OVS_CB(skb)->tun_key = nla_data(nested_attr);
                break;

        case OVS_KEY_ATTR_ETHERNET:
                err = set_eth_addr(skb, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_IPV4:
                err = set_ipv4(skb, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_IPV6:
                err = set_ipv6(skb, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_TCP:
                err = set_tcp(skb, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_UDP:
                err = set_udp(skb, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_SCTP:
                err = set_sctp(skb, nla_data(nested_attr));
                break;
        }

        return err;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        const struct nlattr *attr, int len, bool keep_skb)
{
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, so cloning and then freeing
         * the original skbuff would be wasteful.  The following code is
         * therefore slightly obscure just to avoid that. */
        int prev_port = -1;
        const struct nlattr *a;
        int rem;

        for (a = attr, rem = len; rem > 0;
             a = nla_next(a, &rem)) {
                int err = 0;

                if (prev_port != -1) {
                        do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
                        prev_port = -1;
                }

                switch (nla_type(a)) {
                case OVS_ACTION_ATTR_OUTPUT:
                        prev_port = nla_get_u32(a);
                        break;

                case OVS_ACTION_ATTR_USERSPACE:
                        output_userspace(dp, skb, a);
                        break;

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        err = push_vlan(skb, nla_data(a));
                        if (unlikely(err)) /* skb already freed. */
                                return err;
                        break;

                case OVS_ACTION_ATTR_POP_VLAN:
                        err = pop_vlan(skb);
                        break;

                case OVS_ACTION_ATTR_SET:
                        err = execute_set_action(skb, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample(dp, skb, a);
                        break;
                }

                if (unlikely(err)) {
                        kfree_skb(skb);
                        return err;
                }
        }

        if (prev_port != -1) {
                if (keep_skb)
                        skb = skb_clone(skb, GFP_ATOMIC);

                do_output(dp, skb, prev_port);
        } else if (!keep_skb)
                consume_skb(skb);

        return 0;
}

/* We limit the number of times that we pass into execute_actions()
 * to avoid blowing out the stack in the event that we have a loop. */
#define MAX_LOOPS 4

struct loop_counter {
        u8 count;               /* Count. */
        bool looping;           /* Loop detected? */
};

static DEFINE_PER_CPU(struct loop_counter, loop_counters);

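/* Report a looping flow (rate limited) and disable it by truncating its
 * action list. */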
static int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
{
        if (net_ratelimit())
                pr_warn("%s: flow looped %d times, dropping\n",
                                ovs_dp_name(dp), MAX_LOOPS);
        actions->actions_len = 0;
        return -ELOOP;
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
{
        struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
        struct loop_counter *loop;
        int error;

        /* Check whether we've looped too much. */
        loop = &__get_cpu_var(loop_counters);
        if (unlikely(++loop->count > MAX_LOOPS))
                loop->looping = true;
        if (unlikely(loop->looping)) {
                error = loop_suppress(dp, acts);
                kfree_skb(skb);
                goto out_loop;
        }

        OVS_CB(skb)->tun_key = NULL;
        error = do_execute_actions(dp, skb, acts->actions,
                                         acts->actions_len, false);

        /* Check whether sub-actions looped too much. */
        if (unlikely(loop->looping))
                error = loop_suppress(dp, acts);

out_loop:
        /* Decrement loop counter. */
        if (!--loop->count)
                loop->looping = false;

        return error;
}