datapath: Unify vport error stats handling.
[sliver-openvswitch.git] / datapath / tunnel.c
/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>
#include <net/route.h>
#include <net/xfrm.h>

#include "checksum.h"
#include "compat.h"
#include "datapath.h"
#include "tunnel.h"
#include "vlan.h"
#include "vport.h"

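/* struct rtable's cached dst moved from rt->u.dst to rt->dst in 2.6.36;
 * rt_dst() hides the difference. */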
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
#else
#define rt_dst(rt) (rt->u.dst)
#endif

/**
 *      ovs_tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 * - skb->data points to the inner Ethernet header.
 * - The inner Ethernet header is in the linear data area.
 * - skb->csum does not include the inner Ethernet header.
 * - The layer pointers are undefined.
 */
void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb)
{
        struct ethhdr *eh;

        skb_reset_mac_header(skb);
        eh = eth_hdr(skb);

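        /* h_proto values of ETH_P_802_3_MIN (0x0600) and above are real
         * EtherTypes; anything smaller is an 802.3 length field, so fall
         * back to the generic 802.2 protocol. */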
        if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
                skb->protocol = eh->h_proto;
        else
                skb->protocol = htons(ETH_P_802_2);

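        /* Scrub state inherited from the outer, encapsulating packet so
         * the inner packet enters the datapath clean. */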
        skb_dst_drop(skb);
        nf_reset(skb);
        skb_clear_rxhash(skb);
        secpath_reset(skb);
        vlan_set_tci(skb, 0);

        if (unlikely(compute_ip_summed(skb, false))) {
                kfree_skb(skb);
                return;
        }

        ovs_vport_receive(vport, skb);
}

static struct rtable *find_route(struct net *net,
                __be32 *saddr, __be32 daddr, u8 ipproto,
                u8 tos, u32 skb_mark)
{
        struct rtable *rt;
        /* The tunnel configuration carries the full ToS byte including
         * the DSCP bits, but the Linux routing code expects only the
         * RT_TOS() bits. */

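        /* Before 2.6.39, ip_route_output_key() takes a struct flowi and
         * returns the route through an output argument; later kernels use
         * struct flowi4 and return the rtable (or an ERR_PTR) directly. */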
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
        struct flowi fl = { .nl_u = { .ip4_u = {
                                        .daddr = daddr,
                                        .saddr = *saddr,
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
                                        .fwmark = skb_mark,
#endif
                                        .tos   = RT_TOS(tos) } },
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
                                        .mark = skb_mark,
#endif
                                        .proto = ipproto };

        if (unlikely(ip_route_output_key(net, &rt, &fl)))
                return ERR_PTR(-EADDRNOTAVAIL);
        *saddr = fl.nl_u.ip4_u.saddr;
        return rt;
#else
        struct flowi4 fl = { .daddr = daddr,
                             .saddr = *saddr,
                             .flowi4_tos = RT_TOS(tos),
                             .flowi4_mark = skb_mark,
                             .flowi4_proto = ipproto };

        rt = ip_route_output_key(net, &fl);
        *saddr = fl.saddr;
        return rt;
#endif
}

static bool need_linearize(const struct sk_buff *skb)
{
        int i;

        if (unlikely(skb_shinfo(skb)->frag_list))
                return true;

        /*
         * Generally speaking we should linearize if there are paged frags.
         * However, if all of the refcounts are 1 we know nobody else can
         * change them from underneath us and we can skip the linearization.
         */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i])) > 1))
                        return true;

        return false;
}

static struct sk_buff *handle_offloads(struct sk_buff *skb)
{
        int err;

        forward_ip_summed(skb, true);

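        /* The tunnel header is added in software, so hardware cannot
         * segment through it: segment GSO packets here instead. */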
        if (skb_is_gso(skb)) {
                struct sk_buff *nskb;

                nskb = __skb_gso_segment(skb, 0, false);
                if (IS_ERR(nskb)) {
                        err = PTR_ERR(nskb);
                        goto error;
                }

                consume_skb(skb);
                skb = nskb;
        } else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
                /* Pages aren't locked and could change at any time.
                 * If this happens after we compute the checksum, the
                 * checksum will be wrong.  We linearize now to avoid
                 * this problem.
                 */
                if (unlikely(need_linearize(skb))) {
                        err = __skb_linearize(skb);
                        if (unlikely(err))
                                goto error;
                }

                err = skb_checksum_help(skb);
                if (unlikely(err))
                        goto error;
        }

        set_ip_summed(skb, OVS_CSUM_NONE);

        return skb;

error:
        return ERR_PTR(err);
}

/* Compute source UDP port for outgoing packet.
 * Currently we use the flow hash.
 */
u16 ovs_tnl_get_src_port(struct sk_buff *skb)
{
        int low;
        int high;
        unsigned int range;
        u32 hash = OVS_CB(skb)->flow->hash;

        inet_get_local_port_range(&low, &high);
        range = (high - low) + 1;
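        /* Scale the 32-bit hash onto [low, high]: (hash * range) >> 32
         * maps the hash uniformly onto the range without a division. */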
        return (((u64) hash * range) >> 32) + low;
}

int ovs_tnl_send(struct vport *vport, struct sk_buff *skb,
                 u8 ipproto, int tunnel_hlen,
                 void (*build_header)(const struct vport *,
                                      struct sk_buff *,
                                      int tunnel_hlen))
{
        int min_headroom;
        struct rtable *rt;
        __be32 saddr;
        int sent_len = 0;
        int err;
        struct sk_buff *nskb;

        /* Route lookup */
        saddr = OVS_CB(skb)->tun_key->ipv4_src;
        rt = find_route(ovs_dp_get_net(vport->dp),
                        &saddr,
                        OVS_CB(skb)->tun_key->ipv4_dst,
                        ipproto,
                        OVS_CB(skb)->tun_key->ipv4_tos,
                        skb_get_mark(skb));
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                goto error;
        }

        tunnel_hlen += sizeof(struct iphdr);

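        /* Headroom must cover the link-layer header, any hardware header,
         * the tunnel and outer IP headers, and a software VLAN tag if one
         * is pending. */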
        min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
                        + tunnel_hlen
                        + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);

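        /* Expand the headroom if it is too small or the header is shared
         * with a clone; later header pushes must not touch shared data. */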
        if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
                int head_delta = SKB_DATA_ALIGN(min_headroom -
                                                skb_headroom(skb) +
                                                16);

                err = pskb_expand_head(skb, max_t(int, head_delta, 0),
                                        0, GFP_ATOMIC);
                if (unlikely(err))
                        goto err_free_rt;
        }

        /* Offloading */
        nskb = handle_offloads(skb);
        if (IS_ERR(nskb)) {
                err = PTR_ERR(nskb);
                goto err_free_rt;
        }
        skb = nskb;

        /* Reset SKB */
        nf_reset(skb);
        secpath_reset(skb);
        skb_dst_drop(skb);
        skb_clear_rxhash(skb);

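        /* handle_offloads() may have segmented the packet into a chain
         * linked through skb->next; emit each segment as its own tunnel
         * packet. */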
        while (skb) {
                struct sk_buff *next_skb = skb->next;
                struct iphdr *iph;
                int frag_len;

                skb->next = NULL;

                if (unlikely(vlan_deaccel_tag(skb)))
                        goto next;

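                /* Record the inner frame length before headers are pushed;
                 * sent_len reports payload bytes, not wire bytes. */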
                frag_len = skb->len;
                skb_push(skb, tunnel_hlen);
                skb_reset_network_header(skb);
                skb_set_transport_header(skb, sizeof(struct iphdr));

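                /* Every segment needs its own reference on the route; the
                 * last one consumes the reference taken by find_route(). */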
                if (next_skb)
                        skb_dst_set(skb, dst_clone(&rt_dst(rt)));
                else
                        skb_dst_set(skb, &rt_dst(rt));

                /* Push Tunnel header. */
                build_header(vport, skb, tunnel_hlen);

                /* Push IP header. */
                iph = ip_hdr(skb);
                iph->version    = 4;
                iph->ihl        = sizeof(struct iphdr) >> 2;
                iph->protocol   = ipproto;
                iph->daddr      = OVS_CB(skb)->tun_key->ipv4_dst;
                iph->saddr      = saddr;
                iph->tos        = OVS_CB(skb)->tun_key->ipv4_tos;
                iph->ttl        = OVS_CB(skb)->tun_key->ipv4_ttl;
                iph->frag_off   = OVS_CB(skb)->tun_key->tun_flags &
                                  OVS_TNL_F_DONT_FRAGMENT ? htons(IP_DF) : 0;
                /*
                 * Allow our local IP stack to fragment the outer packet even
                 * if the DF bit is set as a last resort.  We also need to
                 * force selection of an IP ID here with __ip_select_ident(),
                 * as ip_select_ident() assumes a proper ID is not needed
                 * when the DF bit is set.
                 */
                skb->local_df = 1;
                __ip_select_ident(iph, skb_dst(skb), 0);

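                /* The IP control block shares skb->cb with OVS_CB; clear
                 * it so the IP stack doesn't misread stale datapath state. */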
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

                err = ip_local_out(skb);
                if (unlikely(net_xmit_eval(err)))
                        goto next;

                sent_len += frag_len;

next:
                skb = next_skb;
        }

        return sent_len;

err_free_rt:
        ip_rt_put(rt);
error:
        return err;
}