/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22 #include <linux/in_route.h>
23 #include <linux/inetdevice.h>
24 #include <linux/jhash.h>
25 #include <linux/list.h>
26 #include <linux/kernel.h>
27 #include <linux/version.h>
28 #include <linux/workqueue.h>
29 #include <linux/rculist.h>
30 #include <net/route.h>
40 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
41 #define rt_dst(rt) (rt->dst)
43 #define rt_dst(rt) (rt->u.dst)
/**
 * ovs_tnl_rcv - ingress point for generic tunnel code
 *
 * @vport: port this packet was received on
 * @skb: received packet
 *
 * (NOTE: an earlier revision also took @tos — ToS from the encapsulating
 * IP packet, used to copy ECN bits — which the current signature no
 * longer has.)
 *
 * Must be called with rcu_read_lock.
 *
 * Packets received by this function are in the following state:
 * - skb->data points to the inner Ethernet header.
 * - The inner Ethernet header is in the linear data area.
 * - skb->csum does not include the inner Ethernet header.
 * - The layer pointers are undefined.
 */
/* Ingress hook shared by tunnel implementations: normalizes the inner
 * frame and hands it to the datapath via ovs_vport_receive().
 * NOTE(review): this extract is incomplete — the opening brace, local
 * declarations (e.g. the ethhdr pointer 'eh'), the 'else' keyword and
 * the error branch are not visible, and each line carries a stray
 * leading number from the extraction.  Comments annotate only what is
 * shown. */
61 void ovs_tnl_rcv(struct vport *vport, struct sk_buff *skb)
/* skb->data points at the inner Ethernet header (see header comment),
 * so the MAC header offset is reset to it here. */
65 	skb_reset_mac_header(skb);
/* Ethernet II frames carry an EtherType >= ETH_P_802_3_MIN; anything
 * smaller is an 802.3 length field, so the protocol is set to raw
 * 802.2 in that case. */
68 	if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
69 		skb->protocol = eh->h_proto;
71 		skb->protocol = htons(ETH_P_802_2);
/* Any rxhash computed over the outer packet is stale for the inner
 * flow; clear it so it is recomputed on demand. */
75 	skb_clear_rxhash(skb);
/* NOTE(review): the body of this error branch is not visible in the
 * extract — presumably it frees the skb and returns; confirm against
 * the full source. */
79 	if (unlikely(compute_ip_summed(skb, false))) {
/* Deliver the decapsulated packet to the datapath. */
84 	ovs_vport_receive(vport, skb);
/* Look up an IPv4 route for the outer (encapsulating) header.
 * On success the selected source address is written back through
 * @saddr.  Two code paths exist because the kernel flow-key API
 * changed in 2.6.39 (struct flowi -> struct flowi4).
 * NOTE(review): this extract is incomplete — several initializer
 * lines, #else/#endif lines, return statements and the trailing
 * parameters (tos, skb_mark) are not visible, and each line carries a
 * stray leading number from the extraction. */
87 static struct rtable *find_route(struct net *net,
88 __be32 *saddr, __be32 daddr, u8 ipproto,
92 	/* Tunnel configuration keeps the DSCP part of the TOS bits, but the
93 	 * Linux router expects RT_TOS bits only. */
95 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
/* Pre-2.6.39 path: legacy struct flowi with the nested ip4_u union. */
96 	struct flowi fl = { .nl_u = { .ip4_u = {
/* Before 2.6.20 the mark field was called 'fwmark' and lived inside
 * the ip4_u union; afterwards it is 'mark' at the flowi level. */
99 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
102 					.tos = RT_TOS(tos) } },
103 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
/* Legacy API: route is returned via an out-parameter, errors as a
 * nonzero return; map failure onto ERR_PTR for the caller. */
108 	if (unlikely(ip_route_output_key(net, &rt, &fl)))
109 		return ERR_PTR(-EADDRNOTAVAIL);
/* Report the source address the routing code chose. */
110 	*saddr = fl.nl_u.ip4_u.saddr;
/* 2.6.39+ path: flat struct flowi4; ip_route_output_key() returns the
 * rtable (or ERR_PTR) directly. */
113 	struct flowi4 fl = { .daddr = daddr,
115 			     .flowi4_tos = RT_TOS(tos),
116 			     .flowi4_mark = skb_mark,
117 			     .flowi4_proto = ipproto };
119 	rt = ip_route_output_key(net, &fl);
/* Decide whether an skb must be linearized before its checksum is
 * computed in software.  Returns true when the data could change
 * underneath us.  NOTE(review): this extract is incomplete — braces,
 * the 'int i' declaration and the return statements are not visible,
 * and each line carries a stray leading number from the extraction. */
125 static bool need_linearize(const struct sk_buff *skb)
/* A frag_list always forces linearization. */
129 	if (unlikely(skb_shinfo(skb)->frag_list))
133 	 * Generally speaking we should linearize if there are paged frags.
134 	 * However, if all of the refcounts are 1 we know nobody else can
135 	 * change them from underneath us and we can skip the linearization.
/* Any page shared with another user (refcount > 1) could be modified
 * concurrently, so it is unsafe to checksum in place. */
137 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
138 		if (unlikely(page_count(skb_frag_page(&skb_shinfo(skb)->frags[i])) > 1))
/* Prepare an skb for tunnel transmission: guarantee enough headroom
 * for the outer headers, then resolve offloads in software where the
 * stack cannot carry them through encapsulation (GSO segmentation,
 * partial checksums).  Appears to return the (possibly replaced) skb
 * chain, or an ERR_PTR on failure — the return statements are not
 * visible in this extract.  NOTE(review): many lines (braces, local
 * declarations, error handling, the trailing headroom term) are
 * missing, and each line carries a stray leading number from the
 * extraction. */
144 static struct sk_buff *handle_offloads(struct sk_buff *skb,
145 				       const struct rtable *rt,
/* Worst-case outer headroom: link-layer reserve of the output device
 * plus any dst header_len, plus VLAN tag space if one is pending. */
151 	min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
153 			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
/* Expand the head if headroom is short or the header area is shared
 * with a clone (we are about to write in front of the data). */
155 	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
156 		int head_delta = SKB_DATA_ALIGN(min_headroom -
159 		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
165 	forward_ip_summed(skb, true);
/* GSO packets are segmented in software here, since the inner GSO
 * state cannot survive encapsulation on this path. */
167 	if (skb_is_gso(skb)) {
168 		struct sk_buff *nskb;
170 		nskb = __skb_gso_segment(skb, 0, false);
179 	} else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
180 		/* Pages aren't locked and could change at any time.
181 		 * If this happens after we compute the checksum, the
182 		 * checksum will be wrong. We linearize now to avoid
/* See need_linearize(): only copy when some page is shared. */
185 		if (unlikely(need_linearize(skb))) {
186 			err = __skb_linearize(skb);
/* Complete the partial checksum in software... */
191 		err = skb_checksum_help(skb);
/* ...after which no checksum work remains outstanding. */
196 	set_ip_summed(skb, OVS_CSUM_NONE);
206 /* Compute the source UDP port for an outgoing tunneled packet.
207  * Currently we use the flow hash, mapped into the host's local
 * ephemeral port range, so that packets of one flow always get the
 * same source port (stable ECMP/RSS spreading).
 * NOTE(review): this extract is incomplete — braces and the
 * declarations of 'low', 'high' and 'range' are not visible, and each
 * line carries a stray leading number from the extraction. */
209 u16 ovs_tnl_get_src_port(struct sk_buff *skb)
214 	u32 hash = OVS_CB(skb)->flow->hash;
/* [low, high] is the sysctl-configurable local port range. */
216 	inet_get_local_port_range(&low, &high);
217 	range = (high - low) + 1;
/* Scale the 32-bit hash uniformly onto [low, low + range - 1]:
 * (hash * range) / 2^32 is in [0, range). */
218 	return (((u64) hash * range) >> 32) + low;
/* Egress path shared by tunnel implementations: route the outer
 * packet, resolve offloads, then for each resulting segment push the
 * tunnel header and outer IP header and hand it to the local IP stack.
 * Appears to return the number of bytes sent, recording vport errors
 * otherwise.  NOTE(review): this extract is heavily abridged — braces,
 * local declarations (rt, saddr, err, sent_len, frag_len, iph, the
 * segment loop header), goto labels and error paths are not visible,
 * and each line carries a stray leading number from the extraction.
 * Comments annotate only what is shown. */
221 int ovs_tnl_send(struct vport *vport, struct sk_buff *skb)
223 	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
/* A flow-supplied tunnel key is mandatory on this path. */
229 	if (unlikely(!OVS_CB(skb)->tun_key))
/* Route lookup for the outer header; find_route() may rewrite saddr
 * with the address the routing code selected. */
233 	saddr = OVS_CB(skb)->tun_key->ipv4_src;
234 	rt = find_route(ovs_dp_get_net(vport->dp),
236 			OVS_CB(skb)->tun_key->ipv4_dst,
237 			tnl_vport->tnl_ops->ipproto,
238 			OVS_CB(skb)->tun_key->ipv4_tos,
/* Total encapsulation overhead = protocol-specific header + outer IP. */
244 	tunnel_hlen = tnl_vport->tnl_ops->hdr_len(OVS_CB(skb)->tun_key);
245 	tunnel_hlen += sizeof(struct iphdr);
/* May replace skb with a GSO-segmented chain; see handle_offloads(). */
247 	skb = handle_offloads(skb, rt, tunnel_hlen);
/* The inner rxhash is meaningless for the outer packet. */
257 	skb_clear_rxhash(skb);
/* Per-segment loop body: detach the segment from the chain first so
 * ip_local_out() owns a single skb. */
260 		struct sk_buff *next_skb = skb->next;
/* Fold any offloaded VLAN tag back into the packet data before
 * encapsulating it. */
267 		if (unlikely(vlan_deaccel_tag(skb)))
/* Make room for, then delimit, the outer headers. */
271 		skb_push(skb, tunnel_hlen);
272 		skb_reset_network_header(skb);
273 		skb_set_transport_header(skb, sizeof(struct iphdr));
/* First segment shares the route via dst_clone(); the last one (the
 * other branch) consumes the reference directly. */
276 		skb_dst_set(skb, dst_clone(&rt_dst(rt)));
278 		skb_dst_set(skb, &rt_dst(rt));
280 		/* Push Tunnel header. */
281 		tnl_vport->tnl_ops->build_header(vport, skb, tunnel_hlen);
283 		/* Push IP header. */
286 		iph->ihl = sizeof(struct iphdr) >> 2;
287 		iph->protocol = tnl_vport->tnl_ops->ipproto;
288 		iph->daddr = OVS_CB(skb)->tun_key->ipv4_dst;
290 		iph->tos = OVS_CB(skb)->tun_key->ipv4_tos;
291 		iph->ttl = OVS_CB(skb)->tun_key->ipv4_ttl;
/* DF is set only when the flow's tunnel flags request it. */
292 		iph->frag_off = OVS_CB(skb)->tun_key->tun_flags &
293 				OVS_TNL_F_DONT_FRAGMENT ? htons(IP_DF) : 0;
295 		 * Allow our local IP stack to fragment the outer packet even
296 		 * if the DF bit is set as a last resort. We also need to
297 		 * force selection of an IP ID here with __ip_select_ident(),
298 		 * as ip_select_ident() assumes a proper ID is not needed
299 		 * when the DF bit is set.
302 		__ip_select_ident(iph, skb_dst(skb), 0);
/* Scrub inner-stack control-block state before re-entering IP. */
304 		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
306 		err = ip_local_out(skb);
307 		if (unlikely(net_xmit_eval(err)))
/* Account this segment toward the byte count we report. */
310 		sent_len += frag_len;
/* Nothing went out at all: record a drop (full error path records a
 * TX error below). */
316 	if (unlikely(sent_len == 0))
317 		ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
325 		ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
/* Allocate and initialize a tunnel vport.  Shared constructor used by
 * concrete tunnel types, which pass in their vport_ops/tnl_ops tables.
 * NOTE(review): this extract is incomplete — braces, the IS_ERR()
 * check around ovs_vport_alloc(), the error label and the return are
 * not visible, and each line carries a stray leading number from the
 * extraction. */
329 struct vport *ovs_tnl_create(const struct vport_parms *parms,
330 			     const struct vport_ops *vport_ops,
331 			     const struct tnl_ops *tnl_ops)
334 	struct tnl_vport *tnl_vport;
/* Generic vport allocation with room for our tnl_vport private area. */
337 	vport = ovs_vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
/* Presumably inside an IS_ERR(vport) branch — propagate the error. */
339 		err = PTR_ERR(vport);
343 	tnl_vport = tnl_vport_priv(vport);
/* NOTE(review): unbounded strcpy — assumes parms->name is already
 * length-validated to fit tnl_vport->name; confirm against callers. */
345 	strcpy(tnl_vport->name, parms->name);
346 	tnl_vport->tnl_ops = tnl_ops;
/* RCU callback that performs the deferred final free of a tunnel
 * vport; scheduled from ovs_tnl_destroy() once a grace period has
 * elapsed, so no reader can still hold a reference.
 * NOTE(review): braces are not visible in this extract, and each line
 * carries a stray leading number from the extraction. */
354 static void free_port_rcu(struct rcu_head *rcu)
/* Recover the tnl_vport from its embedded rcu_head... */
356 	struct tnl_vport *tnl_vport = container_of(rcu,
357 						  struct tnl_vport, rcu);
/* ...then free the enclosing generic vport. */
359 	ovs_vport_free(vport_from_priv(tnl_vport));
/* Destroy a tunnel vport.  The actual free is deferred through RCU
 * (free_port_rcu) so concurrent RCU readers finish first.
 * NOTE(review): braces are not visible in this extract, and each line
 * carries a stray leading number from the extraction. */
362 void ovs_tnl_destroy(struct vport *vport)
364 	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
366 	call_rcu(&tnl_vport->rcu, free_port_rcu);
/* Return the tunnel vport's name, stored in its private area at
 * creation time (see ovs_tnl_create()).
 * NOTE(review): braces are not visible in this extract, and each line
 * carries a stray leading number from the extraction. */
369 const char *ovs_tnl_get_name(const struct vport *vport)
371 	const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
372 	return tnl_vport->name;