/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for executing flow actions. */
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/checksum.h>

#include "actions.h"
#include "datapath.h"
#include "openvswitch/xflow.h"
#include "vport.h"

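/* Returns a version of 'skb' that is safe to modify: if 'skb' is cloned, a
 * copy with at least 'min_headroom' bytes of headroom, otherwise 'skb' itself
 * with the headers through the TCP header pulled into the linear data area.
 * Frees 'skb' and returns NULL on failure. */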
static struct sk_buff *make_writable(struct sk_buff *skb, unsigned min_headroom, gfp_t gfp)
{
	if (skb_cloned(skb)) {
		struct sk_buff *nskb;
		unsigned headroom = max(min_headroom, skb_headroom(skb));

		nskb = skb_copy_expand(skb, headroom, skb_tailroom(skb), gfp);
		if (nskb) {
			set_skb_csum_bits(skb, nskb);
			kfree_skb(skb);
			return nskb;
		}
	} else {
		unsigned int hdr_len = (skb_transport_offset(skb)
					+ sizeof(struct tcphdr));
		if (pskb_may_pull(skb, min(hdr_len, skb->len)))
			return skb;
	}
	kfree_skb(skb);
	return NULL;
}

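/* Removes the outer 802.1Q header from 'skb', adjusting 'skb->csum' for
 * OVS_CSUM_COMPLETE packets.  Returns 'skb' unchanged if it does not contain
 * an 802.1Q header. */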
static struct sk_buff *vlan_pull_tag(struct sk_buff *skb)
{
	struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
	struct ethhdr *eh;

	/* Verify we were given a VLAN packet. */
	if (vh->h_vlan_proto != htons(ETH_P_8021Q) || skb->len < VLAN_ETH_HLEN)
		return skb;

	if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
					+ ETH_HLEN, VLAN_HLEN, 0));

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);

	eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);

	skb->protocol = eh->h_proto;
	skb->mac_header += VLAN_HLEN;

	return skb;
}

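/* Sets the TCI bits selected by 'mask' to 'tci' in 'skb', adding an 802.1Q
 * header first if 'skb' does not already have one.  A GSO packet is
 * segmented first, and each segment except the last is tagged and run
 * through execute_actions() for the remaining actions.  Returns the
 * (possibly replaced) 'skb', or an ERR_PTR() if an error occurs, in which
 * case 'skb' is freed. */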
static struct sk_buff *modify_vlan_tci(struct datapath *dp, struct sk_buff *skb,
				       const struct xflow_key *key, const union xflow_action *a,
				       int n_actions, gfp_t gfp)
{
	__be16 mask = a->dl_tci.mask;
	__be16 tci = a->dl_tci.tci;

	skb = make_writable(skb, VLAN_HLEN, gfp);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	if (skb->protocol == htons(ETH_P_8021Q)) {
		/* Modify the VLAN ID, but maintain other TCI values. */
		struct vlan_ethhdr *vh;
		__be16 old_tci;

		if (skb->len < VLAN_ETH_HLEN)
			return skb;

		vh = vlan_eth_hdr(skb);
		old_tci = vh->h_vlan_TCI;

		vh->h_vlan_TCI = (vh->h_vlan_TCI & ~mask) | tci;

		if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE) {
			__be16 diff[] = { ~old_tci, vh->h_vlan_TCI };

			skb->csum = ~csum_partial((char *)diff, sizeof(diff),
						  ~skb->csum);
		}
	} else {
		int err;

		/* Add a VLAN header. */

		/* Set up checksumming pointers for checksum-deferred packets
		 * on Xen.  Otherwise, dev_queue_xmit() will try to do this
		 * when we send the packet out on the wire, and it will fail at
		 * that point because skb_checksum_setup() will not look inside
		 * an 802.1Q header. */
		err = vswitch_skb_checksum_setup(skb);
		if (unlikely(err)) {
			kfree_skb(skb);
			return ERR_PTR(err);
		}

		/* GSO is not implemented for packets with an 802.1Q header, so
		 * we have to do segmentation before we add that header.
		 *
		 * GSO does work with hardware-accelerated VLAN tagging, but we
		 * can't use hardware-accelerated VLAN tagging since it
		 * requires the device to have a VLAN group configured (with
		 * e.g. vconfig(8)) and we don't do that.
		 *
		 * Having to do this here may be a performance loss, since we
		 * can't take advantage of TSO hardware support, although it
		 * does not make a measurable network performance difference
		 * for 1G Ethernet.  Fixing that would require patching the
		 * kernel (either to add GSO support to the VLAN protocol or to
		 * support hardware-accelerated VLAN tagging without VLAN
		 * groups configured). */
		if (skb_is_gso(skb)) {
			struct sk_buff *segs;

			segs = skb_gso_segment(skb, 0);
			kfree_skb(skb);
			if (unlikely(IS_ERR(segs)))
				return ERR_CAST(segs);

			do {
				struct sk_buff *nskb = segs->next;

				segs->next = NULL;

				/* GSO can change the checksum type, so update it. */
				compute_ip_summed(segs, true);

				segs = __vlan_put_tag(segs, ntohs(tci));
				err = -ENOMEM;
				if (segs)
					err = execute_actions(dp, segs,
							      key, a + 1,
							      n_actions - 1,
							      gfp);

				if (unlikely(err)) {
					while ((segs = nskb)) {
						nskb = segs->next;
						segs->next = NULL;
						kfree_skb(segs);
					}
					return ERR_PTR(err);
				}

				segs = nskb;
			} while (segs->next);

			skb = segs;
			compute_ip_summed(skb, true);
		}

		/* The hardware-accelerated version of vlan_put_tag() works
		 * only for a device that has a VLAN group configured (with
		 * e.g. vconfig(8)), so call the software-only version
		 * __vlan_put_tag() directly instead. */
		skb = __vlan_put_tag(skb, ntohs(tci));
		if (!skb)
			return ERR_PTR(-ENOMEM);

		/* GSO doesn't fix up the hardware computed checksum, so this
		 * will only be hit in the non-GSO case. */
		if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
						+ ETH_HLEN, VLAN_HLEN, 0));
	}

	return skb;
}

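/* Removes the 802.1Q header from 'skb', if any.  Returns NULL and frees
 * 'skb' if it cannot be made writable. */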
static struct sk_buff *strip_vlan(struct sk_buff *skb, gfp_t gfp)
{
	skb = make_writable(skb, 0, gfp);
	if (skb)
		vlan_pull_tag(skb);
	return skb;
}

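/* Sets the Ethernet source or destination address, according to 'a->type'.
 * Returns NULL and frees 'skb' if it cannot be made writable. */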
static struct sk_buff *set_dl_addr(struct sk_buff *skb,
				   const struct xflow_action_dl_addr *a,
				   gfp_t gfp)
{
	skb = make_writable(skb, 0, gfp);
	if (skb) {
		struct ethhdr *eh = eth_hdr(skb);
		if (a->type == XFLOWAT_SET_DL_SRC)
			memcpy(eh->h_source, a->dl_addr, ETH_ALEN);
		else
			memcpy(eh->h_dest, a->dl_addr, ETH_ALEN);
	}
	return skb;
}

/* Updates 'sum', which is a field in 'skb''s data, given that a 4-byte field
 * covered by the sum has been changed from 'from' to 'to'.  If set,
 * 'pseudohdr' indicates that the field is in the TCP or UDP pseudo-header.
 * Based on nf_proto_csum_replace4. */
static void update_csum(__sum16 *sum, struct sk_buff *skb,
			__be32 from, __be32 to, int pseudohdr)
{
	__be32 diff[] = { ~from, to };

	if (OVS_CB(skb)->ip_summed != OVS_CSUM_PARTIAL) {
		*sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
				~csum_unfold(*sum)));
		if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE && pseudohdr)
			skb->csum = ~csum_partial((char *)diff, sizeof(diff),
						~skb->csum);
	} else if (pseudohdr)
		*sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff),
				csum_unfold(*sum)));
}

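/* Returns true if 'key' indicates an IPv4 packet whose IP header was
 * successfully parsed, that is, the transport header offset lies past the
 * network header. */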
static bool is_ip(struct sk_buff *skb, const struct xflow_key *key)
{
	return (key->dl_type == htons(ETH_P_IP) &&
		skb->transport_header > skb->network_header);
}

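/* Returns a pointer to the TCP or UDP checksum field of 'skb', or NULL if
 * 'key' is neither TCP nor UDP or if the transport header is truncated. */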
static __sum16 *get_l4_checksum(struct sk_buff *skb, const struct xflow_key *key)
{
	int transport_len = skb->len - skb_transport_offset(skb);
	if (key->nw_proto == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			return &tcp_hdr(skb)->check;
	} else if (key->nw_proto == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr)))
			return &udp_hdr(skb)->check;
	}
	return NULL;
}

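/* Sets the IPv4 source or destination address, according to 'a->type',
 * updating the IP header checksum and, for TCP and UDP, the L4 checksum
 * that covers the pseudo-header. */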
static struct sk_buff *set_nw_addr(struct sk_buff *skb,
				   const struct xflow_key *key,
				   const struct xflow_action_nw_addr *a,
				   gfp_t gfp)
{
	struct iphdr *nh;
	__sum16 *check;
	__be32 *nwaddr;

	if (unlikely(!is_ip(skb, key)))
		return skb;

	skb = make_writable(skb, 0, gfp);
	if (unlikely(!skb))
		return NULL;

	nh = ip_hdr(skb);
	nwaddr = a->type == XFLOWAT_SET_NW_SRC ? &nh->saddr : &nh->daddr;

	check = get_l4_checksum(skb, key);
	if (likely(check))
		update_csum(check, skb, *nwaddr, a->nw_addr, 1);
	update_csum(&nh->check, skb, *nwaddr, a->nw_addr, 0);

	*nwaddr = a->nw_addr;

	return skb;
}

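/* Sets the DSCP bits in the IPv4 TOS field, preserving the ECN bits, and
 * updates the IP header checksum to match. */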
static struct sk_buff *set_nw_tos(struct sk_buff *skb,
				  const struct xflow_key *key,
				  const struct xflow_action_nw_tos *a,
				  gfp_t gfp)
{
	if (unlikely(!is_ip(skb, key)))
		return skb;

	skb = make_writable(skb, 0, gfp);
	if (skb) {
		struct iphdr *nh = ip_hdr(skb);
		u8 old = nh->tos;
		u8 new;

		/* Set the DSCP bits and preserve the ECN bits. */
		new = a->nw_tos | (nh->tos & INET_ECN_MASK);
		update_csum(&nh->check, skb, htons((u16)old),
			    htons((u16)new), 0);
		nh->tos = new;
	}
	return skb;
}

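/* Sets the TCP or UDP source or destination port, according to 'a->type',
 * updating the L4 checksum to match. */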
static struct sk_buff *set_tp_port(struct sk_buff *skb,
				   const struct xflow_key *key,
				   const struct xflow_action_tp_port *a,
				   gfp_t gfp)
{
	struct udphdr *th;
	__sum16 *check;
	__be16 *port;

	if (unlikely(!is_ip(skb, key)))
		return skb;

	skb = make_writable(skb, 0, gfp);
	if (unlikely(!skb))
		return NULL;

	/* Must follow make_writable() since that can move the skb data. */
	check = get_l4_checksum(skb, key);
	if (unlikely(!check))
		return skb;

	/*
	 * Update port and checksum.
	 *
	 * This is OK because source and destination port numbers are at the
	 * same offsets in both UDP and TCP headers, and get_l4_checksum() only
	 * supports those protocols.
	 */
	th = udp_hdr(skb);
	port = a->type == XFLOWAT_SET_TP_SRC ? &th->source : &th->dest;
	update_csum(check, skb, *port, a->tp_port, 0);
	*port = a->tp_port;

	return skb;
}

/**
 * is_spoofed_arp - check for invalid ARP packet
 *
 * @skb: skbuff containing an Ethernet packet, with network header pointing
 * just past the Ethernet and optional 802.1Q header.
 * @key: flow key extracted from @skb by flow_extract()
 *
 * Returns true if @skb is an invalid Ethernet+IPv4 ARP packet: one with
 * malformed or truncated header fields or one whose inner and outer Ethernet
 * addresses differ.
 */
static bool is_spoofed_arp(struct sk_buff *skb, const struct xflow_key *key)
{
	struct arp_eth_header *arp;

	if (key->dl_type != htons(ETH_P_ARP))
		return false;

	if (skb_network_offset(skb) + sizeof(struct arp_eth_header) > skb->len)
		return true;

	arp = (struct arp_eth_header *)skb_network_header(skb);
	return (arp->ar_hrd != htons(ARPHRD_ETHER) ||
		arp->ar_pro != htons(ETH_P_IP) ||
		arp->ar_hln != ETH_ALEN ||
		arp->ar_pln != 4 ||
		compare_ether_addr(arp->ar_sha, eth_hdr(skb)->h_source));
}

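/* Sends 'skb' on the datapath port numbered 'out_port'.  Consumes 'skb'
 * whether transmission is attempted or the port does not exist. */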
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct dp_port *p;

	if (!skb)
		goto error;

	p = rcu_dereference(dp->ports[out_port]);
	if (!p)
		goto error;

	vport_send(p->vport, skb);
	return;

error:
	kfree_skb(skb);
}

/* Never consumes 'skb'.  Returns a port that 'skb' should be sent to, or -1
 * if none. */
static int output_group(struct datapath *dp, __u16 group,
			struct sk_buff *skb, gfp_t gfp)
{
	struct dp_port_group *g = rcu_dereference(dp->groups[group]);
	int prev_port = -1;
	int i;

	if (!g)
		return -1;

	for (i = 0; i < g->n_ports; i++) {
		struct dp_port *p = rcu_dereference(dp->ports[g->ports[i]]);
		if (!p || OVS_CB(skb)->dp_port == p)
			continue;
		if (prev_port != -1) {
			struct sk_buff *clone = skb_clone(skb, gfp);
			if (!clone)
				return -1;
			do_output(dp, clone, prev_port);
		}
		prev_port = p->port_no;
	}

	return prev_port;
}

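/* Sends a clone of 'skb' up to userspace as a _XFLOWL_ACTION_NR message
 * carrying 'arg'.  The original 'skb' is not consumed. */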
static int output_control(struct datapath *dp, struct sk_buff *skb, u32 arg,
			  gfp_t gfp)
{
	skb = skb_clone(skb, gfp);
	if (!skb)
		return -ENOMEM;
	return dp_output_control(dp, skb, _XFLOWL_ACTION_NR, arg);
}

/* Send a copy of this packet up to the sFlow agent, along with extra
 * information about what happened to it. */
static void sflow_sample(struct datapath *dp, struct sk_buff *skb,
			 const union xflow_action *a, int n_actions,
			 gfp_t gfp, struct dp_port *dp_port)
{
	struct xflow_sflow_sample_header *hdr;
	unsigned int actlen = n_actions * sizeof(union xflow_action);
	unsigned int hdrlen = sizeof(struct xflow_sflow_sample_header);
	struct sk_buff *nskb;

	nskb = skb_copy_expand(skb, actlen + hdrlen, 0, gfp);
	if (!nskb)
		return;

	memcpy(__skb_push(nskb, actlen), a, actlen);
	hdr = (struct xflow_sflow_sample_header *)__skb_push(nskb, hdrlen);
	hdr->n_actions = n_actions;
	hdr->sample_pool = atomic_read(&dp_port->sflow_pool);
	dp_output_control(dp, nskb, _XFLOWL_SFLOW_NR, 0);
}

/* Execute a list of actions against 'skb'. */
int execute_actions(struct datapath *dp, struct sk_buff *skb,
		    const struct xflow_key *key,
		    const union xflow_action *a, int n_actions,
		    gfp_t gfp)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so cloning and then freeing
	 * the original skbuff would be wasteful.  The following code is
	 * slightly obscure just to avoid that. */
	int prev_port = -1;
	u32 priority = skb->priority;
	int err;

	if (dp->sflow_probability) {
		struct dp_port *p = OVS_CB(skb)->dp_port;
		if (p) {
			atomic_inc(&p->sflow_pool);
			if (dp->sflow_probability == UINT_MAX ||
			    net_random() < dp->sflow_probability)
				sflow_sample(dp, skb, a, n_actions, gfp, p);
		}
	}

	OVS_CB(skb)->tun_id = 0;

	for (; n_actions > 0; a++, n_actions--) {
		if (prev_port != -1) {
			do_output(dp, skb_clone(skb, gfp), prev_port);
			prev_port = -1;
		}

		switch (a->type) {
		case XFLOWAT_OUTPUT:
			prev_port = a->output.port;
			break;
		case XFLOWAT_OUTPUT_GROUP:
			prev_port = output_group(dp, a->output_group.group,
						 skb, gfp);
			break;
		case XFLOWAT_CONTROLLER:
			err = output_control(dp, skb, a->controller.arg, gfp);
			if (err) {
				kfree_skb(skb);
				return err;
			}
			break;
		case XFLOWAT_SET_TUNNEL:
			OVS_CB(skb)->tun_id = a->tunnel.tun_id;
			break;
		case XFLOWAT_SET_DL_TCI:
			skb = modify_vlan_tci(dp, skb, key, a, n_actions, gfp);
			if (IS_ERR(skb))
				return PTR_ERR(skb);
			break;
		case XFLOWAT_STRIP_VLAN:
			skb = strip_vlan(skb, gfp);
			break;
		case XFLOWAT_SET_DL_SRC:
		case XFLOWAT_SET_DL_DST:
			skb = set_dl_addr(skb, &a->dl_addr, gfp);
			break;
		case XFLOWAT_SET_NW_SRC:
		case XFLOWAT_SET_NW_DST:
			skb = set_nw_addr(skb, key, &a->nw_addr, gfp);
			break;
		case XFLOWAT_SET_NW_TOS:
			skb = set_nw_tos(skb, key, &a->nw_tos, gfp);
			break;
		case XFLOWAT_SET_TP_SRC:
		case XFLOWAT_SET_TP_DST:
			skb = set_tp_port(skb, key, &a->tp_port, gfp);
			break;
		case XFLOWAT_SET_PRIORITY:
			skb->priority = a->priority.priority;
			break;
		case XFLOWAT_POP_PRIORITY:
			skb->priority = priority;
			break;
		case XFLOWAT_DROP_SPOOFED_ARP:
			if (unlikely(is_spoofed_arp(skb, key)))
				goto exit;
			break;
		}
		if (!skb)
			return -ENOMEM;
	}

exit:
	if (prev_port != -1)
		do_output(dp, skb, prev_port);
	else
		kfree_skb(skb);
	return 0;
}