/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for executing flow actions. */
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_vlan.h>
#include <net/inet_ecn.h>
#include <net/checksum.h>

#include "actions.h"
#include "datapath.h"
#include "openvswitch/xflow.h"
#include "vport.h"
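/* Returns an skb that the caller may safely modify: if 'skb' is shared or
 * cloned, a private copy with at least 'min_headroom' bytes of headroom is
 * made (carrying the checksum state over with set_skb_csum_bits());
 * otherwise the protocol headers are simply pulled into the linear data
 * area.  Callers must use the returned pointer, which may differ from
 * 'skb', and must treat a null return as an allocation failure. */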
static struct sk_buff *
make_writable(struct sk_buff *skb, unsigned min_headroom, gfp_t gfp)
{
        if (skb_shared(skb) || skb_cloned(skb)) {
                unsigned headroom = max(min_headroom, skb_headroom(skb));

                nskb = skb_copy_expand(skb, headroom, skb_tailroom(skb), gfp);
                set_skb_csum_bits(skb, nskb);

        unsigned int hdr_len = (skb_transport_offset(skb)
                                + sizeof(struct tcphdr));
        if (pskb_may_pull(skb, min(hdr_len, skb->len)))

static void set_tunnel(struct sk_buff *skb, struct xflow_key *key,
        OVS_CB(skb)->tun_id = key->tun_id = tun_id;

static struct sk_buff *
vlan_pull_tag(struct sk_buff *skb)
{
        struct vlan_ethhdr *vh = vlan_eth_hdr(skb);

        /* Verify we were given a vlan packet */
        if (vh->h_vlan_proto != htons(ETH_P_8021Q))
                return skb;
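        /* Removing the tag shrinks the packet by VLAN_HLEN bytes, so if this
         * skb carries an OVS_CSUM_COMPLETE checksum, the contribution of
         * those four bytes must be subtracted to keep skb->csum consistent
         * with the packet data. */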
        if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum, csum_partial(skb->data
                                        + ETH_HLEN, VLAN_HLEN, 0));

        memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);

        eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);

        skb->protocol = eh->h_proto;
        skb->mac_header += VLAN_HLEN;

static struct sk_buff *
modify_vlan_tci(struct datapath *dp, struct sk_buff *skb,
                struct xflow_key *key, const union xflow_action *a,
                int n_actions, gfp_t gfp)
{
        __be16 mask = a->dl_tci.mask;
        __be16 tci = a->dl_tci.tci;

        key->dl_tci = (key->dl_tci & ~(mask | VLAN_TAG_PRESENT)) | tci;

        skb = make_writable(skb, VLAN_HLEN, gfp);
        if (unlikely(!skb))
                return ERR_PTR(-ENOMEM);

        if (skb->protocol == htons(ETH_P_8021Q)) {
                /* Modify vlan id, but maintain other TCI values */
                struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
                __be16 old_tci = vh->h_vlan_TCI;

                vh->h_vlan_TCI = (vh->h_vlan_TCI & ~mask) | tci;

                if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE) {
                        __be16 diff[] = { ~old_tci, vh->h_vlan_TCI };

                        skb->csum = ~csum_partial((char *)diff, sizeof(diff),

                /* Add vlan header */

                /* Set up checksumming pointers for checksum-deferred packets
                 * on Xen. Otherwise, dev_queue_xmit() will try to do this
                 * when we send the packet out on the wire, and it will fail at
                 * that point because skb_checksum_setup() will not look inside
                 * an 802.1Q header. */
                err = vswitch_skb_checksum_setup(skb);

                /* GSO is not implemented for packets with an 802.1Q header, so
                 * we have to do segmentation before we add that header.
                 *
                 * GSO does work with hardware-accelerated VLAN tagging, but we
                 * can't use hardware-accelerated VLAN tagging since it
                 * requires the device to have a VLAN group configured (with
                 * e.g. vconfig(8)) and we don't do that.
                 *
                 * Having to do this here may be a performance loss, since we
                 * can't take advantage of TSO hardware support, although it
                 * does not make a measurable network performance difference
                 * for 1G Ethernet.  Fixing that would require patching the
                 * kernel (either to add GSO support to the VLAN protocol or to
                 * support hardware-accelerated VLAN tagging without VLAN
                 * groups configured). */
                if (skb_is_gso(skb)) {
                        struct sk_buff *segs;

                        segs = skb_gso_segment(skb, 0);

                        if (unlikely(IS_ERR(segs)))
                                return ERR_CAST(segs);

                        struct sk_buff *nskb = segs->next;

                        /* GSO can change the checksum type, so update it. */
                        compute_ip_summed(segs, true);

                        segs = __vlan_put_tag(segs, ntohs(tci));

                        struct xflow_key segkey = *key;
                        err = execute_actions(dp, segs,

                        while ((segs = nskb)) {

                        } while (segs->next);

                        compute_ip_summed(skb, true);

                /* The hardware-accelerated version of vlan_put_tag() works
                 * only for a device that has a VLAN group configured (with
                 * e.g. vconfig(8)), so call the software-only version
                 * __vlan_put_tag() directly instead. */
                skb = __vlan_put_tag(skb, ntohs(tci));
                if (!skb)
                        return ERR_PTR(-ENOMEM);

                /* GSO doesn't fix up the hardware computed checksum so this
                 * will only be hit in the non-GSO case. */
                if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE)
                        skb->csum = csum_add(skb->csum, csum_partial(skb->data
                                        + ETH_HLEN, VLAN_HLEN, 0));
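/* Illustrative sketch (not part of the original file): the general shape of
 * the "software-segment first, then process each segment" pattern that
 * modify_vlan_tci() uses above, reduced to a skeleton.  The helper name
 * 'example_segment_and_process' and its 'process' callback are hypothetical
 * and exist only for this example. */
static int example_segment_and_process(struct sk_buff *skb,
                                       int (*process)(struct sk_buff *seg))
{
        struct sk_buff *segs;
        int err = 0;

        segs = skb_gso_segment(skb, 0);
        kfree_skb(skb);         /* The segments (or an error) replace 'skb'. */
        if (IS_ERR(segs))
                return PTR_ERR(segs);

        /* skb_gso_segment() returns a singly linked list of segments; detach
         * each one before handing it off, and free the rest once an error
         * has been seen. */
        while (segs) {
                struct sk_buff *next = segs->next;

                segs->next = NULL;
                if (!err)
                        err = process(segs);
                else
                        kfree_skb(segs);
                segs = next;
        }

        return err;
}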
static struct sk_buff *strip_vlan(struct sk_buff *skb,
                                  struct xflow_key *key, gfp_t gfp)
{
        skb = make_writable(skb, 0, gfp);

        key->dl_tci = htons(0);

static struct sk_buff *set_dl_addr(struct sk_buff *skb,
                                   struct xflow_key *key,
                                   const struct xflow_action_dl_addr *a,
        skb = make_writable(skb, 0, gfp);
        struct ethhdr *eh = eth_hdr(skb);
        if (a->type == XFLOWAT_SET_DL_SRC) {
                memcpy(eh->h_source, a->dl_addr, ETH_ALEN);
                memcpy(key->dl_src, a->dl_addr, ETH_ALEN);
        } else {
                memcpy(eh->h_dest, a->dl_addr, ETH_ALEN);
                memcpy(key->dl_dst, a->dl_addr, ETH_ALEN);
/* Updates 'sum', which is a field in 'skb''s data, given that a 4-byte field
 * covered by the sum has been changed from 'from' to 'to'.  If set,
 * 'pseudohdr' indicates that the field is in the TCP or UDP pseudo-header.
 * Based on nf_proto_csum_replace4. */
static void update_csum(__sum16 *sum, struct sk_buff *skb,
                        __be32 from, __be32 to, int pseudohdr)
{
        __be32 diff[] = { ~from, to };
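        /* One's-complement trick: in ones'-complement arithmetic adding ~from
         * is the same as subtracting 'from', so folding { ~from, to } into an
         * existing sum replaces the old field's contribution with the new
         * value (the incremental update described in RFC 1624). */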
        if (OVS_CB(skb)->ip_summed != OVS_CSUM_PARTIAL) {
                *sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
                                ~csum_unfold(*sum)));
                if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE && pseudohdr)
                        skb->csum = ~csum_partial((char *)diff, sizeof(diff),
                                        ~skb->csum);
        } else if (pseudohdr)
                *sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff),
                                csum_unfold(*sum)));
}
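/* Illustrative sketch (not part of the original file): how update_csum() is
 * meant to be called when a 4-byte field in the packet changes.  Rewriting
 * the IPv4 destination address only requires fixing the IP header checksum
 * here (pseudohdr = 0); for TCP or UDP the transport checksum must also be
 * updated with pseudohdr = 1, as set_nw_addr() below does.  The helper name
 * 'example_set_ip_dst' is hypothetical and exists only for this example. */
static void example_set_ip_dst(struct sk_buff *skb, __be32 new_daddr)
{
        struct iphdr *nh = ip_hdr(skb);
        __be32 old_daddr = nh->daddr;

        nh->daddr = new_daddr;
        update_csum(&nh->check, skb, old_daddr, new_daddr, 0);
}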
static struct sk_buff *set_nw_addr(struct sk_buff *skb,
                                   struct xflow_key *key,
                                   const struct xflow_action_nw_addr *a,
        if (key->dl_type != htons(ETH_P_IP))

        skb = make_writable(skb, 0, gfp);

        struct iphdr *nh = ip_hdr(skb);
        u32 *f = a->type == XFLOWAT_SET_NW_SRC ? &nh->saddr : &nh->daddr;
        u32 old = *f;
        u32 new = a->nw_addr;

        if (key->nw_proto == IPPROTO_TCP) {
                struct tcphdr *th = tcp_hdr(skb);
                update_csum(&th->check, skb, old, new, 1);
        } else if (key->nw_proto == IPPROTO_UDP) {
                struct udphdr *th = udp_hdr(skb);
                update_csum(&th->check, skb, old, new, 1);

        update_csum(&nh->check, skb, old, new, 0);

        if (a->type == XFLOWAT_SET_NW_SRC)
                key->nw_src = a->nw_addr;
        else
                key->nw_dst = a->nw_addr;

static struct sk_buff *set_nw_tos(struct sk_buff *skb,
                                  struct xflow_key *key,
                                  const struct xflow_action_nw_tos *a,
        if (key->dl_type != htons(ETH_P_IP))

        skb = make_writable(skb, 0, gfp);

        struct iphdr *nh = ip_hdr(skb);

        /* Set the DSCP bits and preserve the ECN bits. */
        new = a->nw_tos | (nh->tos & INET_ECN_MASK);
        update_csum(&nh->check, skb, htons((uint16_t)old),
                    htons((uint16_t)new), 0);
        key->nw_tos = a->nw_tos;

static struct sk_buff *
set_tp_port(struct sk_buff *skb, struct xflow_key *key,
            const struct xflow_action_tp_port *a,
        if (key->dl_type != htons(ETH_P_IP))

        if (key->nw_proto == IPPROTO_TCP)
                check_ofs = offsetof(struct tcphdr, check);
        else if (key->nw_proto == IPPROTO_UDP)
                check_ofs = offsetof(struct udphdr, check);

        skb = make_writable(skb, 0, gfp);

        struct udphdr *th = udp_hdr(skb);
        u16 *f = a->type == XFLOWAT_SET_TP_SRC ? &th->source : &th->dest;
        u16 new = a->tp_port;
        update_csum((__sum16 *)(skb_transport_header(skb) + check_ofs),
        if (a->type == XFLOWAT_SET_TP_SRC)
                key->tp_src = a->tp_port;
        else
                key->tp_dst = a->tp_port;
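/* Length of the packet as it counts against the output port's MTU: the MTU
 * covers everything after the Ethernet header, and an 802.1Q tag, when
 * present, is not counted either.  do_output() uses this for its over-MTU
 * check below. */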
static inline unsigned packet_length(const struct sk_buff *skb)
{
        unsigned length = skb->len - ETH_HLEN;
        if (skb->protocol == htons(ETH_P_8021Q))
                length -= VLAN_HLEN;
        return length;
}

do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
        p = rcu_dereference(dp->ports[out_port]);

        mtu = vport_get_mtu(p->vport);
        if (packet_length(skb) > mtu && !skb_is_gso(skb)) {
                printk(KERN_WARNING "%s: dropped over-mtu packet: %d > %d\n",
                       dp_name(dp), packet_length(skb), mtu);

        vport_send(p->vport, skb);
/* Never consumes 'skb'.  Returns a port that 'skb' should be sent to, -1 if
 * none. */
static int output_group(struct datapath *dp, __u16 group,
                        struct sk_buff *skb, gfp_t gfp)
{
        struct dp_port_group *g = rcu_dereference(dp->groups[group]);

        for (i = 0; i < g->n_ports; i++) {
                struct dp_port *p = rcu_dereference(dp->ports[g->ports[i]]);
                if (!p || OVS_CB(skb)->dp_port == p)
                        continue;
                if (prev_port != -1) {
                        struct sk_buff *clone = skb_clone(skb, gfp);

                        do_output(dp, clone, prev_port);
                }
                prev_port = p->port_no;

output_control(struct datapath *dp, struct sk_buff *skb, u32 arg, gfp_t gfp)
        skb = skb_clone(skb, gfp);

        return dp_output_control(dp, skb, _XFLOWL_ACTION_NR, arg);

/* Send a copy of this packet up to the sFlow agent, along with extra
 * information about what happened to it. */
static void sflow_sample(struct datapath *dp, struct sk_buff *skb,
                         const union xflow_action *a, int n_actions,
                         gfp_t gfp, struct dp_port *dp_port)
{
        struct xflow_sflow_sample_header *hdr;
        unsigned int actlen = n_actions * sizeof(union xflow_action);
        unsigned int hdrlen = sizeof(struct xflow_sflow_sample_header);
        struct sk_buff *nskb;

        nskb = skb_copy_expand(skb, actlen + hdrlen, 0, gfp);
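        /* 'nskb' was copied with enough headroom for the sample metadata:
         * push the action list and then the sample header in front of the
         * frame, so userspace receives [sample header][actions][packet] on
         * the sFlow queue. */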
        memcpy(__skb_push(nskb, actlen), a, actlen);
        hdr = (struct xflow_sflow_sample_header *)__skb_push(nskb, hdrlen);
        hdr->n_actions = n_actions;
        hdr->sample_pool = atomic_read(&dp_port->sflow_pool);
        dp_output_control(dp, nskb, _XFLOWL_SFLOW_NR, 0);
/* Execute a list of actions against 'skb'. */
int execute_actions(struct datapath *dp, struct sk_buff *skb,
                    struct xflow_key *key,
                    const union xflow_action *a, int n_actions,
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, so doing a clone and then
         * freeing the original skbuff would be wasteful.  The following code
         * is slightly obscure just to avoid that. */
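        /* For example, a flow whose only action is a single XFLOWAT_OUTPUT
         * never clones 'skb': the output port is remembered in 'prev_port'
         * and the packet is handed to do_output() exactly once, at the end
         * of the function. */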
        u32 priority = skb->priority;
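        /* Every packet considered here is added to the ingress port's sFlow
         * sample pool; it is then sampled with probability
         * sflow_probability / 2^32, with UINT_MAX meaning "sample every
         * packet". */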
        if (dp->sflow_probability) {
                struct dp_port *p = OVS_CB(skb)->dp_port;

                atomic_inc(&p->sflow_pool);
                if (dp->sflow_probability == UINT_MAX ||
                    net_random() < dp->sflow_probability)
                        sflow_sample(dp, skb, a, n_actions, gfp, p);
        }

        OVS_CB(skb)->tun_id = 0;

        for (; n_actions > 0; a++, n_actions--) {
                WARN_ON_ONCE(skb_shared(skb));
                if (prev_port != -1) {
                        do_output(dp, skb_clone(skb, gfp), prev_port);
                        prev_port = -1;
                }

                switch (a->type) {
                case XFLOWAT_OUTPUT:
                        prev_port = a->output.port;

                case XFLOWAT_OUTPUT_GROUP:
                        prev_port = output_group(dp, a->output_group.group,

                case XFLOWAT_CONTROLLER:
                        err = output_control(dp, skb, a->controller.arg, gfp);

                case XFLOWAT_SET_TUNNEL:
                        set_tunnel(skb, key, a->tunnel.tun_id);

                case XFLOWAT_SET_DL_TCI:
                        skb = modify_vlan_tci(dp, skb, key, a, n_actions, gfp);

                case XFLOWAT_STRIP_VLAN:
                        skb = strip_vlan(skb, key, gfp);

                case XFLOWAT_SET_DL_SRC:
                case XFLOWAT_SET_DL_DST:
                        skb = set_dl_addr(skb, key, &a->dl_addr, gfp);

                case XFLOWAT_SET_NW_SRC:
                case XFLOWAT_SET_NW_DST:
                        skb = set_nw_addr(skb, key, &a->nw_addr, gfp);

                case XFLOWAT_SET_NW_TOS:
                        skb = set_nw_tos(skb, key, &a->nw_tos, gfp);

                case XFLOWAT_SET_TP_SRC:
                case XFLOWAT_SET_TP_DST:
                        skb = set_tp_port(skb, key, &a->tp_port, gfp);

                case XFLOWAT_SET_PRIORITY:
                        skb->priority = a->priority.priority;

                case XFLOWAT_POP_PRIORITY:
                        skb->priority = priority;

        do_output(dp, skb, prev_port);