/*
 * Copyright (c) 2007-2011 Nicira Networks.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <net/dsfield.h>

/* Local headers of the out-of-tree datapath module (assumed layout):
 * checksum.h provides get_ip_summed()/OVS_CSUM_COMPLETE, vlan.h the VLAN
 * compat helpers, datapath.h/vport.h the datapath and vport APIs. */
#include "checksum.h"
#include "datapath.h"
#include "vlan.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        const struct nlattr *attr, int len, bool keep_skb);

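/* Make sure the first 'write_len' bytes of 'skb' are safe to modify: if the
 * skb is a shared clone, reallocate its header so the caller gets a private
 * copy before rewriting packet headers in place. */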
static int make_writable(struct sk_buff *skb, int write_len)
{
        if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
                return 0;

        return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

/* Remove VLAN header from packet and update csum accordingly. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
        struct vlan_ethhdr *veth;
        struct ethhdr *eh;
        int err;

        err = make_writable(skb, VLAN_ETH_HLEN);
        if (unlikely(err))
                return err;

        if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum, csum_partial(skb->data
                                        + ETH_HLEN, VLAN_HLEN, 0));

        veth = (struct vlan_ethhdr *) skb->data;
        *current_tci = veth->h_vlan_TCI;

        memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);

        eh = (struct ethhdr *)__skb_pull(skb, VLAN_HLEN);

        skb->protocol = eh->h_proto;
        skb->mac_header += VLAN_HLEN;

        return 0;
}

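/* Strip the outermost VLAN tag from 'skb'.  If the tag lives in the
 * hardware-accelerated slot it is simply cleared; otherwise the in-band
 * 802.1Q header is removed.  Any remaining inner tag is then promoted into
 * the hw-accel slot. */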
static int pop_vlan(struct sk_buff *skb)
{
        __be16 tci;
        int err;

        if (likely(vlan_tx_tag_present(skb))) {
                vlan_set_tci(skb, 0);  /* clear hw-accel tag (vlan.h compat helper) */
        } else {
                if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
                             skb->len < VLAN_ETH_HLEN))
                        return 0;

                err = __pop_vlan_tci(skb, &tci);
                if (err)
                        return err;
        }
        /* move next vlan tag to hw accel tag */
        if (likely(skb->protocol != htons(ETH_P_8021Q) ||
                   skb->len < VLAN_ETH_HLEN))
                return 0;

        err = __pop_vlan_tci(skb, &tci);
        if (unlikely(err))
                return err;

        __vlan_hwaccel_put_tag(skb, ntohs(tci));
        return 0;
}

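/* Push a new outermost VLAN tag.  If a hw-accel tag is already present it is
 * first written into the packet data (and the complete checksum updated),
 * then the new TCI from the action is stored in the hw-accel slot. */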
static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
{
        if (unlikely(vlan_tx_tag_present(skb))) {
                u16 current_tag;

                /* push down current VLAN tag */
                current_tag = vlan_tx_tag_get(skb);

                if (!__vlan_put_tag(skb, current_tag))
                        return -ENOMEM;

                if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
                        skb->csum = csum_add(skb->csum, csum_partial(skb->data
                                        + ETH_HLEN, VLAN_HLEN, 0));
        }
        __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
        return 0;
}

static int set_eth_addr(struct sk_buff *skb,
                        const struct ovs_key_ethernet *eth_key)
{
        int err;

        err = make_writable(skb, ETH_HLEN);
        if (unlikely(err))
                return err;

        memcpy(eth_hdr(skb)->h_source, eth_key->eth_src, ETH_ALEN);
        memcpy(eth_hdr(skb)->h_dest, eth_key->eth_dst, ETH_ALEN);

        return 0;
}

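/* Rewrite an IPv4 address in place.  The TCP/UDP checksum is fixed up first
 * (the address is part of the pseudo-header), then the IP header checksum,
 * and the cached rxhash is invalidated since it depends on the addresses. */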
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
                        __be32 *addr, __be32 new_addr)
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (nh->protocol == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
                                                 *addr, new_addr, 1);
        } else if (nh->protocol == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr)))
                        inet_proto_csum_replace4(&udp_hdr(skb)->check, skb,
                                                 *addr, new_addr, 1);
        }

        csum_replace4(&nh->check, *addr, new_addr);
        skb_clear_rxhash(skb);
        *addr = new_addr;
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
        csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
        nh->ttl = new_ttl;
}

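/* Apply an OVS_KEY_ATTR_IPV4 set action: rewrite source/destination address,
 * DSCP and TTL, touching only the fields that actually change. */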
static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
{
        struct iphdr *nh;
        int err;

        err = make_writable(skb, skb_network_offset(skb) +
                                 sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        nh = ip_hdr(skb);

        if (ipv4_key->ipv4_src != nh->saddr)
                set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);

        if (ipv4_key->ipv4_dst != nh->daddr)
                set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);

        if (ipv4_key->ipv4_tos != nh->tos)
                ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);

        if (ipv4_key->ipv4_ttl != nh->ttl)
                set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);

        return 0;
}

/* Must follow make_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
                        __be16 new_port, __sum16 *check)
{
        inet_proto_csum_replace2(check, skb, *port, new_port, 0);
        *port = new_port;
        skb_clear_rxhash(skb);
}

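/* The two helpers below apply OVS_KEY_ATTR_UDP / OVS_KEY_ATTR_TCP set
 * actions: each rewrites whichever of the source and destination ports
 * differ from the packet and fixes the transport checksum via set_tp_port(). */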
static int set_udp_port(struct sk_buff *skb,
                        const struct ovs_key_udp *udp_port_key)
{
        struct udphdr *uh;
        int err;

        err = make_writable(skb, skb_transport_offset(skb) +
                                 sizeof(struct udphdr));
        if (unlikely(err))
                return err;

        uh = udp_hdr(skb);
        if (udp_port_key->udp_src != uh->source)
                set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check);

        if (udp_port_key->udp_dst != uh->dest)
                set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check);

        return 0;
}

static int set_tcp_port(struct sk_buff *skb,
                        const struct ovs_key_tcp *tcp_port_key)
{
        struct tcphdr *th;
        int err;

        err = make_writable(skb, skb_transport_offset(skb) +
                                 sizeof(struct tcphdr));
        if (unlikely(err))
                return err;

        th = tcp_hdr(skb);
        if (tcp_port_key->tcp_src != th->source)
                set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);

        if (tcp_port_key->tcp_dst != th->dest)
                set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);

        return 0;
}

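/* Transmit 'skb' on the vport numbered 'out_port'.  The skb is always
 * consumed: freed if the port has gone away, handed to vport_send()
 * otherwise.  A NULL skb (a failed clone upstream) is tolerated. */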
static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
        struct vport *vport;

        if (unlikely(!skb))
                return -ENOMEM;

        vport = rcu_dereference(dp->ports[out_port]);
        if (unlikely(!vport)) {
                kfree_skb(skb);
                return -ENODEV;
        }

        vport_send(vport, skb);
        return 0;
}

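/* Build a dp_upcall_info from the nested attributes of an
 * OVS_ACTION_ATTR_USERSPACE action (optional userdata and the Netlink PID to
 * deliver to) and hand the packet to userspace via dp_upcall(). */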
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                            const struct nlattr *attr)
{
        struct dp_upcall_info upcall;
        const struct nlattr *a;
        int rem;

        upcall.cmd = OVS_PACKET_CMD_ACTION;
        upcall.key = &OVS_CB(skb)->flow->key;
        upcall.userdata = NULL;
        upcall.pid = 0;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_USERSPACE_ATTR_USERDATA:
                        upcall.userdata = a;
                        break;

                case OVS_USERSPACE_ATTR_PID:
                        upcall.pid = nla_get_u32(a);
                        break;
                }
        }

        return dp_upcall(dp, skb, &upcall);
}

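/* OVS_ACTION_ATTR_SAMPLE: compare net_random() against the 32-bit threshold
 * in OVS_SAMPLE_ATTR_PROBABILITY and, if the sample is taken, run the nested
 * OVS_SAMPLE_ATTR_ACTIONS list with keep_skb set so the caller's skb is not
 * consumed. */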
static int sample(struct datapath *dp, struct sk_buff *skb,
                  const struct nlattr *attr)
{
        const struct nlattr *acts_list = NULL;
        const struct nlattr *a;
        int rem;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_SAMPLE_ATTR_PROBABILITY:
                        if (net_random() >= nla_get_u32(a))
                                return 0;
                        break;

                case OVS_SAMPLE_ATTR_ACTIONS:
                        acts_list = a;
                        break;
                }
        }

        return do_execute_actions(dp, skb, nla_data(acts_list),
                                  nla_len(acts_list), true);
}

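/* Dispatch an OVS_ACTION_ATTR_SET action to the appropriate field rewriter
 * based on the nested key attribute type. */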
static int execute_set_action(struct sk_buff *skb,
                              const struct nlattr *nested_attr)
{
        int err = 0;

        switch (nla_type(nested_attr)) {
        case OVS_KEY_ATTR_PRIORITY:
                skb->priority = nla_get_u32(nested_attr);
                break;

        case OVS_KEY_ATTR_TUN_ID:
                OVS_CB(skb)->tun_id = nla_get_be64(nested_attr);
                break;

        case OVS_KEY_ATTR_ETHERNET:
                err = set_eth_addr(skb, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_IPV4:
                err = set_ipv4(skb, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_TCP:
                err = set_tcp_port(skb, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_UDP:
                err = set_udp_port(skb, nla_data(nested_attr));
                break;
        }

        return err;
}

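/*
 * For reference, an action list arriving here is a flat sequence of nested
 * Netlink attributes; an illustrative (not literal) example:
 *
 *   OVS_ACTION_ATTR_SET(OVS_KEY_ATTR_IPV4{src, dst, tos, ttl})
 *   OVS_ACTION_ATTR_PUSH_VLAN(tci)
 *   OVS_ACTION_ATTR_OUTPUT(port 2)
 *   OVS_ACTION_ATTR_OUTPUT(port 5)
 *
 * do_execute_actions() walks it with nla_next(), cloning the skb only when an
 * earlier output is followed by further actions.
 */
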
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        const struct nlattr *attr, int len, bool keep_skb)
{
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, in which case cloning and then
         * freeing the original skbuff would be wasteful.  So the following
         * code is slightly obscure just to avoid that. */
        int prev_port = -1;
        const struct nlattr *a;
        int rem;

        for (a = attr, rem = len; rem > 0;
             a = nla_next(a, &rem)) {
                int err = 0;

                if (prev_port != -1) {
                        do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
                        prev_port = -1;
                }

                switch (nla_type(a)) {
                case OVS_ACTION_ATTR_OUTPUT:
                        prev_port = nla_get_u32(a);
                        break;

                case OVS_ACTION_ATTR_USERSPACE:
                        output_userspace(dp, skb, a);
                        break;

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        err = push_vlan(skb, nla_data(a));
                        if (unlikely(err)) /* skb already freed. */
                                return err;
                        break;

                case OVS_ACTION_ATTR_POP_VLAN:
                        err = pop_vlan(skb);
                        break;

                case OVS_ACTION_ATTR_SET:
                        err = execute_set_action(skb, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample(dp, skb, a);
                        break;
                }

                if (unlikely(err)) {
                        kfree_skb(skb);
                        return err;
                }
        }

        if (prev_port != -1) {
                if (keep_skb)
                        skb = skb_clone(skb, GFP_ATOMIC);

                do_output(dp, skb, prev_port);
        } else if (!keep_skb)
                consume_skb(skb);

        return 0;
}

/* We limit the number of times that we pass into execute_actions()
 * to avoid blowing out the stack in the event that we have a loop. */
#define MAX_LOOPS 5     /* recursion limit; value assumed from the contemporaneous OVS tree */

struct loop_counter {
        u8 count;               /* Count. */
        bool looping;           /* Loop detected? */
};

static DEFINE_PER_CPU(struct loop_counter, loop_counters);

static int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
{
        if (net_ratelimit())
                pr_warn("%s: flow looped %d times, dropping\n",
                        dp_name(dp), MAX_LOOPS);
        actions->actions_len = 0;
        return -ELOOP;
}

/* Execute a list of actions against 'skb'. */
int execute_actions(struct datapath *dp, struct sk_buff *skb)
{
        struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
        struct loop_counter *loop;
        int error;

        /* Check whether we've looped too much. */
        loop = &__get_cpu_var(loop_counters);
        if (unlikely(++loop->count > MAX_LOOPS))
                loop->looping = true;
        if (unlikely(loop->looping)) {
                error = loop_suppress(dp, acts);
                kfree_skb(skb);
                goto out_loop;
        }

        OVS_CB(skb)->tun_id = 0;
        error = do_execute_actions(dp, skb, acts->actions,
                                   acts->actions_len, false);

        /* Check whether sub-actions looped too much. */
        if (unlikely(loop->looping))
                error = loop_suppress(dp, acts);

out_loop:
        /* Decrement loop counter. */
        if (!--loop->count)
                loop->looping = false;

        return error;
}