/*
- * Distributed under the terms of the GNU GPL version 2.
- * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
- * Significant portions of this file may be copied from parts of the Linux
- * kernel, by Linus Torvalds and others.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
*/
-/* Functions for executing flow actions. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
+#include <linux/openvswitch.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
-#include <net/inet_ecn.h>
#include <net/ip.h>
+#include <net/ipv6.h>
#include <net/checksum.h>
+#include <net/dsfield.h>
-#include "actions.h"
#include "checksum.h"
#include "datapath.h"
-#include "openvswitch/datapath-protocol.h"
+#include "vlan.h"
#include "vport.h"
-static int do_execute_actions(struct datapath *, struct sk_buff *,
- const struct odp_flow_key *,
- const struct nlattr *actions, u32 actions_len);
+static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
+ const struct nlattr *attr, int len, bool keep_skb);
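+/* Ensure that the first 'write_len' bytes of 'skb' are writable: if the skb
+ * is a clone that does not own that much of its data, unshare the header. */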
-static struct sk_buff *make_writable(struct sk_buff *skb, unsigned min_headroom)
+static int make_writable(struct sk_buff *skb, int write_len)
{
- if (skb_cloned(skb)) {
- struct sk_buff *nskb;
- unsigned headroom = max(min_headroom, skb_headroom(skb));
+ if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
+ return 0;
- nskb = skb_copy_expand(skb, headroom, skb_tailroom(skb), GFP_ATOMIC);
- if (nskb) {
- set_skb_csum_bits(skb, nskb);
- kfree_skb(skb);
- return nskb;
- }
- } else {
- unsigned int hdr_len = (skb_transport_offset(skb)
- + sizeof(struct tcphdr));
- if (pskb_may_pull(skb, min(hdr_len, skb->len)))
- return skb;
- }
- kfree_skb(skb);
- return NULL;
+ return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
-static struct sk_buff *vlan_pull_tag(struct sk_buff *skb)
+/* Remove the VLAN header from the packet and update the csum accordingly. */
+static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
- struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
- struct ethhdr *eh;
+ struct vlan_hdr *vhdr;
+ int err;
- /* Verify we were given a vlan packet */
- if (vh->h_vlan_proto != htons(ETH_P_8021Q) || skb->len < VLAN_ETH_HLEN)
- return skb;
+ err = make_writable(skb, VLAN_ETH_HLEN);
+ if (unlikely(err))
+ return err;
if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
skb->csum = csum_sub(skb->csum, csum_partial(skb->data
- + ETH_HLEN, VLAN_HLEN, 0));
+ + (2 * ETH_ALEN), VLAN_HLEN, 0));
- memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);
+ vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
+ *current_tci = vhdr->h_vlan_TCI;
- eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
+ memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+ __skb_pull(skb, VLAN_HLEN);
- skb->protocol = eh->h_proto;
+ vlan_set_encap_proto(skb, vhdr);
skb->mac_header += VLAN_HLEN;
+ skb_reset_mac_len(skb);
- return skb;
+ return 0;
}
-static struct sk_buff *modify_vlan_tci(struct datapath *dp, struct sk_buff *skb,
- const struct odp_flow_key *key,
- const struct nlattr *a, u32 actions_len)
+static int pop_vlan(struct sk_buff *skb)
{
- __be16 tci = nla_get_be16(a);
-
- skb = make_writable(skb, VLAN_HLEN);
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- if (skb->protocol == htons(ETH_P_8021Q)) {
- /* Modify vlan id, but maintain other TCI values */
- struct vlan_ethhdr *vh;
- __be16 old_tci;
+ __be16 tci;
+ int err;
- if (skb->len < VLAN_ETH_HLEN)
- return skb;
+ if (likely(vlan_tx_tag_present(skb))) {
+ vlan_set_tci(skb, 0);
+ } else {
+ if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
+ skb->len < VLAN_ETH_HLEN))
+ return 0;
- vh = vlan_eth_hdr(skb);
- old_tci = vh->h_vlan_TCI;
+ err = __pop_vlan_tci(skb, &tci);
+ if (err)
+ return err;
+ }
+ /* If another 802.1Q tag follows, move it into the hw accel tag. */
+ if (likely(skb->protocol != htons(ETH_P_8021Q) ||
+ skb->len < VLAN_ETH_HLEN))
+ return 0;
- vh->h_vlan_TCI = tci;
+ err = __pop_vlan_tci(skb, &tci);
+ if (unlikely(err))
+ return err;
- if (get_ip_summed(skb) == OVS_CSUM_COMPLETE) {
- __be16 diff[] = { ~old_tci, vh->h_vlan_TCI };
+ __vlan_hwaccel_put_tag(skb, ntohs(tci));
+ return 0;
+}
- skb->csum = ~csum_partial((char *)diff, sizeof(diff),
- ~skb->csum);
- }
- } else {
- int err;
+static int push_vlan(struct sk_buff *skb,
+ const struct ovs_action_push_vlan *vlan)
+{
+ if (unlikely(vlan_tx_tag_present(skb))) {
+ u16 current_tag;
- /* Add vlan header */
+ /* Push the existing hw accel VLAN tag down into the packet data. */
+ current_tag = vlan_tx_tag_get(skb);
- /* Set up checksumming pointers for checksum-deferred packets
- * on Xen. Otherwise, dev_queue_xmit() will try to do this
- * when we send the packet out on the wire, and it will fail at
- * that point because skb_checksum_setup() will not look inside
- * an 802.1Q header. */
- err = vswitch_skb_checksum_setup(skb);
- if (unlikely(err)) {
- kfree_skb(skb);
- return ERR_PTR(err);
- }
-
- /* GSO is not implemented for packets with an 802.1Q header, so
- * we have to do segmentation before we add that header.
- *
- * GSO does work with hardware-accelerated VLAN tagging, but we
- * can't use hardware-accelerated VLAN tagging since it
- * requires the device to have a VLAN group configured (with
- * e.g. vconfig(8)) and we don't do that.
- *
- * Having to do this here may be a performance loss, since we
- * can't take advantage of TSO hardware support, although it
- * does not make a measurable network performance difference
- * for 1G Ethernet. Fixing that would require patching the
- * kernel (either to add GSO support to the VLAN protocol or to
- * support hardware-accelerated VLAN tagging without VLAN
- * groups configured). */
- if (skb_is_gso(skb)) {
- const struct nlattr *actions_left;
- u32 actions_len_left;
- struct sk_buff *segs;
-
- segs = skb_gso_segment(skb, 0);
- kfree_skb(skb);
- if (IS_ERR(segs))
- return ERR_CAST(segs);
-
- actions_len_left = actions_len;
- actions_left = nla_next(a, &actions_len_left);
-
- do {
- struct sk_buff *nskb = segs->next;
-
- segs->next = NULL;
-
- /* GSO can change the checksum type so update.*/
- compute_ip_summed(segs, true);
-
- segs = __vlan_put_tag(segs, ntohs(tci));
- err = -ENOMEM;
- if (segs) {
- err = do_execute_actions(
- dp, segs, key, actions_left,
- actions_len_left);
- }
-
- if (unlikely(err)) {
- while ((segs = nskb)) {
- nskb = segs->next;
- segs->next = NULL;
- kfree_skb(segs);
- }
- return ERR_PTR(err);
- }
-
- segs = nskb;
- } while (segs->next);
-
- skb = segs;
- compute_ip_summed(skb, true);
- }
+ if (!__vlan_put_tag(skb, current_tag))
+ return -ENOMEM;
- /* The hardware-accelerated version of vlan_put_tag() works
- * only for a device that has a VLAN group configured (with
- * e.g. vconfig(8)), so call the software-only version
- * __vlan_put_tag() directly instead.
- */
- skb = __vlan_put_tag(skb, ntohs(tci));
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- /* GSO doesn't fix up the hardware computed checksum so this
- * will only be hit in the non-GSO case. */
if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
skb->csum = csum_add(skb->csum, csum_partial(skb->data
- + ETH_HLEN, VLAN_HLEN, 0));
- }
+ + (2 * ETH_ALEN), VLAN_HLEN, 0));
- return skb;
+ }
+ __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
+ return 0;
}
-static struct sk_buff *strip_vlan(struct sk_buff *skb)
+static int set_eth_addr(struct sk_buff *skb,
+ const struct ovs_key_ethernet *eth_key)
{
- skb = make_writable(skb, 0);
- if (skb)
- vlan_pull_tag(skb);
- return skb;
+ int err;
+
+ err = make_writable(skb, ETH_HLEN);
+ if (unlikely(err))
+ return err;
+
+ memcpy(eth_hdr(skb)->h_source, eth_key->eth_src, ETH_ALEN);
+ memcpy(eth_hdr(skb)->h_dest, eth_key->eth_dst, ETH_ALEN);
+
+ return 0;
}
-static bool is_ip(struct sk_buff *skb, const struct odp_flow_key *key)
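+/* Rewrite one IPv4 address in place, folding the difference into the TCP or
+ * UDP checksum (when one is in use) and into the IP header checksum. */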
+static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
+ __be32 *addr, __be32 new_addr)
{
- return (key->dl_type == htons(ETH_P_IP) &&
- skb->transport_header > skb->network_header);
+ int transport_len = skb->len - skb_transport_offset(skb);
+
+ if (nh->protocol == IPPROTO_TCP) {
+ if (likely(transport_len >= sizeof(struct tcphdr)))
+ inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
+ *addr, new_addr, 1);
+ } else if (nh->protocol == IPPROTO_UDP) {
+ if (likely(transport_len >= sizeof(struct udphdr))) {
+ struct udphdr *uh = udp_hdr(skb);
+
+ if (uh->check ||
+ get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
+ inet_proto_csum_replace4(&uh->check, skb,
+ *addr, new_addr, 1);
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ }
+ }
+ }
+
+ csum_replace4(&nh->check, *addr, new_addr);
+ skb_clear_rxhash(skb);
+ *addr = new_addr;
}
-static __sum16 *get_l4_checksum(struct sk_buff *skb, const struct odp_flow_key *key)
+static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
+ __be32 addr[4], const __be32 new_addr[4])
{
int transport_len = skb->len - skb_transport_offset(skb);
- if (key->nw_proto == IPPROTO_TCP) {
+
+ if (l4_proto == IPPROTO_TCP) {
if (likely(transport_len >= sizeof(struct tcphdr)))
- return &tcp_hdr(skb)->check;
- } else if (key->nw_proto == IPPROTO_UDP) {
- if (likely(transport_len >= sizeof(struct udphdr)))
- return &udp_hdr(skb)->check;
+ inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
+ addr, new_addr, 1);
+ } else if (l4_proto == IPPROTO_UDP) {
+ if (likely(transport_len >= sizeof(struct udphdr))) {
+ struct udphdr *uh = udp_hdr(skb);
+
+ if (uh->check ||
+ get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
+ inet_proto_csum_replace16(&uh->check, skb,
+ addr, new_addr, 1);
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ }
+ }
}
- return NULL;
}
-static struct sk_buff *set_nw_addr(struct sk_buff *skb,
- const struct odp_flow_key *key,
- const struct nlattr *a)
+static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
+ __be32 addr[4], const __be32 new_addr[4],
+ bool recalculate_csum)
{
- __be32 new_nwaddr = nla_get_be32(a);
- struct iphdr *nh;
- __sum16 *check;
- __be32 *nwaddr;
+ if (recalculate_csum)
+ update_ipv6_checksum(skb, l4_proto, addr, new_addr);
- if (unlikely(!is_ip(skb, key)))
- return skb;
+ skb_clear_rxhash(skb);
+ memcpy(addr, new_addr, sizeof(__be32[4]));
+}
- skb = make_writable(skb, 0);
- if (unlikely(!skb))
- return NULL;
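+/* The 8-bit IPv6 traffic class is split across the header: its high nibble
+ * lives in 'priority' and its low nibble in the top of flow_lbl[0], which
+ * otherwise holds the top bits of the 20-bit flow label. */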
+static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
+{
+ nh->priority = tc >> 4;
+ nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
+}
+
+static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
+{
+ nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | ((fl & 0x000F0000) >> 16);
+ nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
+ nh->flow_lbl[2] = fl & 0x000000FF;
+}
+
+static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
+{
+ csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
+ nh->ttl = new_ttl;
+}
+
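+/* Rewrite only the IPv4 header fields that differ from 'ipv4_key', updating
+ * the affected checksums along the way. */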
+static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
+{
+ struct iphdr *nh;
+ int err;
+
+ err = make_writable(skb, skb_network_offset(skb) +
+ sizeof(struct iphdr));
+ if (unlikely(err))
+ return err;
nh = ip_hdr(skb);
- nwaddr = nla_type(a) == ODPAT_SET_NW_SRC ? &nh->saddr : &nh->daddr;
- check = get_l4_checksum(skb, key);
- if (likely(check))
- inet_proto_csum_replace4(check, skb, *nwaddr, new_nwaddr, 1);
- csum_replace4(&nh->check, *nwaddr, new_nwaddr);
+ if (ipv4_key->ipv4_src != nh->saddr)
+ set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);
+
+ if (ipv4_key->ipv4_dst != nh->daddr)
+ set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);
- *nwaddr = new_nwaddr;
+ if (ipv4_key->ipv4_tos != nh->tos)
+ ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);
- return skb;
+ if (ipv4_key->ipv4_ttl != nh->ttl)
+ set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);
+
+ return 0;
}
-static struct sk_buff *set_nw_tos(struct sk_buff *skb,
- const struct odp_flow_key *key,
- u8 nw_tos)
+static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
{
- if (unlikely(!is_ip(skb, key)))
- return skb;
-
- skb = make_writable(skb, 0);
- if (skb) {
- struct iphdr *nh = ip_hdr(skb);
- u8 *f = &nh->tos;
- u8 old = *f;
- u8 new;
-
- /* Set the DSCP bits and preserve the ECN bits. */
- new = nw_tos | (nh->tos & INET_ECN_MASK);
- csum_replace4(&nh->check, (__force __be32)old,
- (__force __be32)new);
- *f = new;
+ struct ipv6hdr *nh;
+ int err;
+ __be32 *saddr;
+ __be32 *daddr;
+
+ err = make_writable(skb, skb_network_offset(skb) +
+ sizeof(struct ipv6hdr));
+ if (unlikely(err))
+ return err;
+
+ nh = ipv6_hdr(skb);
+ saddr = (__be32 *)&nh->saddr;
+ daddr = (__be32 *)&nh->daddr;
+
+ if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src)))
+ set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
+ ipv6_key->ipv6_src, true);
+
+ if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
+ unsigned int offset = 0;
+ int flags = OVS_IP6T_FH_F_SKIP_RH;
+ bool recalc_csum = true;
+
+ if (ipv6_ext_hdr(nh->nexthdr))
+ recalc_csum = ipv6_find_hdr(skb, &offset,
+ NEXTHDR_ROUTING, NULL,
+ &flags) != NEXTHDR_ROUTING;
+
+ set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
+ ipv6_key->ipv6_dst, recalc_csum);
}
- return skb;
+
+ set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
+ set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
+ nh->hop_limit = ipv6_key->ipv6_hlimit;
+
+ return 0;
}
-static struct sk_buff *set_tp_port(struct sk_buff *skb,
- const struct odp_flow_key *key,
- const struct nlattr *a)
+/* Must follow make_writable() since that can move the skb data. */
+static void set_tp_port(struct sk_buff *skb, __be16 *port,
+ __be16 new_port, __sum16 *check)
{
- struct udphdr *th;
- __sum16 *check;
- __be16 *port;
+ inet_proto_csum_replace2(check, skb, *port, new_port, 0);
+ *port = new_port;
+ skb_clear_rxhash(skb);
+}
+
+static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
+{
+ struct udphdr *uh = udp_hdr(skb);
- if (unlikely(!is_ip(skb, key)))
- return skb;
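+ /* A zero UDP checksum means checksumming is disabled, and with
+ * OVS_CSUM_PARTIAL the checksum is computed later, so only fold the
+ * new port into an existing, finalized checksum. */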
+ if (uh->check && get_ip_summed(skb) != OVS_CSUM_PARTIAL) {
+ set_tp_port(skb, port, new_port, &uh->check);
- skb = make_writable(skb, 0);
- if (unlikely(!skb))
- return NULL;
-
- /* Must follow make_writable() since that can move the skb data. */
- check = get_l4_checksum(skb, key);
- if (unlikely(!check))
- return skb;
-
- /*
- * Update port and checksum.
- *
- * This is OK because source and destination port numbers are at the
- * same offsets in both UDP and TCP headers, and get_l4_checksum() only
- * supports those protocols.
- */
- th = udp_hdr(skb);
- port = nla_type(a) == ODPAT_SET_TP_SRC ? &th->source : &th->dest;
- inet_proto_csum_replace2(check, skb, *port, nla_get_be16(a), 0);
- *port = nla_get_be16(a);
-
- return skb;
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ } else {
+ *port = new_port;
+ skb_clear_rxhash(skb);
+ }
}
-/**
- * is_spoofed_arp - check for invalid ARP packet
- *
- * @skb: skbuff containing an Ethernet packet, with network header pointing
- * just past the Ethernet and optional 802.1Q header.
- * @key: flow key extracted from @skb by flow_extract()
- *
- * Returns true if @skb is an invalid Ethernet+IPv4 ARP packet: one with screwy
- * or truncated header fields or one whose inner and outer Ethernet address
- * differ.
- */
-static bool is_spoofed_arp(struct sk_buff *skb, const struct odp_flow_key *key)
+static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
{
- struct arp_eth_header *arp;
+ struct udphdr *uh;
+ int err;
- if (key->dl_type != htons(ETH_P_ARP))
- return false;
+ err = make_writable(skb, skb_transport_offset(skb) +
+ sizeof(struct udphdr));
+ if (unlikely(err))
+ return err;
- if (skb_network_offset(skb) + sizeof(struct arp_eth_header) > skb->len)
- return true;
+ uh = udp_hdr(skb);
+ if (udp_port_key->udp_src != uh->source)
+ set_udp_port(skb, &uh->source, udp_port_key->udp_src);
- arp = (struct arp_eth_header *)skb_network_header(skb);
- return (arp->ar_hrd != htons(ARPHRD_ETHER) ||
- arp->ar_pro != htons(ETH_P_IP) ||
- arp->ar_hln != ETH_ALEN ||
- arp->ar_pln != 4 ||
- compare_ether_addr(arp->ar_sha, eth_hdr(skb)->h_source));
+ if (udp_port_key->udp_dst != uh->dest)
+ set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
+
+ return 0;
}
-static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
+static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
{
- struct vport *p;
+ struct tcphdr *th;
+ int err;
- if (!skb)
- goto error;
+ err = make_writable(skb, skb_transport_offset(skb) +
+ sizeof(struct tcphdr));
+ if (unlikely(err))
+ return err;
- p = rcu_dereference(dp->ports[out_port]);
- if (!p)
- goto error;
+ th = tcp_hdr(skb);
+ if (tcp_port_key->tcp_src != th->source)
+ set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
- vport_send(p, skb);
- return;
+ if (tcp_port_key->tcp_dst != th->dest)
+ set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
-error:
- kfree_skb(skb);
+ return 0;
}
-static int output_control(struct datapath *dp, struct sk_buff *skb, u32 arg)
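+/* Transmit 'skb' on the datapath port numbered 'out_port', consuming the skb
+ * even on error. */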
+static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
- skb = skb_clone(skb, GFP_ATOMIC);
- if (!skb)
+ struct vport *vport;
+
+ if (unlikely(!skb))
return -ENOMEM;
- return dp_output_control(dp, skb, _ODPL_ACTION_NR, arg);
+
+ vport = ovs_vport_rcu(dp, out_port);
+ if (unlikely(!vport)) {
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+
+ ovs_vport_send(vport, skb);
+ return 0;
+}
+
+static int output_userspace(struct datapath *dp, struct sk_buff *skb,
+ const struct nlattr *attr)
+{
+ struct dp_upcall_info upcall;
+ const struct nlattr *a;
+ int rem;
+
+ upcall.cmd = OVS_PACKET_CMD_ACTION;
+ upcall.key = &OVS_CB(skb)->flow->key;
+ upcall.userdata = NULL;
+ upcall.portid = 0;
+
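+ /* OVS_ACTION_ATTR_USERSPACE carries nested attributes: optional
+ * userdata to echo back verbatim and the Netlink port to upcall to. */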
+ for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
+ a = nla_next(a, &rem)) {
+ switch (nla_type(a)) {
+ case OVS_USERSPACE_ATTR_USERDATA:
+ upcall.userdata = a;
+ break;
+
+ case OVS_USERSPACE_ATTR_PID:
+ upcall.portid = nla_get_u32(a);
+ break;
+ }
+ }
+
+ return ovs_dp_upcall(dp, skb, &upcall);
+}
+
+static int sample(struct datapath *dp, struct sk_buff *skb,
+ const struct nlattr *attr)
+{
+ const struct nlattr *acts_list = NULL;
+ const struct nlattr *a;
+ int rem;
+
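+ /* Run the nested action list with probability p / 2^32, where p is
+ * the u32 in OVS_SAMPLE_ATTR_PROBABILITY; validation has already
+ * ensured that an OVS_SAMPLE_ATTR_ACTIONS list is present. */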
+ for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
+ a = nla_next(a, &rem)) {
+ switch (nla_type(a)) {
+ case OVS_SAMPLE_ATTR_PROBABILITY:
+ if (net_random() >= nla_get_u32(a))
+ return 0;
+ break;
+
+ case OVS_SAMPLE_ATTR_ACTIONS:
+ acts_list = a;
+ break;
+ }
+ }
+
+ return do_execute_actions(dp, skb, nla_data(acts_list),
+ nla_len(acts_list), true);
+}
+
+static int execute_set_action(struct sk_buff *skb,
+ const struct nlattr *nested_attr)
+{
+ int err = 0;
+
+ switch (nla_type(nested_attr)) {
+ case OVS_KEY_ATTR_PRIORITY:
+ skb->priority = nla_get_u32(nested_attr);
+ break;
+
+ case OVS_KEY_ATTR_SKB_MARK:
+ skb_set_mark(skb, nla_get_u32(nested_attr));
+ break;
+
+ case OVS_KEY_ATTR_IPV4_TUNNEL:
+ OVS_CB(skb)->tun_key = nla_data(nested_attr);
+ break;
+
+ case OVS_KEY_ATTR_ETHERNET:
+ err = set_eth_addr(skb, nla_data(nested_attr));
+ break;
+
+ case OVS_KEY_ATTR_IPV4:
+ err = set_ipv4(skb, nla_data(nested_attr));
+ break;
+
+ case OVS_KEY_ATTR_IPV6:
+ err = set_ipv6(skb, nla_data(nested_attr));
+ break;
+
+ case OVS_KEY_ATTR_TCP:
+ err = set_tcp(skb, nla_data(nested_attr));
+ break;
+
+ case OVS_KEY_ATTR_UDP:
+ err = set_udp(skb, nla_data(nested_attr));
+ break;
+ }
+
+ return err;
}
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
- const struct odp_flow_key *key,
- const struct nlattr *actions, u32 actions_len)
+ const struct nlattr *attr, int len, bool keep_skb)
{
/* Every output action needs a separate clone of 'skb', but the common
* case is just a single output action, so doing a clone and then
* freeing the original skbuff would be wasteful. The following code
* is therefore slightly obscure to avoid that case. */
int prev_port = -1;
- u32 priority = skb->priority;
const struct nlattr *a;
- int rem, err;
+ int rem;
+
+ for (a = attr, rem = len; rem > 0;
+ a = nla_next(a, &rem)) {
+ int err = 0;
- for (a = actions, rem = actions_len; rem > 0; a = nla_next(a, &rem)) {
if (prev_port != -1) {
do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
prev_port = -1;
}
switch (nla_type(a)) {
- case ODPAT_OUTPUT:
+ case OVS_ACTION_ATTR_OUTPUT:
prev_port = nla_get_u32(a);
break;
- case ODPAT_CONTROLLER:
- err = output_control(dp, skb, nla_get_u64(a));
- if (err) {
- kfree_skb(skb);
- return err;
- }
+ case OVS_ACTION_ATTR_USERSPACE:
+ output_userspace(dp, skb, a);
break;
- case ODPAT_SET_TUNNEL:
- OVS_CB(skb)->tun_id = nla_get_be64(a);
+ case OVS_ACTION_ATTR_PUSH_VLAN:
+ err = push_vlan(skb, nla_data(a));
+ if (unlikely(err)) /* skb already freed. */
+ return err;
break;
- case ODPAT_SET_DL_TCI:
- skb = modify_vlan_tci(dp, skb, key, a, rem);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ case OVS_ACTION_ATTR_POP_VLAN:
+ err = pop_vlan(skb);
break;
- case ODPAT_STRIP_VLAN:
- skb = strip_vlan(skb);
+ case OVS_ACTION_ATTR_SET:
+ err = execute_set_action(skb, nla_data(a));
break;
- case ODPAT_SET_DL_SRC:
- skb = make_writable(skb, 0);
- if (!skb)
- return -ENOMEM;
- memcpy(eth_hdr(skb)->h_source, nla_data(a), ETH_ALEN);
+ case OVS_ACTION_ATTR_SAMPLE:
+ err = sample(dp, skb, a);
break;
+ }
- case ODPAT_SET_DL_DST:
- skb = make_writable(skb, 0);
- if (!skb)
- return -ENOMEM;
- memcpy(eth_hdr(skb)->h_dest, nla_data(a), ETH_ALEN);
- break;
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ return err;
+ }
+ }
- case ODPAT_SET_NW_SRC:
- case ODPAT_SET_NW_DST:
- skb = set_nw_addr(skb, key, a);
- break;
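+ /* With 'keep_skb' the caller retains ownership of 'skb' (as in nested
+ * sample() actions), so the final output must send a clone instead. */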
+ if (prev_port != -1) {
+ if (keep_skb)
+ skb = skb_clone(skb, GFP_ATOMIC);
- case ODPAT_SET_NW_TOS:
- skb = set_nw_tos(skb, key, nla_get_u8(a));
- break;
+ do_output(dp, skb, prev_port);
+ } else if (!keep_skb)
+ consume_skb(skb);
- case ODPAT_SET_TP_SRC:
- case ODPAT_SET_TP_DST:
- skb = set_tp_port(skb, key, a);
- break;
+ return 0;
+}
- case ODPAT_SET_PRIORITY:
- skb->priority = nla_get_u32(a);
- break;
+/* We limit the number of times that we pass into ovs_execute_actions()
+ * to avoid blowing out the stack in the event that we have a loop. */
+#define MAX_LOOPS 4
- case ODPAT_POP_PRIORITY:
- skb->priority = priority;
- break;
+struct loop_counter {
+ u8 count; /* Recursion depth on the current CPU. */
+ bool looping; /* Loop detected? */
+};
- case ODPAT_DROP_SPOOFED_ARP:
- if (unlikely(is_spoofed_arp(skb, key)))
- goto exit;
- break;
- }
- if (!skb)
- return -ENOMEM;
- }
-exit:
- if (prev_port != -1)
- do_output(dp, skb, prev_port);
- else
- kfree_skb(skb);
- return 0;
-}
+static DEFINE_PER_CPU(struct loop_counter, loop_counters);
-/* Send a copy of this packet up to the sFlow agent, along with extra
- * information about what happened to it. */
-static void sflow_sample(struct datapath *dp, struct sk_buff *skb,
- const struct nlattr *a, u32 actions_len,
- struct vport *vport)
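+/* Clearing the flow's action list makes further packets on the looping flow
+ * be dropped until userspace installs new actions. */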
+static int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
{
- struct odp_sflow_sample_header *hdr;
- unsigned int hdrlen = sizeof(struct odp_sflow_sample_header);
- struct sk_buff *nskb;
-
- nskb = skb_copy_expand(skb, actions_len + hdrlen, 0, GFP_ATOMIC);
- if (!nskb)
- return;
-
- memcpy(__skb_push(nskb, actions_len), a, actions_len);
- hdr = (struct odp_sflow_sample_header*)__skb_push(nskb, hdrlen);
- hdr->actions_len = actions_len;
- hdr->sample_pool = atomic_read(&vport->sflow_pool);
- dp_output_control(dp, nskb, _ODPL_SFLOW_NR, 0);
+ if (net_ratelimit())
+ pr_warn("%s: flow looped %d times, dropping\n",
+ ovs_dp_name(dp), MAX_LOOPS);
+ actions->actions_len = 0;
+ return -ELOOP;
}
/* Execute a list of actions against 'skb'. */
-int execute_actions(struct datapath *dp, struct sk_buff *skb,
- const struct odp_flow_key *key,
- const struct nlattr *actions, u32 actions_len)
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
{
- if (dp->sflow_probability) {
- struct vport *p = OVS_CB(skb)->vport;
- if (p) {
- atomic_inc(&p->sflow_pool);
- if (dp->sflow_probability == UINT_MAX ||
- net_random() < dp->sflow_probability)
- sflow_sample(dp, skb, actions, actions_len, p);
- }
+ struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
+ struct loop_counter *loop;
+ int error;
+
+ /* Check whether we've looped too much. */
+ loop = &__get_cpu_var(loop_counters);
+ if (unlikely(++loop->count > MAX_LOOPS))
+ loop->looping = true;
+ if (unlikely(loop->looping)) {
+ error = loop_suppress(dp, acts);
+ kfree_skb(skb);
+ goto out_loop;
}
- OVS_CB(skb)->tun_id = 0;
+ OVS_CB(skb)->tun_key = NULL;
+ error = do_execute_actions(dp, skb, acts->actions,
+ acts->actions_len, false);
+
+ /* Check whether sub-actions looped too much. */
+ if (unlikely(loop->looping))
+ error = loop_suppress(dp, acts);
+
+out_loop:
+ /* Decrement loop counter. */
+ if (!--loop->count)
+ loop->looping = false;
- return do_execute_actions(dp, skb, key, actions, actions_len);
+ return error;
}