X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=datapath%2Factions.c;h=7fe2f5454b44203108e7b97c6d1a408f8b2bd972;hb=e2f3178f0582eda302bdc5629189b6a56d9fbcdd;hp=ff67372a0ce13cc43d8068947b04da57bcdf3e93;hpb=c1c9c9c4b636ab2acf2f75024c282a9a497ca9a9;p=sliver-openvswitch.git diff --git a/datapath/actions.c b/datapath/actions.c index ff67372a0..7fe2f5454 100644 --- a/datapath/actions.c +++ b/datapath/actions.c @@ -1,537 +1,694 @@ /* - * Distributed under the terms of the GNU GPL version 2. - * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks. + * Copyright (c) 2007-2014 Nicira, Inc. * - * Significant portions of this file may be copied from parts of the Linux - * kernel, by Linus Torvalds and others. + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA */ -/* Functions for executing flow actions. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include +#include +#include #include #include #include +#include #include -#include #include +#include #include +#include +#include -#include "actions.h" #include "datapath.h" -#include "openvswitch/datapath-protocol.h" +#include "vlan.h" #include "vport.h" -static struct sk_buff * -make_writable(struct sk_buff *skb, unsigned min_headroom, gfp_t gfp) -{ - if (skb_shared(skb) || skb_cloned(skb)) { - struct sk_buff *nskb; - unsigned headroom = max(min_headroom, skb_headroom(skb)); +static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, + const struct nlattr *attr, int len, bool keep_skb); - nskb = skb_copy_expand(skb, headroom, skb_tailroom(skb), gfp); - if (nskb) { - set_skb_csum_bits(skb, nskb); - kfree_skb(skb); - return nskb; - } - } else { - unsigned int hdr_len = (skb_transport_offset(skb) - + sizeof(struct tcphdr)); - if (pskb_may_pull(skb, min(hdr_len, skb->len))) - return skb; - } - kfree_skb(skb); - return NULL; -} - -static void set_tunnel(struct sk_buff *skb, struct odp_flow_key *key, - __be32 tun_id) +static int make_writable(struct sk_buff *skb, int write_len) { - OVS_CB(skb)->tun_id = key->tun_id = tun_id; + if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) + return 0; + + return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); } -static struct sk_buff * -vlan_pull_tag(struct sk_buff *skb) +/* remove VLAN header from packet and update csum accordingly. 
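+ * For CHECKSUM_COMPLETE skbs the four octets being removed (the 802.1Q
+ * TPID and TCI that follow the Ethernet addresses) contribute to
+ * skb->csum, so their partial sum is subtracted before the header is
+ * pulled.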
*/ +static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) { - struct vlan_ethhdr *vh = vlan_eth_hdr(skb); - struct ethhdr *eh; + struct vlan_hdr *vhdr; + int err; - /* Verify we were given a vlan packet */ - if (vh->h_vlan_proto != htons(ETH_P_8021Q)) - return skb; + err = make_writable(skb, VLAN_ETH_HLEN); + if (unlikely(err)) + return err; - if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE) + if (skb->ip_summed == CHECKSUM_COMPLETE) skb->csum = csum_sub(skb->csum, csum_partial(skb->data - + ETH_HLEN, VLAN_HLEN, 0)); + + (2 * ETH_ALEN), VLAN_HLEN, 0)); - memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN); + vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); + *current_tci = vhdr->h_vlan_TCI; - eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN); + memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); + __skb_pull(skb, VLAN_HLEN); - skb->protocol = eh->h_proto; + vlan_set_encap_proto(skb, vhdr); skb->mac_header += VLAN_HLEN; + skb_reset_mac_len(skb); - return skb; + return 0; } - -static struct sk_buff * -modify_vlan_tci(struct datapath *dp, struct sk_buff *skb, - struct odp_flow_key *key, const union odp_action *a, - int n_actions, gfp_t gfp) +static int pop_vlan(struct sk_buff *skb) { - u16 tci, mask; + __be16 tci; + int err; - if (a->type == ODPAT_SET_VLAN_VID) { - tci = ntohs(a->vlan_vid.vlan_vid); - mask = VLAN_VID_MASK; - key->dl_vlan = a->vlan_vid.vlan_vid; + if (likely(vlan_tx_tag_present(skb))) { + vlan_set_tci(skb, 0); } else { - tci = a->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT; - mask = VLAN_PCP_MASK; - key->dl_vlan_pcp = a->vlan_pcp.vlan_pcp; + if (unlikely(skb->protocol != htons(ETH_P_8021Q) || + skb->len < VLAN_ETH_HLEN)) + return 0; + + err = __pop_vlan_tci(skb, &tci); + if (err) + return err; } + /* move next vlan tag to hw accel tag */ + if (likely(skb->protocol != htons(ETH_P_8021Q) || + skb->len < VLAN_ETH_HLEN)) + return 0; - skb = make_writable(skb, VLAN_HLEN, gfp); - if (!skb) - return ERR_PTR(-ENOMEM); + err = __pop_vlan_tci(skb, &tci); + if (unlikely(err)) + return err; - if (skb->protocol == htons(ETH_P_8021Q)) { - /* Modify vlan id, but maintain other TCI values */ - struct vlan_ethhdr *vh = vlan_eth_hdr(skb); - __be16 old_tci = vh->h_vlan_TCI; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci)); + return 0; +} - vh->h_vlan_TCI = htons((ntohs(vh->h_vlan_TCI) & ~mask) | tci); +static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan) +{ + if (unlikely(vlan_tx_tag_present(skb))) { + u16 current_tag; - if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE) { - __be16 diff[] = { ~old_tci, vh->h_vlan_TCI }; + /* push down current VLAN tag */ + current_tag = vlan_tx_tag_get(skb); - skb->csum = ~csum_partial((char *)diff, sizeof(diff), - ~skb->csum); - } - } else { - /* Add vlan header */ - - /* Set up checksumming pointers for checksum-deferred packets - * on Xen. Otherwise, dev_queue_xmit() will try to do this - * when we send the packet out on the wire, and it will fail at - * that point because skb_checksum_setup() will not look inside - * an 802.1Q header. */ - vswitch_skb_checksum_setup(skb); - - /* GSO is not implemented for packets with an 802.1Q header, so - * we have to do segmentation before we add that header. - * - * GSO does work with hardware-accelerated VLAN tagging, but we - * can't use hardware-accelerated VLAN tagging since it - * requires the device to have a VLAN group configured (with - * e.g. vconfig(8)) and we don't do that. 
- * - * Having to do this here may be a performance loss, since we - * can't take advantage of TSO hardware support, although it - * does not make a measurable network performance difference - * for 1G Ethernet. Fixing that would require patching the - * kernel (either to add GSO support to the VLAN protocol or to - * support hardware-accelerated VLAN tagging without VLAN - * groups configured). */ - if (skb_is_gso(skb)) { - struct sk_buff *segs; - - segs = skb_gso_segment(skb, 0); - kfree_skb(skb); - if (unlikely(IS_ERR(segs))) - return ERR_CAST(segs); - - do { - struct sk_buff *nskb = segs->next; - int err; - - segs->next = NULL; - - /* GSO can change the checksum type so update.*/ - compute_ip_summed(segs, true); - - segs = __vlan_put_tag(segs, tci); - err = -ENOMEM; - if (segs) { - struct odp_flow_key segkey = *key; - err = execute_actions(dp, segs, - &segkey, a + 1, - n_actions - 1, - gfp); - } - - if (unlikely(err)) { - while ((segs = nskb)) { - nskb = segs->next; - segs->next = NULL; - kfree_skb(segs); - } - return ERR_PTR(err); - } - - segs = nskb; - } while (segs->next); - - skb = segs; - compute_ip_summed(skb, true); - } + if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag)) + return -ENOMEM; - /* The hardware-accelerated version of vlan_put_tag() works - * only for a device that has a VLAN group configured (with - * e.g. vconfig(8)), so call the software-only version - * __vlan_put_tag() directly instead. - */ - skb = __vlan_put_tag(skb, tci); - if (!skb) - return ERR_PTR(-ENOMEM); - - /* GSO doesn't fix up the hardware computed checksum so this - * will only be hit in the non-GSO case. */ - if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE) + if (skb->ip_summed == CHECKSUM_COMPLETE) skb->csum = csum_add(skb->csum, csum_partial(skb->data - + ETH_HLEN, VLAN_HLEN, 0)); + + (2 * ETH_ALEN), VLAN_HLEN, 0)); + } + __vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); + return 0; +} - return skb; +static int set_eth_addr(struct sk_buff *skb, + const struct ovs_key_ethernet *eth_key) +{ + int err; + err = make_writable(skb, ETH_HLEN); + if (unlikely(err)) + return err; + + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); + + ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src); + ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst); + + ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); + + return 0; } -static struct sk_buff *strip_vlan(struct sk_buff *skb, - struct odp_flow_key *key, gfp_t gfp) +static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, + __be32 *addr, __be32 new_addr) { - skb = make_writable(skb, 0, gfp); - if (skb) { - vlan_pull_tag(skb); - key->dl_vlan = htons(ODP_VLAN_NONE); + int transport_len = skb->len - skb_transport_offset(skb); + + if (nh->protocol == IPPROTO_TCP) { + if (likely(transport_len >= sizeof(struct tcphdr))) + inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb, + *addr, new_addr, 1); + } else if (nh->protocol == IPPROTO_UDP) { + if (likely(transport_len >= sizeof(struct udphdr))) { + struct udphdr *uh = udp_hdr(skb); + + if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { + inet_proto_csum_replace4(&uh->check, skb, + *addr, new_addr, 1); + if (!uh->check) + uh->check = CSUM_MANGLED_0; + } + } } - return skb; + + csum_replace4(&nh->check, *addr, new_addr); + skb_clear_hash(skb); + *addr = new_addr; } -static struct sk_buff *set_dl_addr(struct sk_buff *skb, - struct odp_flow_key *key, - const struct odp_action_dl_addr *a, - gfp_t gfp) +static void update_ipv6_checksum(struct sk_buff *skb, 
u8 l4_proto, + __be32 addr[4], const __be32 new_addr[4]) { - skb = make_writable(skb, 0, gfp); - if (skb) { - struct ethhdr *eh = eth_hdr(skb); - if (a->type == ODPAT_SET_DL_SRC) { - memcpy(eh->h_source, a->dl_addr, ETH_ALEN); - memcpy(key->dl_src, a->dl_addr, ETH_ALEN); - } else { - memcpy(eh->h_dest, a->dl_addr, ETH_ALEN); - memcpy(key->dl_dst, a->dl_addr, ETH_ALEN); + int transport_len = skb->len - skb_transport_offset(skb); + + if (l4_proto == IPPROTO_TCP) { + if (likely(transport_len >= sizeof(struct tcphdr))) + inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb, + addr, new_addr, 1); + } else if (l4_proto == IPPROTO_UDP) { + if (likely(transport_len >= sizeof(struct udphdr))) { + struct udphdr *uh = udp_hdr(skb); + + if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { + inet_proto_csum_replace16(&uh->check, skb, + addr, new_addr, 1); + if (!uh->check) + uh->check = CSUM_MANGLED_0; + } } } - return skb; } -/* Updates 'sum', which is a field in 'skb''s data, given that a 4-byte field - * covered by the sum has been changed from 'from' to 'to'. If set, - * 'pseudohdr' indicates that the field is in the TCP or UDP pseudo-header. - * Based on nf_proto_csum_replace4. */ -static void update_csum(__sum16 *sum, struct sk_buff *skb, - __be32 from, __be32 to, int pseudohdr) +static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto, + __be32 addr[4], const __be32 new_addr[4], + bool recalculate_csum) { - __be32 diff[] = { ~from, to }; - - if (OVS_CB(skb)->ip_summed != OVS_CSUM_PARTIAL) { - *sum = csum_fold(csum_partial((char *)diff, sizeof(diff), - ~csum_unfold(*sum))); - if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE && pseudohdr) - skb->csum = ~csum_partial((char *)diff, sizeof(diff), - ~skb->csum); - } else if (pseudohdr) - *sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff), - csum_unfold(*sum))); + if (recalculate_csum) + update_ipv6_checksum(skb, l4_proto, addr, new_addr); + + skb_clear_hash(skb); + memcpy(addr, new_addr, sizeof(__be32[4])); } -static struct sk_buff *set_nw_addr(struct sk_buff *skb, - struct odp_flow_key *key, - const struct odp_action_nw_addr *a, - gfp_t gfp) +static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc) { - if (key->dl_type != htons(ETH_P_IP)) - return skb; - - skb = make_writable(skb, 0, gfp); - if (skb) { - struct iphdr *nh = ip_hdr(skb); - u32 *f = a->type == ODPAT_SET_NW_SRC ? 
&nh->saddr : &nh->daddr; - u32 old = *f; - u32 new = a->nw_addr; - - if (key->nw_proto == IPPROTO_TCP) { - struct tcphdr *th = tcp_hdr(skb); - update_csum(&th->check, skb, old, new, 1); - } else if (key->nw_proto == IPPROTO_UDP) { - struct udphdr *th = udp_hdr(skb); - update_csum(&th->check, skb, old, new, 1); - } - update_csum(&nh->check, skb, old, new, 0); - *f = new; + nh->priority = tc >> 4; + nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4); +} - if (a->type == ODPAT_SET_NW_SRC) - key->nw_src = a->nw_addr; - else - key->nw_dst = a->nw_addr; - } - return skb; +static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl) +{ + nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16; + nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8; + nh->flow_lbl[2] = fl & 0x000000FF; +} + +static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl) +{ + csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8)); + nh->ttl = new_ttl; +} + +static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key) +{ + struct iphdr *nh; + int err; + + err = make_writable(skb, skb_network_offset(skb) + + sizeof(struct iphdr)); + if (unlikely(err)) + return err; + + nh = ip_hdr(skb); + + if (ipv4_key->ipv4_src != nh->saddr) + set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src); + + if (ipv4_key->ipv4_dst != nh->daddr) + set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst); + + if (ipv4_key->ipv4_tos != nh->tos) + ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos); + + if (ipv4_key->ipv4_ttl != nh->ttl) + set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl); + + return 0; } -static struct sk_buff *set_nw_tos(struct sk_buff *skb, - struct odp_flow_key *key, - const struct odp_action_nw_tos *a, - gfp_t gfp) +static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key) { - if (key->dl_type != htons(ETH_P_IP)) - return skb; - - skb = make_writable(skb, 0, gfp); - if (skb) { - struct iphdr *nh = ip_hdr(skb); - u8 *f = &nh->tos; - u8 old = *f; - u8 new; - - /* Set the DSCP bits and preserve the ECN bits. */ - new = a->nw_tos | (nh->tos & INET_ECN_MASK); - update_csum(&nh->check, skb, htons((uint16_t)old), - htons((uint16_t)new), 0); - *f = new; - key->nw_tos = a->nw_tos; + struct ipv6hdr *nh; + int err; + __be32 *saddr; + __be32 *daddr; + + err = make_writable(skb, skb_network_offset(skb) + + sizeof(struct ipv6hdr)); + if (unlikely(err)) + return err; + + nh = ipv6_hdr(skb); + saddr = (__be32 *)&nh->saddr; + daddr = (__be32 *)&nh->daddr; + + if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) + set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr, + ipv6_key->ipv6_src, true); + + if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) { + unsigned int offset = 0; + int flags = OVS_IP6T_FH_F_SKIP_RH; + bool recalc_csum = true; + + if (ipv6_ext_hdr(nh->nexthdr)) + recalc_csum = ipv6_find_hdr(skb, &offset, + NEXTHDR_ROUTING, NULL, + &flags) != NEXTHDR_ROUTING; + + set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr, + ipv6_key->ipv6_dst, recalc_csum); } - return skb; + + set_ipv6_tc(nh, ipv6_key->ipv6_tclass); + set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label)); + nh->hop_limit = ipv6_key->ipv6_hlimit; + + return 0; } -static struct sk_buff * -set_tp_port(struct sk_buff *skb, struct odp_flow_key *key, - const struct odp_action_tp_port *a, - gfp_t gfp) +/* Must follow make_writable() since that can move the skb data. 
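+ * make_writable() may call pskb_expand_head(), which can reallocate the
+ * packet buffer and invalidate any header pointers (such as 'port' and
+ * 'check') fetched before it ran.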
*/ +static void set_tp_port(struct sk_buff *skb, __be16 *port, + __be16 new_port, __sum16 *check) { - int check_ofs; - - if (key->dl_type != htons(ETH_P_IP)) - return skb; - - if (key->nw_proto == IPPROTO_TCP) - check_ofs = offsetof(struct tcphdr, check); - else if (key->nw_proto == IPPROTO_UDP) - check_ofs = offsetof(struct udphdr, check); - else - return skb; - - skb = make_writable(skb, 0, gfp); - if (skb) { - struct udphdr *th = udp_hdr(skb); - u16 *f = a->type == ODPAT_SET_TP_SRC ? &th->source : &th->dest; - u16 old = *f; - u16 new = a->tp_port; - update_csum((u16*)(skb_transport_header(skb) + check_ofs), - skb, old, new, 0); - *f = new; - if (a->type == ODPAT_SET_TP_SRC) - key->tp_src = a->tp_port; - else - key->tp_dst = a->tp_port; + inet_proto_csum_replace2(check, skb, *port, new_port, 0); + *port = new_port; + skb_clear_hash(skb); +} + +static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port) +{ + struct udphdr *uh = udp_hdr(skb); + + if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) { + set_tp_port(skb, port, new_port, &uh->check); + + if (!uh->check) + uh->check = CSUM_MANGLED_0; + } else { + *port = new_port; + skb_clear_hash(skb); } - return skb; } -static inline unsigned packet_length(const struct sk_buff *skb) +static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key) { - unsigned length = skb->len - ETH_HLEN; - if (skb->protocol == htons(ETH_P_8021Q)) - length -= VLAN_HLEN; - return length; + struct udphdr *uh; + int err; + + err = make_writable(skb, skb_transport_offset(skb) + + sizeof(struct udphdr)); + if (unlikely(err)) + return err; + + uh = udp_hdr(skb); + if (udp_port_key->udp_src != uh->source) + set_udp_port(skb, &uh->source, udp_port_key->udp_src); + + if (udp_port_key->udp_dst != uh->dest) + set_udp_port(skb, &uh->dest, udp_port_key->udp_dst); + + return 0; } -static void -do_output(struct datapath *dp, struct sk_buff *skb, int out_port) +static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key) { - struct dp_port *p; - int mtu; + struct tcphdr *th; + int err; + + err = make_writable(skb, skb_transport_offset(skb) + + sizeof(struct tcphdr)); + if (unlikely(err)) + return err; + + th = tcp_hdr(skb); + if (tcp_port_key->tcp_src != th->source) + set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check); + + if (tcp_port_key->tcp_dst != th->dest) + set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check); + + return 0; +} + +static int set_sctp(struct sk_buff *skb, + const struct ovs_key_sctp *sctp_port_key) +{ + struct sctphdr *sh; + int err; + unsigned int sctphoff = skb_transport_offset(skb); + + err = make_writable(skb, sctphoff + sizeof(struct sctphdr)); + if (unlikely(err)) + return err; + + sh = sctp_hdr(skb); + if (sctp_port_key->sctp_src != sh->source || + sctp_port_key->sctp_dst != sh->dest) { + __le32 old_correct_csum, new_csum, old_csum; + + old_csum = sh->checksum; + old_correct_csum = sctp_compute_cksum(skb, sctphoff); - if (!skb) - goto error; + sh->source = sctp_port_key->sctp_src; + sh->dest = sctp_port_key->sctp_dst; - p = rcu_dereference(dp->ports[out_port]); - if (!p) - goto error; + new_csum = sctp_compute_cksum(skb, sctphoff); - mtu = vport_get_mtu(p->vport); - if (packet_length(skb) > mtu && !skb_is_gso(skb)) { - printk(KERN_WARNING "%s: dropped over-mtu packet: %d > %d\n", - dp_name(dp), packet_length(skb), mtu); - goto error; + /* Carry any checksum errors through. 
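+		 * old_csum ^ old_correct_csum is the error (if any) that the
+		 * packet arrived with; folding it into the freshly computed
+		 * checksum preserves that error rather than silently
+		 * repairing it.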
*/ + sh->checksum = old_csum ^ old_correct_csum ^ new_csum; + + skb_clear_hash(skb); } - vport_send(p->vport, skb); - return; + return 0; +} + +static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port) +{ + struct vport *vport; -error: - kfree_skb(skb); + if (unlikely(!skb)) + return -ENOMEM; + + vport = ovs_vport_rcu(dp, out_port); + if (unlikely(!vport)) { + kfree_skb(skb); + return -ENODEV; + } + + ovs_vport_send(vport, skb); + return 0; } -/* Never consumes 'skb'. Returns a port that 'skb' should be sent to, -1 if - * none. */ -static int output_group(struct datapath *dp, __u16 group, - struct sk_buff *skb, gfp_t gfp) +static int output_userspace(struct datapath *dp, struct sk_buff *skb, + const struct nlattr *attr) { - struct dp_port_group *g = rcu_dereference(dp->groups[group]); - int prev_port = -1; - int i; - - if (!g) - return -1; - for (i = 0; i < g->n_ports; i++) { - struct dp_port *p = rcu_dereference(dp->ports[g->ports[i]]); - if (!p || OVS_CB(skb)->dp_port == p) - continue; - if (prev_port != -1) { - struct sk_buff *clone = skb_clone(skb, gfp); - if (!clone) - return -1; - do_output(dp, clone, prev_port); + struct dp_upcall_info upcall; + const struct nlattr *a; + int rem; + + BUG_ON(!OVS_CB(skb)->pkt_key); + + upcall.cmd = OVS_PACKET_CMD_ACTION; + upcall.key = OVS_CB(skb)->pkt_key; + upcall.userdata = NULL; + upcall.portid = 0; + + for (a = nla_data(attr), rem = nla_len(attr); rem > 0; + a = nla_next(a, &rem)) { + switch (nla_type(a)) { + case OVS_USERSPACE_ATTR_USERDATA: + upcall.userdata = a; + break; + + case OVS_USERSPACE_ATTR_PID: + upcall.portid = nla_get_u32(a); + break; } - prev_port = p->port_no; } - return prev_port; + + return ovs_dp_upcall(dp, skb, &upcall); } -static int -output_control(struct datapath *dp, struct sk_buff *skb, u32 arg, gfp_t gfp) +static int sample(struct datapath *dp, struct sk_buff *skb, + const struct nlattr *attr) { - skb = skb_clone(skb, gfp); - if (!skb) - return -ENOMEM; - return dp_output_control(dp, skb, _ODPL_ACTION_NR, arg); + const struct nlattr *acts_list = NULL; + const struct nlattr *a; + int rem; + + for (a = nla_data(attr), rem = nla_len(attr); rem > 0; + a = nla_next(a, &rem)) { + switch (nla_type(a)) { + case OVS_SAMPLE_ATTR_PROBABILITY: + if (prandom_u32() >= nla_get_u32(a)) + return 0; + break; + + case OVS_SAMPLE_ATTR_ACTIONS: + acts_list = a; + break; + } + } + + return do_execute_actions(dp, skb, nla_data(acts_list), + nla_len(acts_list), true); } -/* Send a copy of this packet up to the sFlow agent, along with extra - * information about what happened to it. */ -static void sflow_sample(struct datapath *dp, struct sk_buff *skb, - const union odp_action *a, int n_actions, - gfp_t gfp, struct dp_port *dp_port) +static void execute_hash(struct sk_buff *skb, const struct nlattr *attr) { - struct odp_sflow_sample_header *hdr; - unsigned int actlen = n_actions * sizeof(union odp_action); - unsigned int hdrlen = sizeof(struct odp_sflow_sample_header); - struct sk_buff *nskb; - - nskb = skb_copy_expand(skb, actlen + hdrlen, 0, gfp); - if (!nskb) - return; - - memcpy(__skb_push(nskb, actlen), a, actlen); - hdr = (struct odp_sflow_sample_header*)__skb_push(nskb, hdrlen); - hdr->n_actions = n_actions; - hdr->sample_pool = atomic_read(&dp_port->sflow_pool); - dp_output_control(dp, nskb, _ODPL_SFLOW_NR, 0); + struct sw_flow_key *key = OVS_CB(skb)->pkt_key; + struct ovs_action_hash *hash_act = nla_data(attr); + u32 hash = 0; + + /* OVS_HASH_ALG_L4 is the only possible hash algorithm. 
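+	 * The hash_alg field therefore needs no inspection here:
+	 * skb_get_hash() already yields an L4 flow hash, which is mixed
+	 * with the caller-supplied basis, and a zero result is bumped to
+	 * 0x1 so the stored ovs_flow_hash is never zero.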
*/ + hash = skb_get_hash(skb); + hash = jhash_1word(hash, hash_act->hash_basis); + if (!hash) + hash = 0x1; + + key->ovs_flow_hash = hash; +} + +static int execute_set_action(struct sk_buff *skb, + const struct nlattr *nested_attr) +{ + int err = 0; + + switch (nla_type(nested_attr)) { + case OVS_KEY_ATTR_PRIORITY: + skb->priority = nla_get_u32(nested_attr); + break; + + case OVS_KEY_ATTR_SKB_MARK: + skb->mark = nla_get_u32(nested_attr); + break; + + case OVS_KEY_ATTR_IPV4_TUNNEL: + OVS_CB(skb)->tun_key = nla_data(nested_attr); + break; + + case OVS_KEY_ATTR_ETHERNET: + err = set_eth_addr(skb, nla_data(nested_attr)); + break; + + case OVS_KEY_ATTR_IPV4: + err = set_ipv4(skb, nla_data(nested_attr)); + break; + + case OVS_KEY_ATTR_IPV6: + err = set_ipv6(skb, nla_data(nested_attr)); + break; + + case OVS_KEY_ATTR_TCP: + err = set_tcp(skb, nla_data(nested_attr)); + break; + + case OVS_KEY_ATTR_UDP: + err = set_udp(skb, nla_data(nested_attr)); + break; + + case OVS_KEY_ATTR_SCTP: + err = set_sctp(skb, nla_data(nested_attr)); + break; + } + + return err; +} + +static int execute_recirc(struct datapath *dp, struct sk_buff *skb, + const struct nlattr *a) +{ + struct sw_flow_key recirc_key; + const struct vport *p = OVS_CB(skb)->input_vport; + uint32_t hash = OVS_CB(skb)->pkt_key->ovs_flow_hash; + int err; + + err = ovs_flow_extract(skb, p->port_no, &recirc_key); + if (err) + return err; + + recirc_key.ovs_flow_hash = hash; + recirc_key.recirc_id = nla_get_u32(a); + + ovs_dp_process_packet_with_key(skb, &recirc_key, true); + + return 0; } /* Execute a list of actions against 'skb'. */ -int execute_actions(struct datapath *dp, struct sk_buff *skb, - struct odp_flow_key *key, - const union odp_action *a, int n_actions, - gfp_t gfp) +static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, + const struct nlattr *attr, int len, bool keep_skb) { /* Every output action needs a separate clone of 'skb', but the common * case is just a single output action, so that doing a clone and * then freeing the original skbuff is wasteful. So the following code * is slightly obscure just to avoid that. */ int prev_port = -1; - u32 priority = skb->priority; - int err; + const struct nlattr *a; + int rem; - if (dp->sflow_probability) { - struct dp_port *p = OVS_CB(skb)->dp_port; - if (p) { - atomic_inc(&p->sflow_pool); - if (dp->sflow_probability == UINT_MAX || - net_random() < dp->sflow_probability) - sflow_sample(dp, skb, a, n_actions, gfp, p); - } - } + for (a = attr, rem = len; rem > 0; + a = nla_next(a, &rem)) { + int err = 0; - OVS_CB(skb)->tun_id = 0; - - for (; n_actions > 0; a++, n_actions--) { - WARN_ON_ONCE(skb_shared(skb)); if (prev_port != -1) { - do_output(dp, skb_clone(skb, gfp), prev_port); + do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port); prev_port = -1; } - switch (a->type) { - case ODPAT_OUTPUT: - prev_port = a->output.port; + switch (nla_type(a)) { + case OVS_ACTION_ATTR_OUTPUT: + prev_port = nla_get_u32(a); break; - case ODPAT_OUTPUT_GROUP: - prev_port = output_group(dp, a->output_group.group, - skb, gfp); + case OVS_ACTION_ATTR_USERSPACE: + output_userspace(dp, skb, a); break; - case ODPAT_CONTROLLER: - err = output_control(dp, skb, a->controller.arg, gfp); - if (err) { - kfree_skb(skb); - return err; - } + case OVS_ACTION_ATTR_HASH: + execute_hash(skb, a); break; - case ODPAT_SET_TUNNEL: - set_tunnel(skb, key, a->tunnel.tun_id); + case OVS_ACTION_ATTR_PUSH_VLAN: + err = push_vlan(skb, nla_data(a)); + if (unlikely(err)) /* skb already freed. 
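+			 * push_vlan() has already freed it (via
+			 * __vlan_put_tag() failing), so it must not
+			 * be freed again here.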
*/ + return err; break; - case ODPAT_SET_VLAN_VID: - case ODPAT_SET_VLAN_PCP: - skb = modify_vlan_tci(dp, skb, key, a, n_actions, gfp); - if (IS_ERR(skb)) - return PTR_ERR(skb); + case OVS_ACTION_ATTR_POP_VLAN: + err = pop_vlan(skb); break; - case ODPAT_STRIP_VLAN: - skb = strip_vlan(skb, key, gfp); - break; + case OVS_ACTION_ATTR_RECIRC: { + struct sk_buff *recirc_skb; + const bool last_action = (a->nla_len == rem); - case ODPAT_SET_DL_SRC: - case ODPAT_SET_DL_DST: - skb = set_dl_addr(skb, key, &a->dl_addr, gfp); - break; + if (!last_action || keep_skb) + recirc_skb = skb_clone(skb, GFP_ATOMIC); + else + recirc_skb = skb; - case ODPAT_SET_NW_SRC: - case ODPAT_SET_NW_DST: - skb = set_nw_addr(skb, key, &a->nw_addr, gfp); - break; + err = execute_recirc(dp, recirc_skb, a); - case ODPAT_SET_NW_TOS: - skb = set_nw_tos(skb, key, &a->nw_tos, gfp); - break; + if (last_action || err) + return err; - case ODPAT_SET_TP_SRC: - case ODPAT_SET_TP_DST: - skb = set_tp_port(skb, key, &a->tp_port, gfp); break; + } - case ODPAT_SET_PRIORITY: - skb->priority = a->priority.priority; + case OVS_ACTION_ATTR_SET: + err = execute_set_action(skb, nla_data(a)); break; - case ODPAT_POP_PRIORITY: - skb->priority = priority; + case OVS_ACTION_ATTR_SAMPLE: + err = sample(dp, skb, a); + if (unlikely(err)) /* skb already freed. */ + return err; break; } - if (!skb) - return -ENOMEM; + + if (unlikely(err)) { + kfree_skb(skb); + return err; + } } - if (prev_port != -1) + + if (prev_port != -1) { + if (keep_skb) + skb = skb_clone(skb, GFP_ATOMIC); + do_output(dp, skb, prev_port); - else - kfree_skb(skb); + } else if (!keep_skb) + consume_skb(skb); + return 0; } + +/* We limit the number of times that we pass into execute_actions() + * to avoid blowing out the stack in the event that we have a loop. + * + * Each loop adds some (estimated) cost to the kernel stack. + * The loop terminates when the max cost is exceeded. + * */ +#define RECIRC_STACK_COST 1 +#define DEFAULT_STACK_COST 4 +/* Allow up to 4 regular services, and up to 3 recirculations */ +#define MAX_STACK_COST (DEFAULT_STACK_COST * 4 + RECIRC_STACK_COST * 3) + +struct loop_counter { + u8 stack_cost; /* loop stack cost. */ + bool looping; /* Loop detected? */ +}; + +static DEFINE_PER_CPU(struct loop_counter, loop_counters); + +static int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions) +{ + if (net_ratelimit()) + pr_warn("%s: flow loop detected, dropping\n", + ovs_dp_name(dp)); + actions->actions_len = 0; + return -ELOOP; +} + +/* Execute a list of actions against 'skb'. */ +int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, bool recirc) +{ + struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); + const u8 stack_cost = recirc ? RECIRC_STACK_COST : DEFAULT_STACK_COST; + struct loop_counter *loop; + int error; + + /* Check whether we've looped too much. */ + loop = &__get_cpu_var(loop_counters); + loop->stack_cost += stack_cost; + if (unlikely(loop->stack_cost > MAX_STACK_COST)) + loop->looping = true; + if (unlikely(loop->looping)) { + error = loop_suppress(dp, acts); + kfree_skb(skb); + goto out_loop; + } + + OVS_CB(skb)->tun_key = NULL; + error = do_execute_actions(dp, skb, acts->actions, + acts->actions_len, false); + + /* Check whether sub-actions looped too much. */ + if (unlikely(loop->looping)) + error = loop_suppress(dp, acts); + +out_loop: + /* Decrement loop stack cost. */ + loop->stack_cost -= stack_cost; + if (!loop->stack_cost) + loop->looping = false; + + return error; +}
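
For reference, the recursion guard above charges each entry into
ovs_execute_actions() a fixed stack cost (DEFAULT_STACK_COST = 4 for an
ordinary call, RECIRC_STACK_COST = 1 for a recirculation) against a per-CPU
budget of MAX_STACK_COST = 4 * 4 + 1 * 3 = 19, so up to four nested ordinary
executions plus three recirculations fit before packets are dropped. The
following is a minimal standalone sketch of that accounting; the enter() and
leave() helpers and the main() driver are illustrative only and are not part
of the patch.

#include <assert.h>
#include <stdbool.h>

#define RECIRC_STACK_COST	1
#define DEFAULT_STACK_COST	4
#define MAX_STACK_COST	(DEFAULT_STACK_COST * 4 + RECIRC_STACK_COST * 3)

struct loop_counter {
	unsigned char stack_cost;	/* accumulated cost on this CPU */
	bool looping;			/* budget exceeded? */
};

/* Charge one entry; returns false once the budget is blown, mirroring
 * the loop_suppress() path in ovs_execute_actions(). */
static bool enter(struct loop_counter *loop, bool recirc)
{
	loop->stack_cost += recirc ? RECIRC_STACK_COST : DEFAULT_STACK_COST;
	if (loop->stack_cost > MAX_STACK_COST)
		loop->looping = true;
	return !loop->looping;
}

/* Release one entry; 'looping' clears only when the stack fully unwinds,
 * so every frame still on the stack sees the drop. */
static void leave(struct loop_counter *loop, bool recirc)
{
	loop->stack_cost -= recirc ? RECIRC_STACK_COST : DEFAULT_STACK_COST;
	if (!loop->stack_cost)
		loop->looping = false;
}

int main(void)
{
	struct loop_counter loop = { 0, false };
	int i;

	for (i = 0; i < 4; i++)		/* four ordinary entries: cost 16 */
		assert(enter(&loop, false));
	for (i = 0; i < 3; i++)		/* three recirculations: cost 19 */
		assert(enter(&loop, true));
	assert(!enter(&loop, true));	/* cost 20 > 19: suppressed */

	leave(&loop, true);		/* unwind the suppressed entry */
	assert(loop.looping);		/* still looping until fully unwound */
	for (i = 0; i < 3; i++)
		leave(&loop, true);
	for (i = 0; i < 4; i++)
		leave(&loop, false);
	assert(!loop.looping);		/* cleared once the cost hits zero */
	return 0;
}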