X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=datapath%2Factions.c;h=7fe2f5454b44203108e7b97c6d1a408f8b2bd972;hb=e2f3178f0582eda302bdc5629189b6a56d9fbcdd;hp=ac7187bc3000bea61ec736ed2f753d16b5809862;hpb=39640c1b7174299194dbc2756257693ec6e1c8c3;p=sliver-openvswitch.git

diff --git a/datapath/actions.c b/datapath/actions.c
index ac7187bc3..7fe2f5454 100644
--- a/datapath/actions.c
+++ b/datapath/actions.c
@@ -1,35 +1,45 @@
 /*
- * Distributed under the terms of the GNU GPL version 2.
- * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
+ * Copyright (c) 2007-2014 Nicira, Inc.
  *
- * Significant portions of this file may be copied from parts of the Linux
- * kernel, by Linus Torvalds and others.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
  */

-/* Functions for executing flow actions. */
-
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
+#include

-#include "checksum.h"
 #include "datapath.h"
 #include "vlan.h"
 #include "vport.h"

 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
-			const struct nlattr *attr, int len, bool keep_skb);
+			      const struct nlattr *attr, int len, bool keep_skb);

 static int make_writable(struct sk_buff *skb, int write_len)
 {
@@ -39,30 +49,29 @@ static int make_writable(struct sk_buff *skb, int write_len)

 	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 }

-/* remove VLAN header from packet and update csum accrodingly. */
+/* remove VLAN header from packet and update csum accordingly. */
 static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
 {
-	struct ethhdr *eh;
-	struct vlan_ethhdr *veth;
+	struct vlan_hdr *vhdr;
 	int err;

 	err = make_writable(skb, VLAN_ETH_HLEN);
 	if (unlikely(err))
 		return err;

-	if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
 		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
-					+ ETH_HLEN, VLAN_HLEN, 0));
+					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

-	veth = (struct vlan_ethhdr *) skb->data;
-	*current_tci = veth->h_vlan_TCI;
+	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
+	*current_tci = vhdr->h_vlan_TCI;

 	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+	__skb_pull(skb, VLAN_HLEN);

-	eh = (struct ethhdr *)__skb_pull(skb, VLAN_HLEN);
-
-	skb->protocol = eh->h_proto;
+	vlan_set_encap_proto(skb, vhdr);
 	skb->mac_header += VLAN_HLEN;
+	skb_reset_mac_len(skb);

 	return 0;
 }
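The rewritten __pop_vlan_tci() reads the TCI through a struct vlan_hdr that begins after the 14-byte Ethernet header, slides the two MAC addresses over the tag, and adjusts a CHECKSUM_COMPLETE value by exactly the four tag bytes sitting at offset 2 * ETH_ALEN. A stand-alone sketch of the same buffer surgery on a raw frame (illustrative names, not the datapath code):

    #include <stdint.h>
    #include <string.h>

    #define ETH_ALEN  6
    #define VLAN_HLEN 4

    /* Return a pointer to the untagged frame and store the TCI in
     * network byte order.  'frame' must hold at least 18 bytes. */
    static uint8_t *strip_vlan(uint8_t *frame, uint16_t *tci)
    {
            /* The TCI is the last 2 bytes of the 4-byte tag that
             * follows the two MAC addresses (offset 12). */
            memcpy(tci, frame + 2 * ETH_ALEN + 2, sizeof(*tci));

            /* Slide both MACs over the tag, then advance the start of
             * frame -- the same memmove/__skb_pull pair as above. */
            memmove(frame + VLAN_HLEN, frame, 2 * ETH_ALEN);
            return frame + VLAN_HLEN;
    }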
@@ -92,11 +101,11 @@ static int pop_vlan(struct sk_buff *skb)
 	if (unlikely(err))
 		return err;

-	__vlan_hwaccel_put_tag(skb, ntohs(tci));
+	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
 	return 0;
 }

-static int push_vlan(struct sk_buff *skb, const struct ovs_key_8021q *q_key)
+static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
 {
 	if (unlikely(vlan_tx_tag_present(skb))) {
 		u16 current_tag;
@@ -104,15 +113,15 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_key_8021q *q_key)

 		/* push down current VLAN tag */
 		current_tag = vlan_tx_tag_get(skb);

-		if (!__vlan_put_tag(skb, current_tag))
+		if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
 			return -ENOMEM;

-		if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
+		if (skb->ip_summed == CHECKSUM_COMPLETE)
 			skb->csum = csum_add(skb->csum, csum_partial(skb->data
-					+ ETH_HLEN, VLAN_HLEN, 0));
+					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

 	}
-	__vlan_hwaccel_put_tag(skb, ntohs(q_key->q_tci));
+	__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
 	return 0;
 }
@@ -124,8 +133,12 @@ static int set_eth_addr(struct sk_buff *skb,
 	if (unlikely(err))
 		return err;

-	memcpy(eth_hdr(skb)->h_source, eth_key->eth_src, ETH_ALEN);
-	memcpy(eth_hdr(skb)->h_dest, eth_key->eth_dst, ETH_ALEN);
+	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
+
+	ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
+	ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);
+
+	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

 	return 0;
 }
@@ -140,16 +153,70 @@ static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
 		inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
 					 *addr, new_addr, 1);
 	} else if (nh->protocol == IPPROTO_UDP) {
-		if (likely(transport_len >= sizeof(struct udphdr)))
-			inet_proto_csum_replace4(&udp_hdr(skb)->check, skb,
-						 *addr, new_addr, 1);
+		if (likely(transport_len >= sizeof(struct udphdr))) {
+			struct udphdr *uh = udp_hdr(skb);
+
+			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+				inet_proto_csum_replace4(&uh->check, skb,
+							 *addr, new_addr, 1);
+				if (!uh->check)
+					uh->check = CSUM_MANGLED_0;
+			}
+		}
 	}

 	csum_replace4(&nh->check, *addr, new_addr);
-	skb_clear_rxhash(skb);
+	skb_clear_hash(skb);
 	*addr = new_addr;
 }

+static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
+				 __be32 addr[4], const __be32 new_addr[4])
+{
+	int transport_len = skb->len - skb_transport_offset(skb);
+
+	if (l4_proto == IPPROTO_TCP) {
+		if (likely(transport_len >= sizeof(struct tcphdr)))
+			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
+						  addr, new_addr, 1);
+	} else if (l4_proto == IPPROTO_UDP) {
+		if (likely(transport_len >= sizeof(struct udphdr))) {
+			struct udphdr *uh = udp_hdr(skb);
+
+			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+				inet_proto_csum_replace16(&uh->check, skb,
+							  addr, new_addr, 1);
+				if (!uh->check)
+					uh->check = CSUM_MANGLED_0;
+			}
+		}
+	}
+}
+
+static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
+			  __be32 addr[4], const __be32 new_addr[4],
+			  bool recalculate_csum)
+{
+	if (recalculate_csum)
+		update_ipv6_checksum(skb, l4_proto, addr, new_addr);
+
+	skb_clear_hash(skb);
+	memcpy(addr, new_addr, sizeof(__be32[4]));
+}
+
+static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
+{
+	nh->priority = tc >> 4;
+	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
+}
+
+static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
+{
+	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16;
+	nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
+	nh->flow_lbl[2] = fl & 0x000000FF;
+}
+
 static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
 {
 	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
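set_ipv6_tc() and set_ipv6_fl() exist because the 8-bit traffic class and 20-bit flow label straddle byte boundaries in struct ipv6hdr: 'priority' holds tc[7:4], flow_lbl[0] carries tc[3:0] in its high nibble plus fl[19:16] in its low nibble, and the remaining two bytes hold fl[15:0]. A minimal user-space mirror of the packing, using a hypothetical stand-in for the header tail:

    #include <stdint.h>

    /* Hypothetical stand-in for the relevant fields of struct ipv6hdr. */
    struct v6_bits {
            uint8_t priority;      /* only the low 4 bits are used */
            uint8_t flow_lbl[3];
    };

    static void pack_tc_fl(struct v6_bits *nh, uint8_t tc, uint32_t fl)
    {
            nh->priority    = tc >> 4;
            nh->flow_lbl[0] = ((tc & 0x0F) << 4) | ((fl & 0x000F0000) >> 16);
            nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
            nh->flow_lbl[2] = fl & 0x000000FF;
    }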
@@ -183,17 +250,72 @@ static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)

 	return 0;
 }

+static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
+{
+	struct ipv6hdr *nh;
+	int err;
+	__be32 *saddr;
+	__be32 *daddr;
+
+	err = make_writable(skb, skb_network_offset(skb) +
+			    sizeof(struct ipv6hdr));
+	if (unlikely(err))
+		return err;
+
+	nh = ipv6_hdr(skb);
+	saddr = (__be32 *)&nh->saddr;
+	daddr = (__be32 *)&nh->daddr;
+
+	if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src)))
+		set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
+			      ipv6_key->ipv6_src, true);
+
+	if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
+		unsigned int offset = 0;
+		int flags = OVS_IP6T_FH_F_SKIP_RH;
+		bool recalc_csum = true;
+
+		if (ipv6_ext_hdr(nh->nexthdr))
+			recalc_csum = ipv6_find_hdr(skb, &offset,
+						    NEXTHDR_ROUTING, NULL,
+						    &flags) != NEXTHDR_ROUTING;
+
+		set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
+			      ipv6_key->ipv6_dst, recalc_csum);
+	}
+
+	set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
+	set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
+	nh->hop_limit = ipv6_key->ipv6_hlimit;
+
+	return 0;
+}
+
 /* Must follow make_writable() since that can move the skb data. */
 static void set_tp_port(struct sk_buff *skb, __be16 *port,
 			__be16 new_port, __sum16 *check)
 {
 	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
 	*port = new_port;
-	skb_clear_rxhash(skb);
+	skb_clear_hash(skb);
+}
+
+static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
+{
+	struct udphdr *uh = udp_hdr(skb);
+
+	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
+		set_tp_port(skb, port, new_port, &uh->check);
+
+		if (!uh->check)
+			uh->check = CSUM_MANGLED_0;
+	} else {
+		*port = new_port;
+		skb_clear_hash(skb);
+	}
 }

-static int set_udp_port(struct sk_buff *skb,
-			const struct ovs_key_udp *udp_port_key)
+static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
 {
 	struct udphdr *uh;
 	int err;
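set_udp_port() encodes the UDP-over-IPv4 rule: a transmitted checksum of zero means "no checksum computed", so a header whose checksum legitimately computes to zero must go out as 0xFFFF (the same value in one's-complement arithmetic), and a packet that arrived without a checksum keeps the field untouched. Roughly, as a hedged stand-alone illustration:

    #include <stdint.h>

    #define CSUM_MANGLED_0 0xFFFFu   /* one's-complement alias of 0 */

    /* Fold a freshly computed UDP checksum for transmission: 0x0000
     * on the wire would claim "no checksum present", so its alias
     * 0xFFFF is sent instead. */
    static uint16_t udp_fold_csum(uint16_t computed)
    {
            return computed ? computed : (uint16_t)CSUM_MANGLED_0;
    }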
@@ -205,16 +327,15 @@ static int set_udp_port(struct sk_buff *skb,

 	uh = udp_hdr(skb);
 	if (udp_port_key->udp_src != uh->source)
-		set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check);
+		set_udp_port(skb, &uh->source, udp_port_key->udp_src);

 	if (udp_port_key->udp_dst != uh->dest)
-		set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check);
+		set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);

 	return 0;
 }

-static int set_tcp_port(struct sk_buff *skb,
-			const struct ovs_key_tcp *tcp_port_key)
+static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
 {
 	struct tcphdr *th;
 	int err;
@@ -234,6 +355,39 @@ static int set_tcp_port(struct sk_buff *skb,
 	return 0;
 }

+static int set_sctp(struct sk_buff *skb,
+		    const struct ovs_key_sctp *sctp_port_key)
+{
+	struct sctphdr *sh;
+	int err;
+	unsigned int sctphoff = skb_transport_offset(skb);
+
+	err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
+	if (unlikely(err))
+		return err;
+
+	sh = sctp_hdr(skb);
+	if (sctp_port_key->sctp_src != sh->source ||
+	    sctp_port_key->sctp_dst != sh->dest) {
+		__le32 old_correct_csum, new_csum, old_csum;
+
+		old_csum = sh->checksum;
+		old_correct_csum = sctp_compute_cksum(skb, sctphoff);
+
+		sh->source = sctp_port_key->sctp_src;
+		sh->dest = sctp_port_key->sctp_dst;
+
+		new_csum = sctp_compute_cksum(skb, sctphoff);
+
+		/* Carry any checksum errors through. */
+		sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
+
+		skb_clear_hash(skb);
+	}
+
+	return 0;
+}
+
 static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
 {
 	struct vport *vport;
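The XOR line in set_sctp() is the interesting part: rather than writing the freshly computed CRC32c, it re-applies whatever delta already separated the received checksum from the correct one, so corruption that arrived with the packet remains detectable after the port rewrite. A self-contained demonstration with made-up checksum values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t correct_old = 0xdeadbeef;   /* CRC32c before rewrite */
            uint32_t correct_new = 0x0badcafe;   /* CRC32c after rewrite  */
            uint32_t error       = 0x00010000;   /* corruption on the wire */
            uint32_t old = correct_old ^ error;  /* what actually arrived  */

            uint32_t carried = old ^ correct_old ^ correct_new;

            /* The rewritten packet carries exactly the same error term,
             * so a downstream verifier still rejects it. */
            assert((carried ^ correct_new) == error);
            return 0;
    }

Had set_sctp() simply installed new_csum, a mangled packet would have been laundered into a valid-looking one.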
@@ -241,13 +395,13 @@ static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
 	if (unlikely(!skb))
 		return -ENOMEM;

-	vport = rcu_dereference(dp->ports[out_port]);
+	vport = ovs_vport_rcu(dp, out_port);
 	if (unlikely(!vport)) {
 		kfree_skb(skb);
 		return -ENODEV;
 	}

-	vport_send(vport, skb);
+	ovs_vport_send(vport, skb);
 	return 0;
 }
@@ -258,10 +412,12 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
 	const struct nlattr *a;
 	int rem;

+	BUG_ON(!OVS_CB(skb)->pkt_key);
+
 	upcall.cmd = OVS_PACKET_CMD_ACTION;
-	upcall.key = &OVS_CB(skb)->flow->key;
+	upcall.key = OVS_CB(skb)->pkt_key;
 	upcall.userdata = NULL;
-	upcall.pid = 0;
+	upcall.portid = 0;

 	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
 		 a = nla_next(a, &rem)) {
@@ -271,12 +427,12 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
 			break;

 		case OVS_USERSPACE_ATTR_PID:
-			upcall.pid = nla_get_u32(a);
+			upcall.portid = nla_get_u32(a);
 			break;
 		}
 	}

-	return dp_upcall(dp, skb, &upcall);
+	return ovs_dp_upcall(dp, skb, &upcall);
 }

 static int sample(struct datapath *dp, struct sk_buff *skb,
@@ -290,7 +446,7 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
 		 a = nla_next(a, &rem)) {
 		switch (nla_type(a)) {
 		case OVS_SAMPLE_ATTR_PROBABILITY:
-			if (net_random() >= nla_get_u32(a))
+			if (prandom_u32() >= nla_get_u32(a))
 				return 0;
 			break;
@@ -301,7 +457,22 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
 	}

 	return do_execute_actions(dp, skb, nla_data(acts_list),
-				  nla_len(acts_list), true);
+				  nla_len(acts_list), true);
+}
+
+static void execute_hash(struct sk_buff *skb, const struct nlattr *attr)
+{
+	struct sw_flow_key *key = OVS_CB(skb)->pkt_key;
+	struct ovs_action_hash *hash_act = nla_data(attr);
+	u32 hash = 0;
+
+	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
+	hash = skb_get_hash(skb);
+	hash = jhash_1word(hash, hash_act->hash_basis);
+	if (!hash)
+		hash = 0x1;
+
+	key->ovs_flow_hash = hash;
 }

 static int execute_set_action(struct sk_buff *skb,
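sample() draws one fresh 32-bit random number per packet and runs the nested action list only when it falls below OVS_SAMPLE_ATTR_PROBABILITY, so userspace expresses a probability p as p * UINT32_MAX. A rough user-space equivalent, with a toy PRNG standing in for the kernel's prandom_u32():

    #include <stdbool.h>
    #include <stdint.h>

    /* Toy xorshift32 in place of prandom_u32(); seed must be nonzero. */
    static uint32_t prng_state = 0x12345678;

    static uint32_t prng_u32(void)
    {
            uint32_t x = prng_state;
            x ^= x << 13;
            x ^= x >> 17;
            x ^= x << 5;
            return prng_state = x;
    }

    static bool should_sample(uint32_t probability)
    {
            /* probability == UINT32_MAX samples everything;
             * UINT32_MAX / 2 samples roughly half the packets. */
            return prng_u32() < probability;
    }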
@@ -314,8 +485,12 @@
 		skb->priority = nla_get_u32(nested_attr);
 		break;

-	case OVS_KEY_ATTR_TUN_ID:
-		OVS_CB(skb)->tun_id = nla_get_be64(nested_attr);
+	case OVS_KEY_ATTR_SKB_MARK:
+		skb->mark = nla_get_u32(nested_attr);
+		break;
+
+	case OVS_KEY_ATTR_IPV4_TUNNEL:
+		OVS_CB(skb)->tun_key = nla_data(nested_attr);
 		break;

 	case OVS_KEY_ATTR_ETHERNET:
@@ -326,18 +501,46 @@ static int execute_set_action(struct sk_buff *skb,
 		err = set_ipv4(skb, nla_data(nested_attr));
 		break;

+	case OVS_KEY_ATTR_IPV6:
+		err = set_ipv6(skb, nla_data(nested_attr));
+		break;
+
 	case OVS_KEY_ATTR_TCP:
-		err = set_tcp_port(skb, nla_data(nested_attr));
+		err = set_tcp(skb, nla_data(nested_attr));
 		break;

 	case OVS_KEY_ATTR_UDP:
-		err = set_udp_port(skb, nla_data(nested_attr));
+		err = set_udp(skb, nla_data(nested_attr));
+		break;
+
+	case OVS_KEY_ATTR_SCTP:
+		err = set_sctp(skb, nla_data(nested_attr));
 		break;
 	}

 	return err;
 }

+static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
+			  const struct nlattr *a)
+{
+	struct sw_flow_key recirc_key;
+	const struct vport *p = OVS_CB(skb)->input_vport;
+	uint32_t hash = OVS_CB(skb)->pkt_key->ovs_flow_hash;
+	int err;
+
+	err = ovs_flow_extract(skb, p->port_no, &recirc_key);
+	if (err)
+		return err;
+
+	recirc_key.ovs_flow_hash = hash;
+	recirc_key.recirc_id = nla_get_u32(a);
+
+	ovs_dp_process_packet_with_key(skb, &recirc_key, true);
+
+	return 0;
+}
+
 /* Execute a list of actions against 'skb'. */
 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 			      const struct nlattr *attr, int len, bool keep_skb)
@@ -368,24 +571,45 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 			output_userspace(dp, skb, a);
 			break;

-		case OVS_ACTION_ATTR_PUSH:
-			/* Only supported push action is on vlan tag. */
-			err = push_vlan(skb, nla_data(nla_data(a)));
+		case OVS_ACTION_ATTR_HASH:
+			execute_hash(skb, a);
+			break;
+
+		case OVS_ACTION_ATTR_PUSH_VLAN:
+			err = push_vlan(skb, nla_data(a));
 			if (unlikely(err)) /* skb already freed. */
 				return err;
 			break;

-		case OVS_ACTION_ATTR_POP:
-			/* Only supported pop action is on vlan tag. */
+		case OVS_ACTION_ATTR_POP_VLAN:
 			err = pop_vlan(skb);
 			break;

+		case OVS_ACTION_ATTR_RECIRC: {
+			struct sk_buff *recirc_skb;
+			const bool last_action = (a->nla_len == rem);
+
+			if (!last_action || keep_skb)
+				recirc_skb = skb_clone(skb, GFP_ATOMIC);
+			else
+				recirc_skb = skb;
+
+			err = execute_recirc(dp, recirc_skb, a);
+
+			if (last_action || err)
+				return err;
+
+			break;
+		}
+
 		case OVS_ACTION_ATTR_SET:
 			err = execute_set_action(skb, nla_data(a));
 			break;

 		case OVS_ACTION_ATTR_SAMPLE:
 			err = sample(dp, skb, a);
+			if (unlikely(err)) /* skb already freed. */
+				return err;
 			break;
 		}
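The RECIRC case skips an skb_clone() when recirculation is the final action and the caller allowed the skb to be consumed; "final" is detectable because the walk's byte counter rem shrinks as nla_next() advances, so the current attribute is the last one exactly when its own length accounts for everything that remains. A sketch of that walk, with a simplified attribute layout and hypothetical names:

    #include <stdbool.h>
    #include <stdint.h>

    struct nlattr {
            uint16_t nla_len;    /* header + payload, before padding */
            uint16_t nla_type;
    };

    #define NLA_ALIGN(len) (((len) + 3) & ~3)

    static const struct nlattr *attr_next(const struct nlattr *a, int *rem)
    {
            int step = NLA_ALIGN(a->nla_len);

            *rem -= step;
            return (const struct nlattr *)((const uint8_t *)a + step);
    }

    static void walk(const struct nlattr *attr, int len)
    {
            const struct nlattr *a;
            int rem;

            for (a = attr, rem = len; rem > 0; a = attr_next(a, &rem)) {
                    bool last_action = (a->nla_len == rem);

                    /* Clone the packet only if something after this
                     * action still needs it. */
                    (void)last_action;
            }
    }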
@@ -407,11 +631,18 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 }

 /* We limit the number of times that we pass into execute_actions()
- * to avoid blowing out the stack in the event that we have a loop. */
-#define MAX_LOOPS 5
+ * to avoid blowing out the stack in the event that we have a loop.
+ *
+ * Each loop adds some (estimated) cost to the kernel stack.
+ * The loop terminates when the max cost is exceeded.
+ * */
+#define RECIRC_STACK_COST 1
+#define DEFAULT_STACK_COST 4
+/* Allow up to 4 regular services, and up to 3 recirculations */
+#define MAX_STACK_COST (DEFAULT_STACK_COST * 4 + RECIRC_STACK_COST * 3)

 struct loop_counter {
-	u8 count;		/* Count. */
+	u8 stack_cost;		/* loop stack cost. */
 	bool looping;		/* Loop detected? */
 };

@@ -420,22 +651,24 @@ static DEFINE_PER_CPU(struct loop_counter, loop_counters);
 static int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
 {
 	if (net_ratelimit())
-		pr_warn("%s: flow looped %d times, dropping\n",
-				dp_name(dp), MAX_LOOPS);
+		pr_warn("%s: flow loop detected, dropping\n",
+				ovs_dp_name(dp));
 	actions->actions_len = 0;
 	return -ELOOP;
 }

 /* Execute a list of actions against 'skb'. */
-int execute_actions(struct datapath *dp, struct sk_buff *skb)
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, bool recirc)
 {
 	struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
+	const u8 stack_cost = recirc ? RECIRC_STACK_COST : DEFAULT_STACK_COST;
 	struct loop_counter *loop;
 	int error;

 	/* Check whether we've looped too much. */
 	loop = &__get_cpu_var(loop_counters);
-	if (unlikely(++loop->count > MAX_LOOPS))
+	loop->stack_cost += stack_cost;
+	if (unlikely(loop->stack_cost > MAX_STACK_COST))
 		loop->looping = true;
 	if (unlikely(loop->looping)) {
 		error = loop_suppress(dp, acts);
@@ -443,7 +676,7 @@ int execute_actions(struct datapath *dp, struct sk_buff *skb)
 		goto out_loop;
 	}

-	OVS_CB(skb)->tun_id = 0;
+	OVS_CB(skb)->tun_key = NULL;
 	error = do_execute_actions(dp, skb, acts->actions,
 					 acts->actions_len, false);

@@ -452,8 +685,9 @@ int execute_actions(struct datapath *dp, struct sk_buff *skb)
 		error = loop_suppress(dp, acts);

 out_loop:
-	/* Decrement loop stack cost. */
+	/* Decrement loop stack cost. */
-	if (!--loop->count)
+	loop->stack_cost -= stack_cost;
+	if (!loop->stack_cost)
 		loop->looping = false;

 	return error;
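The budget arithmetic comes out to DEFAULT_STACK_COST * 4 + RECIRC_STACK_COST * 3 = 19: a fresh entry into the executor charges 4 units against the per-CPU counter, a recursive entry via recirculation charges only 1, and the charge is unwound on the way out so only genuine nesting accumulates. A stand-alone mock of the guard (not the kernel code; the recursion here stands in for recirculation re-entering the executor):

    #include <stdbool.h>
    #include <stdio.h>

    #define RECIRC_STACK_COST  1
    #define DEFAULT_STACK_COST 4
    #define MAX_STACK_COST (DEFAULT_STACK_COST * 4 + RECIRC_STACK_COST * 3)

    static unsigned stack_cost;     /* per-CPU in the real datapath */

    static int execute(bool recirc, int depth)
    {
            unsigned cost = recirc ? RECIRC_STACK_COST : DEFAULT_STACK_COST;
            int err = 0;

            stack_cost += cost;
            if (stack_cost > MAX_STACK_COST)
                    err = -1;                        /* -ELOOP upstream */
            else if (depth > 0)
                    err = execute(true, depth - 1);  /* one recirculation */

            stack_cost -= cost;                      /* unwind on exit */
            return err;
    }

    int main(void)
    {
            /* One full execution (cost 4) leaves room for 15 cheap
             * recirculation levels; the 16th would trip the guard. */
            printf("budget %d, deep recirc ok: %d\n", MAX_STACK_COST,
                   execute(false, 15) == 0);
            return 0;
    }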