datapath: Allow the number of hash entries to exceed TBL_MAX_BUCKETS
diff --git a/datapath/tunnel.c b/datapath/tunnel.c
index 07bb00b..1ef81ab 100644
--- a/datapath/tunnel.c
+++ b/datapath/tunnel.c
@@ -186,7 +186,7 @@ struct port_lookup_key {
  * Modifies 'target' to store the rcu_dereferenced pointer that was used to do
  * the comparison.
  */
-static int port_cmp(const struct tbl_node *node, void *target)
+static int port_cmp(const struct tbl_node *node, void *target, int unused)
 {
        const struct tnl_vport *tnl_vport = tnl_vport_table_cast(node);
        struct port_lookup_key *lookup = target;
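
The comparator gains a third argument because tbl_lookup() now carries the key length alongside the key (see the lookup hunks further down); struct port_lookup_key has a fixed size, so port_cmp() simply ignores it. A minimal, self-contained sketch of the pattern, not the datapath code itself:

#include <stddef.h>
#include <string.h>

struct node { struct node *next; const void *key; };

/* The length travels with the key so the comparator can honour it. */
typedef int (*cmp_fn)(const struct node *, void *target, int key_len);

static struct node *lookup(struct node *head, void *key, int key_len,
                           cmp_fn cmp)
{
        struct node *n;

        for (n = head; n; n = n->next)
                if (cmp(n, key, key_len))
                        return n;
        return NULL;
}

/* A variable-length key compares only the bytes the caller says are valid;
 * a fixed-size key (like port_lookup_key) can ignore key_len entirely. */
static int bytes_cmp(const struct node *n, void *target, int key_len)
{
        return !memcmp(n->key, target, key_len);
}
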
@@ -249,11 +249,13 @@ static int add_port(struct vport *vport)
                struct tbl *new_table;
 
                new_table = tbl_expand(cur_table);
-               if (IS_ERR(new_table))
-                       return PTR_ERR(new_table);
-
-               rcu_assign_pointer(port_table, new_table);
-               tbl_deferred_destroy(cur_table, NULL);
+               if (IS_ERR(new_table)) {
+                       if (PTR_ERR(new_table) != -ENOSPC)
+                               return PTR_ERR(new_table);
+               } else {
+                       rcu_assign_pointer(port_table, new_table);
+                       tbl_deferred_destroy(cur_table, NULL);
+               }
        }
 
        err = tbl_insert(rtnl_dereference(port_table), &tnl_vport->tbl_node,
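
This is the core of the change named in the subject line: when tbl_expand() reports -ENOSPC the table has already reached TBL_MAX_BUCKETS, so the add proceeds into the existing (now over-subscribed) table instead of failing; any other error still aborts add_port(). A simplified sketch of that policy, with grow_if_needed() being a hypothetical name:

static int grow_if_needed(struct tbl __rcu **tablep)
{
        struct tbl *cur = rtnl_dereference(*tablep);
        struct tbl *new_table = tbl_expand(cur);

        if (IS_ERR(new_table)) {
                /* Hitting the bucket cap is tolerated: entries simply keep
                 * chaining within the existing buckets. */
                return PTR_ERR(new_table) == -ENOSPC ? 0 : PTR_ERR(new_table);
        }

        rcu_assign_pointer(*tablep, new_table);
        tbl_deferred_destroy(cur, NULL);  /* old table freed after an RCU grace period */
        return 0;
}
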
@@ -337,7 +339,8 @@ struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
                lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;
 
                if (key_local_remote_ports) {
-                       tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
+                       tbl_node = tbl_lookup(table, &lookup, sizeof(lookup),
+                                             port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;
                }
@@ -345,7 +348,8 @@ struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
                if (key_remote_ports) {
                        lookup.saddr = 0;
 
-                       tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
+                       tbl_node = tbl_lookup(table, &lookup, sizeof(lookup),
+                                             port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;
 
@@ -358,7 +362,8 @@ struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
                lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;
 
                if (local_remote_ports) {
-                       tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
+                       tbl_node = tbl_lookup(table, &lookup, sizeof(lookup),
+                                             port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;
                }
@@ -366,7 +371,8 @@ struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
                if (remote_ports) {
                        lookup.saddr = 0;
 
-                       tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
+                       tbl_node = tbl_lookup(table, &lookup, sizeof(lookup),
+                                             port_hash(&lookup), port_cmp);
                        if (tbl_node)
                                goto found;
                }
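
All four lookup sites change in the same way: tbl_lookup() now takes the key length between the key and its hash. For the port table the length is always sizeof(struct port_lookup_key), so a small wrapper (hypothetical, not part of the patch) shows the intended call shape:

static struct tbl_node *port_table_lookup(struct tbl *table,
                                          struct port_lookup_key *lookup)
{
        return tbl_lookup(table, lookup, sizeof(*lookup),
                          port_hash(lookup), port_cmp);
}
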
@@ -379,18 +385,14 @@ found:
        return tnl_vport_to_vport(tnl_vport_table_cast(tbl_node));
 }
 
-static inline void ecn_decapsulate(struct sk_buff *skb)
+static void ecn_decapsulate(struct sk_buff *skb, u8 tos)
 {
-       /* This is accessing the outer IP header of the tunnel, which we've
-        * already validated to be OK.  skb->data is currently set to the start
-        * of the inner Ethernet header, and we've validated ETH_HLEN.
-        */
-       if (unlikely(INET_ECN_is_ce(ip_hdr(skb)->tos))) {
+       if (unlikely(INET_ECN_is_ce(tos))) {
                __be16 protocol = skb->protocol;
 
                skb_set_network_header(skb, ETH_HLEN);
 
-               if (skb->protocol == htons(ETH_P_8021Q)) {
+               if (protocol == htons(ETH_P_8021Q)) {
                        if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
                                return;
 
@@ -417,17 +419,27 @@ static inline void ecn_decapsulate(struct sk_buff *skb)
        }
 }
 
-/* Called with rcu_read_lock. */
-void tnl_rcv(struct vport *vport, struct sk_buff *skb)
+/**
+ *     tnl_rcv - ingress point for generic tunnel code
+ *
+ * @vport: port this packet was received on
+ * @skb: received packet
+ * @tos: ToS from encapsulating IP packet, used to copy ECN bits
+ *
+ * Must be called with rcu_read_lock.
+ *
+ * Packets received by this function are in the following state:
+ * - skb->data points to the inner Ethernet header.
+ * - The inner Ethernet header is in the linear data area.
+ * - skb->csum does not include the inner Ethernet header.
+ * - The layer pointers are undefined.
+ */
+void tnl_rcv(struct vport *vport, struct sk_buff *skb, u8 tos)
 {
-       /* Packets received by this function are in the following state:
-        * - skb->data points to the inner Ethernet header.
-        * - The inner Ethernet header is in the linear data area.
-        * - skb->csum does not include the inner Ethernet header.
-        * - The layer pointers point at the outer headers.
-        */
+       struct ethhdr *eh;
 
-       struct ethhdr *eh = (struct ethhdr *)skb->data;
+       skb_reset_mac_header(skb);
+       eh = eth_hdr(skb);
 
        if (likely(ntohs(eh->h_proto) >= 1536))
                skb->protocol = eh->h_proto;
@@ -439,10 +451,14 @@ void tnl_rcv(struct vport *vport, struct sk_buff *skb)
        skb_clear_rxhash(skb);
        secpath_reset(skb);
 
-       ecn_decapsulate(skb);
-       compute_ip_summed(skb, false);
+       ecn_decapsulate(skb, tos);
        vlan_set_tci(skb, 0);
 
+       if (unlikely(compute_ip_summed(skb, false))) {
+               kfree_skb(skb);
+               return;
+       }
+
        vport_receive(vport, skb);
 }
 
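The new kernel-doc pins down the entry contract, and tnl_rcv() now resets the mac header itself instead of trusting the caller's layer pointers; compute_ip_summed() can also fail now, in which case the skb is freed here rather than handed to vport_receive(). A hypothetical caller-side sketch, with hdr_len standing in for the concrete protocol's encapsulation length, shows how the outer ToS is captured before the outer headers are pulled:

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

/* Hypothetical protocol receive handler, not code from this file. */
static void example_proto_rcv(struct vport *vport, struct sk_buff *skb,
                              int hdr_len)
{
        u8 tos = ip_hdr(skb)->tos;      /* outer IP header, read before pulling */

        if (unlikely(!pskb_may_pull(skb, hdr_len + ETH_HLEN))) {
                kfree_skb(skb);         /* inner Ethernet header must be linear */
                return;
        }

        __skb_pull(skb, hdr_len);       /* skb->data -> inner Ethernet header */
        tnl_rcv(vport, skb, tos);       /* resets mac header, applies ECN,
                                         * frees the skb itself on csum failure */
}
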
@@ -708,7 +724,11 @@ bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutab
            (TNL_F_IN_KEY_MATCH | TNL_F_OUT_KEY_ACTION))
                OVS_CB(nskb)->tun_id = flow_key;
 
-       compute_ip_summed(nskb, false);
+       if (unlikely(compute_ip_summed(nskb, false))) {
+               kfree_skb(nskb);
+               return false;
+       }
+
        vport_receive(vport, nskb);
 
        return true;
@@ -719,8 +739,9 @@ static bool check_mtu(struct sk_buff *skb,
                      const struct tnl_mutable_config *mutable,
                      const struct rtable *rt, __be16 *frag_offp)
 {
+       bool df_inherit = mutable->flags & TNL_F_DF_INHERIT;
        bool pmtud = mutable->flags & TNL_F_PMTUD;
-       __be16 frag_off = 0;
+       __be16 frag_off = mutable->flags & TNL_F_DF_DEFAULT ? htons(IP_DF) : 0;
        int mtu = 0;
        unsigned int packet_length = skb->len - ETH_HLEN;
 
@@ -732,8 +753,6 @@ static bool check_mtu(struct sk_buff *skb,
        if (pmtud) {
                int vlan_header = 0;
 
-               frag_off = htons(IP_DF);
-
                /* The tag needs to go in the packet regardless of where it

                 * currently is, so subtract it from the MTU.
                 */
@@ -750,7 +769,8 @@ static bool check_mtu(struct sk_buff *skb,
        if (skb->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
 
-               frag_off |= iph->frag_off & htons(IP_DF);
+               if (df_inherit)
+                       frag_off = iph->frag_off & htons(IP_DF);
 
                if (pmtud && iph->frag_off & htons(IP_DF)) {
                        mtu = max(mtu, IP_MIN_MTU);
@@ -763,8 +783,10 @@ static bool check_mtu(struct sk_buff *skb,
        }
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
-               /* IPv6 requires PMTUD if the packet is above the minimum MTU. */
-               if (packet_length > IPV6_MIN_MTU)
+               /* IPv6 requires end hosts to do fragmentation
+                * if the packet is above the minimum MTU.
+                */
+               if (df_inherit && packet_length > IPV6_MIN_MTU)
                        frag_off = htons(IP_DF);
 
                if (pmtud) {
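
Taken together, the hunks above replace the old "PMTUD implies DF" behaviour with two explicit knobs: TNL_F_DF_DEFAULT chooses the outer DF bit's default and TNL_F_DF_INHERIT copies the inner packet's fragmentation policy. A hypothetical helper condensing that decision:

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/ipv6.h>

static __be16 outer_frag_off(const struct sk_buff *skb,
                             const struct tnl_mutable_config *mutable)
{
        /* Start from the configured default for the outer header. */
        __be16 df = (mutable->flags & TNL_F_DF_DEFAULT) ? htons(IP_DF) : 0;

        if (mutable->flags & TNL_F_DF_INHERIT) {
                if (skb->protocol == htons(ETH_P_IP))
                        /* IPv4: copy the inner packet's DF bit verbatim. */
                        df = ip_hdr(skb)->frag_off & htons(IP_DF);
                else if (skb->protocol == htons(ETH_P_IPV6) &&
                         skb->len - ETH_HLEN > IPV6_MIN_MTU)
                        /* IPv6: no in-network fragmentation above the
                         * minimum MTU, so behave as if DF were set. */
                        df = htons(IP_DF);
        }
        return df;
}
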
@@ -798,7 +820,7 @@ static void create_tunnel_header(const struct vport *vport,
        iph->saddr      = rt->rt_src;
        iph->ttl        = mutable->ttl;
        if (!iph->ttl)
-               iph->ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);
+               iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));
 
        tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
 }
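
ip4_dst_hoplimit() reads the route's RTAX_HOPLIMIT metric and falls back to the system default TTL when none is set, which is presumably why the open-coded dst_metric() read was replaced. A hypothetical helper showing the resulting TTL selection:

static u8 tunnel_ttl(const struct tnl_mutable_config *mutable,
                     const struct rtable *rt)
{
        /* An explicitly configured TTL wins; otherwise take the route's
         * hop limit, which defaults to the system-wide TTL. */
        return mutable->ttl ? mutable->ttl : ip4_dst_hoplimit(&rt_dst(rt));
}
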
@@ -932,6 +954,7 @@ static struct tnl_cache *build_cache(struct vport *vport,
                struct sk_buff *skb;
                bool is_frag;
                int err;
+               int flow_key_len;
 
                dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
                if (!dst_vport)
@@ -944,14 +967,16 @@ static struct tnl_cache *build_cache(struct vport *vport,
                __skb_put(skb, cache->len);
                memcpy(skb->data, get_cached_header(cache), cache->len);
 
-               err = flow_extract(skb, dst_vport->port_no, &flow_key, &is_frag);
+               err = flow_extract(skb, dst_vport->port_no, &flow_key,
+                                  &flow_key_len, &is_frag);
 
-               kfree_skb(skb);
+               consume_skb(skb);
                if (err || is_frag)
                        goto done;
 
                flow_node = tbl_lookup(rcu_dereference(dst_vport->dp->table),
-                                      &flow_key, flow_hash(&flow_key),
+                                      &flow_key, flow_key_len,
+                                      flow_hash(&flow_key, flow_key_len),
                                       flow_cmp);
                if (flow_node) {
                        struct sw_flow *flow = flow_cast(flow_node);
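
Two independent details here: flow_extract() now also reports how many bytes of the flow key are valid, and that length is threaded through flow_hash() and tbl_lookup() so shorter keys never hash or compare trailing bytes; and the successfully parsed probe skb is released with consume_skb(), which records a normal free rather than a drop. Roughly how a length-aware hash can be built, as an assumption (jhash2() is a stock kernel helper; whether flow_hash() uses exactly this internally is not shown here):

#include <linux/jhash.h>
#include <linux/kernel.h>

static u32 sketch_flow_hash(const struct sw_flow_key *key, int key_len)
{
        /* Hash only the valid prefix of the key, rounded up to u32 words,
         * so unparsed trailing fields never perturb the bucket choice. */
        return jhash2((const u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), 0);
}
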
@@ -985,6 +1010,7 @@ static struct rtable *find_route(struct vport *vport,
                return cur_cache->rt;
        } else {
                struct rtable *rt;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
                struct flowi fl = { .nl_u = { .ip4_u =
                                              { .daddr = mutable->daddr,
                                                .saddr = mutable->saddr,
@@ -993,6 +1019,16 @@ static struct rtable *find_route(struct vport *vport,
 
                if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
                        return NULL;
+#else
+               struct flowi4 fl = { .daddr = mutable->daddr,
+                                    .saddr = mutable->saddr,
+                                    .flowi4_tos = tos,
+                                    .flowi4_proto = tnl_vport->tnl_ops->ipproto };
+
+               rt = ip_route_output_key(&init_net, &fl);
+               if (IS_ERR(rt))
+                       return NULL;
+#endif
 
                if (likely(tos == mutable->tos))
                        *cache = build_cache(vport, mutable, rt);
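
The route lookup grows a kernel-version split: since 2.6.39, ip_route_output_key() takes a struct flowi4 and returns the rtable directly (ERR_PTR-encoded on failure) instead of filling an out-parameter. A standalone sketch of the newer form, with route_to() being a hypothetical wrapper:

#include <linux/err.h>
#include <net/net_namespace.h>
#include <net/route.h>

static struct rtable *route_to(__be32 daddr, __be32 saddr, u8 tos, u8 proto)
{
        struct flowi4 fl = {
                .daddr        = daddr,
                .saddr        = saddr,
                .flowi4_tos   = tos,
                .flowi4_proto = proto,
        };
        struct rtable *rt = ip_route_output_key(&init_net, &fl);

        /* Errors come back as ERR_PTR(), not via a separate return code. */
        return IS_ERR(rt) ? NULL : rt;
}
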
@@ -1001,27 +1037,6 @@ static struct rtable *find_route(struct vport *vport,
        }
 }
 
-static struct sk_buff *check_headroom(struct sk_buff *skb, int headroom)
-{
-       if (skb_headroom(skb) < headroom || skb_header_cloned(skb)) {
-               struct sk_buff *nskb = skb_realloc_headroom(skb, headroom + 16);
-               if (unlikely(!nskb)) {
-                       kfree_skb(skb);
-                       return ERR_PTR(-ENOMEM);
-               }
-
-               set_skb_csum_bits(skb, nskb);
-
-               if (skb->sk)
-                       skb_set_owner_w(nskb, skb->sk);
-
-               kfree_skb(skb);
-               return nskb;
-       }
-
-       return skb;
-}
-
 static inline bool need_linearize(const struct sk_buff *skb)
 {
        int i;
@@ -1048,34 +1063,35 @@ static struct sk_buff *handle_offloads(struct sk_buff *skb,
        int min_headroom;
        int err;
 
-       forward_ip_summed(skb);
-
-       err = vswitch_skb_checksum_setup(skb);
-       if (unlikely(err))
-               goto error_free;
-
        min_headroom = LL_RESERVED_SPACE(rt_dst(rt).dev) + rt_dst(rt).header_len
                        + mutable->tunnel_hlen
                        + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
 
-       skb = check_headroom(skb, min_headroom);
-       if (IS_ERR(skb)) {
-               err = PTR_ERR(skb);
-               goto error;
+       if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
+               int head_delta = SKB_DATA_ALIGN(min_headroom -
+                                               skb_headroom(skb) +
+                                               16);
+               err = pskb_expand_head(skb, max_t(int, head_delta, 0),
+                                       0, GFP_ATOMIC);
+               if (unlikely(err))
+                       goto error_free;
        }
 
+       forward_ip_summed(skb, true);
+
        if (skb_is_gso(skb)) {
                struct sk_buff *nskb;
 
                nskb = skb_gso_segment(skb, 0);
-               kfree_skb(skb);
                if (IS_ERR(nskb)) {
+                       kfree_skb(skb);
                        err = PTR_ERR(nskb);
                        goto error;
                }
 
+               consume_skb(skb);
                skb = nskb;
-       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+       } else if (get_ip_summed(skb) == OVS_CSUM_PARTIAL) {
                /* Pages aren't locked and could change at any time.
                 * If this happens after we compute the checksum, the
                 * checksum will be wrong.  We linearize now to avoid
@@ -1090,8 +1106,9 @@ static struct sk_buff *handle_offloads(struct sk_buff *skb,
                err = skb_checksum_help(skb);
                if (unlikely(err))
                        goto error_free;
-       } else if (skb->ip_summed == CHECKSUM_COMPLETE)
-               skb->ip_summed = CHECKSUM_NONE;
+       }
+
+       set_ip_summed(skb, OVS_CSUM_NONE);
 
        return skb;
 
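With check_headroom() gone (removed above), handle_offloads() grows the existing skb's head in place with pskb_expand_head(), which keeps skb->cb, socket ownership and the checksum bookkeeping intact instead of copying them into a replacement skb; the GSO path likewise frees the original only after segmentation has succeeded. A hypothetical helper isolating the headroom step:

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

static int ensure_headroom(struct sk_buff *skb, int min_headroom)
{
        int delta;

        if (skb_headroom(skb) >= min_headroom && !skb_header_cloned(skb))
                return 0;

        /* Round the shortfall up for alignment and add a little slack,
         * mirroring the "+ 16" in the patch. */
        delta = SKB_DATA_ALIGN(min_headroom - skb_headroom(skb) + 16);
        return pskb_expand_head(skb, max_t(int, delta, 0), 0, GFP_ATOMIC);
}
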
@@ -1227,7 +1244,7 @@ int tnl_send(struct vport *vport, struct sk_buff *skb)
        /* TTL */
        ttl = mutable->ttl;
        if (!ttl)
-               ttl = dst_metric(&rt_dst(rt), RTAX_HOPLIMIT);
+               ttl = ip4_dst_hoplimit(&rt_dst(rt));
 
        if (mutable->flags & TNL_F_TTL_INHERIT) {
                if (skb->protocol == htons(ETH_P_IP))
@@ -1286,8 +1303,12 @@ int tnl_send(struct vport *vport, struct sk_buff *skb)
                        ip_send_check(iph);
 
                        if (cache_vport) {
+                               if (unlikely(compute_ip_summed(skb, true))) {
+                                       kfree_skb(skb);
+                                       goto next;
+                               }
+
                                OVS_CB(skb)->flow = cache->flow;
-                               compute_ip_summed(skb, true);
                                vport_receive(cache_vport, skb);
                                sent_len += orig_len;
                        } else {
@@ -1314,9 +1335,9 @@ next:
 error_free:
        tnl_free_linked_skbs(skb);
 error:
-       dst_release(unattached_dst);
        vport_record_error(vport, err);
 out:
+       dst_release(unattached_dst);
        return sent_len;
 }
 
@@ -1571,9 +1592,6 @@ const unsigned char *tnl_get_addr(const struct vport *vport)
 
 void tnl_free_linked_skbs(struct sk_buff *skb)
 {
-       if (unlikely(!skb))
-               return;
-
        while (skb) {
                struct sk_buff *next = skb->next;
                kfree_skb(skb);