-static struct tnl_cache *build_cache(struct vport *vport,
-				     const struct tnl_mutable_config *mutable,
-				     struct rtable *rt)
-{
-	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
-	struct tnl_cache *cache;
-	void *cache_data;
-	int cache_len;
-
-	if (!(mutable->port_config.flags & TNL_F_HDR_CACHE))
-		return NULL;
-
-	/*
-	 * If there is no entry in the ARP cache or if this device does not
-	 * support hard header caching just fall back to the IP stack.
-	 */
-	if (!rt_dst(rt).hh)
-		return NULL;
-
-	/*
-	 * If lock is contended fall back to directly building the header.
-	 * We're not going to help performance by sitting here spinning.
-	 */
-	if (!spin_trylock_bh(&tnl_vport->cache_lock))
-		return NULL;
-
-	cache = tnl_vport->cache;
-	if (check_cache_valid(cache, mutable))
-		goto unlock;
-	else
-		cache = NULL;
-
-	cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;
-
-	cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
-			cache_len, GFP_ATOMIC);
-	if (!cache)
-		goto unlock;
-
-	cache->len = cache_len;
-
-	create_eth_hdr(cache, rt);
-	cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;
-
-	create_tunnel_header(vport, mutable, rt, cache_data);
-
-	cache->mutable_seq = mutable->seq;
-	cache->rt = rt;
-#ifdef NEED_CACHE_TIMEOUT
-	cache->expiration = jiffies + tnl_vport->cache_exp_interval;
-#endif
-
-	if (is_internal_dev(rt_dst(rt).dev)) {
-		int err;
-		struct vport *vport;
-		struct dp_port *dp_port;
-		struct sk_buff *skb;
-		bool is_frag;
-		struct odp_flow_key flow_key;
-		struct tbl_node *flow_node;
-
-		vport = internal_dev_get_vport(rt_dst(rt).dev);
-		if (!vport)
-			goto done;
-
-		dp_port = vport_get_dp_port(vport);
-		if (!dp_port)
-			goto done;
-
-		skb = alloc_skb(cache->len, GFP_ATOMIC);
-		if (!skb)
-			goto done;
-
-		__skb_put(skb, cache->len);
-		memcpy(skb->data, get_cached_header(cache), cache->len);
-
-		err = flow_extract(skb, dp_port->port_no, &flow_key, &is_frag);
-
-		kfree_skb(skb);
-		if (err || is_frag)
-			goto done;
-
-		flow_node = tbl_lookup(rcu_dereference(dp_port->dp->table),
-				       &flow_key, flow_hash(&flow_key),
-				       flow_cmp);
-		if (flow_node) {
-			struct sw_flow *flow = flow_cast(flow_node);
-
-			cache->flow = flow;
-			flow_hold(flow);
-		}
-	}
-
-done:
-	assign_cache_rcu(vport, cache);
-
-unlock:
-	spin_unlock_bh(&tnl_vport->cache_lock);
-
-	return cache;
-}
-
-static struct rtable *find_route(struct vport *vport,
-				 const struct tnl_mutable_config *mutable,
-				 u8 tos, struct tnl_cache **cache)
-{
-	struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
-	struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);
-
-	*cache = NULL;
-	tos = RT_TOS(tos);
-
-	if (likely(tos == mutable->port_config.tos &&
-		   check_cache_valid(cur_cache, mutable))) {
-		*cache = cur_cache;
-		return cur_cache->rt;
-	} else {
-		struct rtable *rt;
-		struct flowi fl = { .nl_u = { .ip4_u =
-					      { .daddr = mutable->port_config.daddr,
-						.saddr = mutable->port_config.saddr,
-						.tos = tos } },
-				    .proto = tnl_vport->tnl_ops->ipproto };
-
-		if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
-			return NULL;
-
-		if (likely(tos == mutable->port_config.tos))
-			*cache = build_cache(vport, mutable, rt);
-
-		return rt;
-	}
-}
-
-static struct sk_buff *check_headroom(struct sk_buff *skb, int headroom)
-{
-	if (skb_headroom(skb) < headroom || skb_header_cloned(skb)) {
-		struct sk_buff *nskb = skb_realloc_headroom(skb, headroom + 16);
-		if (unlikely(!nskb)) {
-			kfree_skb(skb);
-			return ERR_PTR(-ENOMEM);
-		}
-
-		set_skb_csum_bits(skb, nskb);
-
-		if (skb->sk)
-			skb_set_owner_w(nskb, skb->sk);
-
-		kfree_skb(skb);
-		return nskb;
-	}
-
-	return skb;
-}
-
-static inline bool need_linearize(const struct sk_buff *skb)