-        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
-        struct iphdr *iph = header;
-
-        iph->version = 4;
-        iph->ihl = sizeof(struct iphdr) >> 2;
-        iph->frag_off = htons(IP_DF);
-        iph->protocol = tnl_vport->tnl_ops->ipproto;
-        iph->tos = mutable->tos;
-        iph->daddr = rt->rt_dst;
-        iph->saddr = rt->rt_src;
-        iph->ttl = mutable->ttl;
-        if (!iph->ttl)
-                iph->ttl = ip4_dst_hoplimit(&rt_dst(rt));
-
-        tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
-}
-
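-/*
- * The cached header data is stored immediately after the tnl_cache
- * structure itself, aligned to CACHE_DATA_ALIGN.
- */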
-static void *get_cached_header(const struct tnl_cache *cache)
-{
-        return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
-}
-
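-/*
- * A cache entry is usable only while the hard header is still in the
- * ARP cache, the entry has not expired or been invalidated by a
- * routing change, the port configuration (mutable->seq) is unchanged,
- * and any flow captured for an internal device is still alive.
- */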
-static bool check_cache_valid(const struct tnl_cache *cache,
-                              const struct tnl_mutable_config *mutable)
-{
-        struct hh_cache *hh;
-
-        if (!cache)
-                return false;
-
-        hh = rt_hh(cache->rt);
-        return hh &&
-#ifdef NEED_CACHE_TIMEOUT
-               time_before(jiffies, cache->expiration) &&
-#endif
-#ifdef HAVE_RT_GENID
-               atomic_read(&init_net.ipv4.rt_genid) == cache->rt->rt_genid &&
-#endif
-#ifdef HAVE_HH_SEQ
-               hh->hh_lock.sequence == cache->hh_seq &&
-#endif
-               mutable->seq == cache->mutable_seq &&
-               (!is_internal_dev(rt_dst(cache->rt).dev) ||
-                (cache->flow && !cache->flow->dead));
-}
-
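-/*
- * Drop a vport's cache entry if it has gone stale.  The cache lock is
- * only tried, never waited on: if it is contended, whoever holds it is
- * already rebuilding or replacing the entry.
- */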
-static void __cache_cleaner(struct tnl_vport *tnl_vport)
-{
-        const struct tnl_mutable_config *mutable =
-                        rcu_dereference(tnl_vport->mutable);
-        const struct tnl_cache *cache = rcu_dereference(tnl_vport->cache);
-
-        if (cache && !check_cache_valid(cache, mutable) &&
-            spin_trylock_bh(&tnl_vport->cache_lock)) {
-                assign_cache_rcu(tnl_vport_to_vport(tnl_vport), NULL);
-                spin_unlock_bh(&tnl_vport->cache_lock);
-        }
-}
-
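-/*
- * Periodic work item: reschedules itself, then walks every port table
- * bucket under RCU and invalidates any stale cache entries it finds.
- */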
-static void cache_cleaner(struct work_struct *work)
-{
-        int i;
-
-        schedule_cache_cleaner();
-
-        rcu_read_lock();
-        for (i = 0; i < PORT_TABLE_SIZE; i++) {
-                struct hlist_node *n;
-                struct hlist_head *bucket;
-                struct tnl_vport *tnl_vport;
-
-                bucket = &port_table[i];
-                hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
-                        __cache_cleaner(tnl_vport);
-        }
-        rcu_read_unlock();
-}
-
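-/*
- * Copy the cached hard header out of the ARP cache entry into our
- * header cache.  Where hh_lock is a seqlock (HAVE_HH_SEQ) a torn read
- * is retried; otherwise the read lock is taken for the copy.
- */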
-static void create_eth_hdr(struct tnl_cache *cache, struct hh_cache *hh)
-{
-        void *cache_data = get_cached_header(cache);
-        int hh_off;
-
-#ifdef HAVE_HH_SEQ
-        unsigned hh_seq;
-
-        do {
-                hh_seq = read_seqbegin(&hh->hh_lock);
-                hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
-                memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
-                cache->hh_len = hh->hh_len;
-        } while (read_seqretry(&hh->hh_lock, hh_seq));
-
-        cache->hh_seq = hh_seq;
-#else
-        read_lock(&hh->hh_lock);
-        hh_off = HH_DATA_ALIGN(hh->hh_len) - hh->hh_len;
-        memcpy(cache_data, (void *)hh->hh_data + hh_off, hh->hh_len);
-        cache->hh_len = hh->hh_len;
-        read_unlock(&hh->hh_lock);
-#endif
-}
-
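-/*
- * Try to build a combined hard header + tunnel header cache entry for
- * this route.  Returns NULL, leaving the caller (on the send path,
- * outside this hunk) to build headers the slow way, if caching is
- * disabled, no hard header is available, or the cache lock is
- * contended.
- */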
-static struct tnl_cache *build_cache(struct vport *vport,
-                                     const struct tnl_mutable_config *mutable,
-                                     struct rtable *rt)
-{
-        struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
-        struct tnl_cache *cache;
-        void *cache_data;
-        int cache_len;
-        struct hh_cache *hh;
-
-        if (!(mutable->flags & TNL_F_HDR_CACHE))
-                return NULL;
-
-        /*
-         * If there is no entry in the ARP cache, or if this device does
-         * not support hard header caching, just fall back to the IP
-         * stack.
-         */
-        hh = rt_hh(rt);
-        if (!hh)
-                return NULL;
-
-        /*
-         * If the lock is contended, fall back to directly building the
-         * header.  We're not going to help performance by sitting here
-         * spinning.
-         */
-        if (!spin_trylock(&tnl_vport->cache_lock))
-                return NULL;
-
-        cache = cache_dereference(tnl_vport);
-        if (check_cache_valid(cache, mutable))
-                goto unlock;
-        cache = NULL;
-
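-        /*
-         * Room for the link-layer header (plus device headroom) and the
-         * IP/tunnel header; the aligned struct itself is added at
-         * allocation.
-         */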
-        cache_len = LL_RESERVED_SPACE(rt_dst(rt).dev) + mutable->tunnel_hlen;
-
-        cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
-                        cache_len, GFP_ATOMIC);
-        if (!cache)
-                goto unlock;
-
-        create_eth_hdr(cache, hh);
-        cache_data = get_cached_header(cache) + cache->hh_len;
-        cache->len = cache->hh_len + mutable->tunnel_hlen;
-
-        create_tunnel_header(vport, mutable, rt, cache_data);
-
-        cache->mutable_seq = mutable->seq;
-        cache->rt = rt;
-#ifdef NEED_CACHE_TIMEOUT
-        cache->expiration = jiffies + tnl_vport->cache_exp_interval;
-#endif
-
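-        /*
-         * If the tunnel egresses through one of our own internal devices,
-         * look up the flow that the cached header will hit by running a
-         * throwaway skb through flow_extract(), and hold a reference so
-         * the cache can be invalidated once that flow dies.
-         */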
-        if (is_internal_dev(rt_dst(rt).dev)) {
-                struct sw_flow_key flow_key;
-                struct vport *dst_vport;
-                struct sk_buff *skb;
-                int err;
-                int flow_key_len;
-                struct sw_flow *flow;
-
-                dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
-                if (!dst_vport)
-                        goto done;
-
-                skb = alloc_skb(cache->len, GFP_ATOMIC);
-                if (!skb)
-                        goto done;
-
-                __skb_put(skb, cache->len);
-                memcpy(skb->data, get_cached_header(cache), cache->len);
-
-                err = flow_extract(skb, dst_vport->port_no, &flow_key,
-                                   &flow_key_len);
-
-                consume_skb(skb);
-                if (err)
-                        goto done;
-
-                flow = flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
-                                       &flow_key, flow_key_len);
-                if (flow) {
-                        cache->flow = flow;
-                        flow_hold(flow);
-                }
-        }
-
-done:
-        assign_cache_rcu(vport, cache);
-
-unlock:
-        spin_unlock(&tnl_vport->cache_lock);
-
-        return cache;
-}