- int i;
-
- schedule_cache_cleaner();
-
- rcu_read_lock();
- for (i = 0; i < PORT_TABLE_SIZE; i++) {
- struct hlist_node *n;
- struct hlist_head *bucket;
- struct tnl_vport *tnl_vport;
-
- bucket = &port_table[i];
- hlist_for_each_entry_rcu(tnl_vport, n, bucket, hash_node)
- __cache_cleaner(tnl_vport);
- }
- rcu_read_unlock();
-}
-
-/*
- * Copy the route's cached hardware (Ethernet) header into the tunnel
- * header cache, so the transmit fast path can reuse it verbatim.
- *
- * Assumes rt_dst(rt).hh is non-NULL -- the caller (build_cache) checks
- * this before calling us.
- */
-static inline void create_eth_hdr(struct tnl_cache *cache,
- const struct rtable *rt)
-{
- void *cache_data = get_cached_header(cache);
- int hh_len = rt_dst(rt).hh->hh_len;
- /* hh_data is stored aligned at the end of its buffer; hh_off skips
- * the alignment padding to reach the start of the real header. */
- int hh_off = HH_DATA_ALIGN(rt_dst(rt).hh->hh_len) - hh_len;
-
-#ifdef HAVE_HH_SEQ
- /* Kernels with HAVE_HH_SEQ protect hh_data with a seqlock: retry the
- * copy until a consistent snapshot is read, then record the sequence
- * number so cache validity can presumably be rechecked later --
- * NOTE(review): the consumer of cache->hh_seq is outside this hunk. */
- unsigned hh_seq;
-
- do {
- hh_seq = read_seqbegin(&rt_dst(rt).hh->hh_lock);
- memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
- } while (read_seqretry(&rt_dst(rt).hh->hh_lock, hh_seq));
-
- cache->hh_seq = hh_seq;
-#else
- /* Older kernels use a reader/writer lock instead of a seqlock. */
- read_lock(&rt_dst(rt).hh->hh_lock);
- memcpy(cache_data, (void *)rt_dst(rt).hh->hh_data + hh_off, hh_len);
- read_unlock(&rt_dst(rt).hh->hh_lock);
-#endif
-}
-
-/*
- * Build (or reuse) the per-port tunnel header cache: a preformed
- * Ethernet + tunnel header that the transmit path can copy instead of
- * reconstructing per packet.
- *
- * Returns the freshly built cache, the still-valid existing cache, or
- * NULL when header caching is disabled, unavailable, or the cache lock
- * is contended.  Uses rcu_dereference on the flow table, so an RCU
- * read-side critical section is presumably held by the caller --
- * NOTE(review): confirm against call sites outside this hunk.
- */
-static struct tnl_cache *build_cache(struct vport *vport,
- const struct tnl_mutable_config *mutable,
- struct rtable *rt)
-{
- struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- struct tnl_cache *cache;
- void *cache_data;
- int cache_len;
-
- /* Header caching disabled for this tunnel configuration. */
- if (!(mutable->flags & TNL_F_HDR_CACHE))
- return NULL;
-
- /*
- * If there is no entry in the ARP cache or if this device does not
- * support hard header caching just fall back to the IP stack.
- */
- if (!rt_dst(rt).hh)
- return NULL;
-
- /*
- * If lock is contended fall back to directly building the header.
- * We're not going to help performance by sitting here spinning.
- */
- if (!spin_trylock(&tnl_vport->cache_lock))
- return NULL;
-
- /* Someone may have already built a cache that is still valid for
- * this mutable config; if so, return it unchanged. */
- cache = cache_dereference(tnl_vport);
- if (check_cache_valid(cache, mutable))
- goto unlock;
- else
- cache = NULL;
-
- /* Cached data = link-layer header followed by the tunnel header. */
- cache_len = rt_dst(rt).hh->hh_len + mutable->tunnel_hlen;
-
- /* The header bytes live directly after the (alignment-padded)
- * struct tnl_cache in a single atomic allocation. */
- cache = kzalloc(ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN) +
- cache_len, GFP_ATOMIC);
- if (!cache)
- goto unlock;
-
- cache->len = cache_len;
-
- /* Fill in the Ethernet header, then the tunnel header after it. */
- create_eth_hdr(cache, rt);
- cache_data = get_cached_header(cache) + rt_dst(rt).hh->hh_len;
-
- create_tunnel_header(vport, mutable, rt, cache_data);
-
- /* Record the config sequence number so validity can be rechecked.
- * NOTE(review): rt is stored without taking an extra reference here;
- * presumably ownership transfers from the caller -- verify. */
- cache->mutable_seq = mutable->seq;
- cache->rt = rt;
-#ifdef NEED_CACHE_TIMEOUT
- cache->expiration = jiffies + tnl_vport->cache_exp_interval;
-#endif
-
- /* If the tunnel egresses through an internal device, pre-compute the
- * flow that a packet with this cached header would hit, so the fast
- * path can skip extraction/lookup.  Best effort: any failure simply
- * leaves cache->flow unset and falls through to publish the cache. */
- if (is_internal_dev(rt_dst(rt).dev)) {
- struct sw_flow_key flow_key;
- struct vport *dst_vport;
- struct sk_buff *skb;
- bool is_frag;
- int err;
- int flow_key_len;
- struct sw_flow *flow;
-
- dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
- if (!dst_vport)
- goto done;
-
- /* Build a throwaway skb holding just the cached headers so
- * flow_extract can parse them. */
- skb = alloc_skb(cache->len, GFP_ATOMIC);
- if (!skb)
- goto done;
-
- __skb_put(skb, cache->len);
- memcpy(skb->data, get_cached_header(cache), cache->len);
-
- err = flow_extract(skb, dst_vport->port_no, &flow_key,
- &flow_key_len, &is_frag);
-
- consume_skb(skb);
- if (err || is_frag)
- goto done;
-
- flow = flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
- &flow_key, flow_key_len);
- if (flow) {
- /* Hold a reference for the lifetime of the cache entry. */
- cache->flow = flow;
- flow_hold(flow);
- }
- }
-
-done:
- /* Publish the new cache entry to RCU readers. */
- assign_cache_rcu(vport, cache);
-
-unlock:
- spin_unlock(&tnl_vport->cache_lock);
-
- return cache;
-}
-
-static struct rtable *find_route(struct vport *vport,
- const struct tnl_mutable_config *mutable,
- u8 tos, struct tnl_cache **cache)
-{
- struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- struct tnl_cache *cur_cache = rcu_dereference(tnl_vport->cache);
-
- *cache = NULL;
- tos = RT_TOS(tos);