static struct tbl *port_table __read_mostly;
static void cache_cleaner(struct work_struct *work);
-DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
+static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
/*
* These are just used as an optimization: they don't require any kind of
}
struct port_lookup_key {
+ const struct tnl_mutable_config *mutable;
+ __be64 key;
u32 tunnel_type;
__be32 saddr;
__be32 daddr;
- __be32 key;
- const struct tnl_mutable_config *mutable;
};
/*
static u32 port_hash(struct port_lookup_key *k)
{
- return jhash_3words(k->key, k->saddr, k->daddr, k->tunnel_type);
+ u32 x = jhash_3words(k->saddr, k->daddr, k->tunnel_type, 0);
+ return jhash_2words(k->key >> 32, k->key, x);
}
static u32 mutable_hash(const struct tnl_mutable_config *mutable)
err = tbl_insert(port_table, &tnl_vport->tbl_node, hash);
if (err) {
+ (*find_port_pool(tnl_vport->mutable))--;
check_table_empty();
return err;
}
table_updated:
+ (*find_port_pool(tnl_vport->mutable))--;
assign_config_rcu(vport, new_mutable);
+ (*find_port_pool(tnl_vport->mutable))++;
return 0;
}
return 0;
}
-struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be32 key,
+struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
int tunnel_type,
const struct tnl_mutable_config **mutable)
{
static inline void ecn_decapsulate(struct sk_buff *skb)
{
- u8 tos = ip_hdr(skb)->tos;
-
- if (INET_ECN_is_ce(tos)) {
+ /* This is accessing the outer IP header of the tunnel, which we have
+ * already validated to be OK. skb->data currently points at the start
+ * of the inner Ethernet header, and the caller has already verified
+ * that at least ETH_HLEN bytes are present.
+ */
+ if (unlikely(INET_ECN_is_ce(ip_hdr(skb)->tos))) {
__be16 protocol = skb->protocol;
- unsigned int nw_header = skb_network_offset(skb);
+
+ skb_set_network_header(skb, ETH_HLEN);
if (skb->protocol == htons(ETH_P_8021Q)) {
if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
return;
protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
- nw_header += VLAN_HLEN;
+ skb_set_network_header(skb, VLAN_ETH_HLEN);
}
if (protocol == htons(ETH_P_IP)) {
- if (unlikely(!pskb_may_pull(skb, nw_header
+ if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
+ sizeof(struct iphdr))))
return;
- IP_ECN_set_ce((struct iphdr *)(skb->data + nw_header));
+ IP_ECN_set_ce(ip_hdr(skb));
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (protocol == htons(ETH_P_IPV6)) {
- if (unlikely(!pskb_may_pull(skb, nw_header
+ if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
+ sizeof(struct ipv6hdr))))
return;
- IP6_ECN_set_ce((struct ipv6hdr *)(skb->data + nw_header));
+ IP6_ECN_set_ce(ipv6_hdr(skb));
}
#endif
}
skb_dst_drop(skb);
nf_reset(skb);
secpath_reset(skb);
- skb_set_network_header(skb, ETH_HLEN);
ecn_decapsulate(skb);
compute_ip_summed(skb, false);
#endif /* IPv6 */
bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
- struct sk_buff *skb, unsigned int mtu, __be32 flow_key)
+ struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
{
unsigned int eth_hdr_len = ETH_HLEN;
unsigned int total_length = 0, header_length = 0, payload_length;
schedule_cache_cleaner();
rcu_read_lock();
- tbl_foreach(port_table, cache_cleaner_cb, NULL);
+ tbl_foreach(rcu_dereference(port_table), cache_cleaner_cb, NULL);
rcu_read_unlock();
}