/*
- * Copyright (c) 2010 Nicira Networks.
+ * Copyright (c) 2010, 2011 Nicira Networks.
* Distributed under the terms of the GNU GPL version 2.
*
* Significant portions of this file may be copied from parts of the Linux
#define CACHE_DATA_ALIGN 16
-/* Protected by RCU. */
-static struct tbl *port_table __read_mostly;
+static struct tbl __rcu *port_table __read_mostly;
static void cache_cleaner(struct work_struct *work);
static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
return container_of(node, struct tnl_vport, tbl_node);
}
+/* This is analogous to rtnl_dereference for the tunnel cache. It checks that
+ * cache_lock is held, so it is only for update side code.
+ */
+static inline struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
+{
+ return rcu_dereference_protected(tnl_vport->cache,
+ lockdep_is_held(&tnl_vport->cache_lock));
+}
+
static inline void schedule_cache_cleaner(void)
{
schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
struct tnl_mutable_config *old_config;
- old_config = tnl_vport->mutable;
+ old_config = rtnl_dereference(tnl_vport->mutable);
rcu_assign_pointer(tnl_vport->mutable, new_config);
call_rcu(&old_config->rcu, free_config_rcu);
}
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
struct tnl_cache *old_cache;
- old_cache = tnl_vport->cache;
+ old_cache = cache_dereference(tnl_vport);
rcu_assign_pointer(tnl_vport->cache, new_cache);
if (old_cache)
}
struct port_lookup_key {
+ const struct tnl_mutable_config *mutable;
+ __be64 key;
u32 tunnel_type;
__be32 saddr;
__be32 daddr;
- __be32 key;
- const struct tnl_mutable_config *mutable;
};
/*
const struct tnl_vport *tnl_vport = tnl_vport_table_cast(node);
struct port_lookup_key *lookup = target;
- lookup->mutable = rcu_dereference(tnl_vport->mutable);
+ lookup->mutable = rcu_dereference_rtnl(tnl_vport->mutable);
return (lookup->mutable->tunnel_type == lookup->tunnel_type &&
lookup->mutable->port_config.daddr == lookup->daddr &&
static u32 port_hash(struct port_lookup_key *k)
{
- return jhash_3words(k->key, k->saddr, k->daddr, k->tunnel_type);
+ u32 x = jhash_3words((__force u32)k->saddr, (__force u32)k->daddr,
+ k->tunnel_type, 0);
+ return jhash_2words((__force u64)k->key >> 32, (__force u32)k->key, x);
}
static u32 mutable_hash(const struct tnl_mutable_config *mutable)
static void check_table_empty(void)
{
- if (tbl_count(port_table) == 0) {
- struct tbl *old_table = port_table;
+ struct tbl *old_table = rtnl_dereference(port_table);
+ if (tbl_count(old_table) == 0) {
cancel_delayed_work_sync(&cache_cleaner_wq);
rcu_assign_pointer(port_table, NULL);
tbl_deferred_destroy(old_table, NULL);
static int add_port(struct vport *vport)
{
+ struct tbl *cur_table = rtnl_dereference(port_table);
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
int err;
- if (!port_table) {
+ if (!cur_table) {
struct tbl *new_table;
- new_table = tbl_create(0);
+ new_table = tbl_create(TBL_MIN_BUCKETS);
if (!new_table)
return -ENOMEM;
rcu_assign_pointer(port_table, new_table);
schedule_cache_cleaner();
- } else if (tbl_count(port_table) > tbl_n_buckets(port_table)) {
- struct tbl *old_table = port_table;
+ } else if (tbl_count(cur_table) > tbl_n_buckets(cur_table)) {
struct tbl *new_table;
- new_table = tbl_expand(old_table);
+ new_table = tbl_expand(cur_table);
if (IS_ERR(new_table))
return PTR_ERR(new_table);
rcu_assign_pointer(port_table, new_table);
- tbl_deferred_destroy(old_table, NULL);
+ tbl_deferred_destroy(cur_table, NULL);
}
- err = tbl_insert(port_table, &tnl_vport->tbl_node, mutable_hash(tnl_vport->mutable));
+ err = tbl_insert(rtnl_dereference(port_table), &tnl_vport->tbl_node,
+ mutable_hash(rtnl_dereference(tnl_vport->mutable)));
if (err) {
- (*find_port_pool(tnl_vport->mutable))--;
check_table_empty();
return err;
}
- (*find_port_pool(tnl_vport->mutable))++;
+ (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
return 0;
}
static int move_port(struct vport *vport, struct tnl_mutable_config *new_mutable)
{
int err;
+ struct tbl *cur_table = rtnl_dereference(port_table);
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
u32 hash;
* finding tunnels or the possibility of failure. However, if we do
* find a tunnel it will always be consistent.
*/
- err = tbl_remove(port_table, &tnl_vport->tbl_node);
+ err = tbl_remove(cur_table, &tnl_vport->tbl_node);
if (err)
return err;
- err = tbl_insert(port_table, &tnl_vport->tbl_node, hash);
+ err = tbl_insert(cur_table, &tnl_vport->tbl_node, hash);
if (err) {
+ (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
check_table_empty();
return err;
}
table_updated:
- (*find_port_pool(tnl_vport->mutable))--;
+ (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
assign_config_rcu(vport, new_mutable);
- (*find_port_pool(tnl_vport->mutable))++;
+ (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))++;
return 0;
}
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
int err;
- err = tbl_remove(port_table, &tnl_vport->tbl_node);
+ err = tbl_remove(rtnl_dereference(port_table), &tnl_vport->tbl_node);
if (err)
return err;
check_table_empty();
- (*find_port_pool(tnl_vport->mutable))--;
+ (*find_port_pool(rtnl_dereference(tnl_vport->mutable)))--;
return 0;
}
-struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be32 key,
+struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
int tunnel_type,
const struct tnl_mutable_config **mutable)
{
struct port_lookup_key lookup;
- struct tbl *table = rcu_dereference(port_table);
+ struct tbl *table = rcu_dereference_rtnl(port_table);
struct tbl_node *tbl_node;
if (unlikely(!table))
#endif /* IPv6 */
bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
- struct sk_buff *skb, unsigned int mtu, __be32 flow_key)
+ struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
{
unsigned int eth_hdr_len = ETH_HLEN;
unsigned int total_length = 0, header_length = 0, payload_length;
if (!spin_trylock_bh(&tnl_vport->cache_lock))
return NULL;
- cache = tnl_vport->cache;
+ cache = cache_dereference(tnl_vport);
if (check_cache_valid(cache, mutable))
goto unlock;
else
#endif
if (is_internal_dev(rt_dst(rt).dev)) {
- struct odp_flow_key flow_key;
+ struct sw_flow_key flow_key;
struct tbl_node *flow_node;
- struct vport *vport;
+ struct vport *dst_vport;
struct sk_buff *skb;
bool is_frag;
int err;
- vport = internal_dev_get_vport(rt_dst(rt).dev);
- if (!vport)
+ dst_vport = internal_dev_get_vport(rt_dst(rt).dev);
+ if (!dst_vport)
goto done;
skb = alloc_skb(cache->len, GFP_ATOMIC);
__skb_put(skb, cache->len);
memcpy(skb->data, get_cached_header(cache), cache->len);
- err = flow_extract(skb, vport->port_no, &flow_key, &is_frag);
+ err = flow_extract(skb, dst_vport->port_no, &flow_key, &is_frag);
kfree_skb(skb);
if (err || is_frag)
goto done;
- flow_node = tbl_lookup(rcu_dereference(vport->dp->table),
+ flow_node = tbl_lookup(rcu_dereference(dst_vport->dp->table),
&flow_key, flow_hash(&flow_key),
flow_cmp);
if (flow_node) {
*/
if (skb_headroom(skb) < min_headroom) {
skb = check_headroom(skb, min_headroom);
- if (unlikely(IS_ERR(skb))) {
+ if (IS_ERR(skb)) {
err = PTR_ERR(skb);
goto error;
}
nskb = skb_gso_segment(skb, 0);
kfree_skb(skb);
- if (unlikely(IS_ERR(nskb))) {
+ if (IS_ERR(nskb)) {
err = PTR_ERR(nskb);
goto error;
}
skb = nskb;
} else {
skb = check_headroom(skb, min_headroom);
- if (unlikely(IS_ERR(skb))) {
+ if (IS_ERR(skb)) {
err = PTR_ERR(skb);
goto error;
}
/* Offloading */
skb = handle_offloads(skb, mutable, rt);
- if (unlikely(IS_ERR(skb)))
+ if (IS_ERR(skb))
goto error;
/* MTU */
struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
skb->protocol = htons(ETH_P_IP);
+ iph = ip_hdr(skb);
iph->tot_len = htons(skb->len - skb_network_offset(skb));
ip_send_check(iph);
vport_receive(cache_vport, skb);
sent_len += orig_len;
} else {
- int err;
+ int xmit_err;
skb->dev = rt_dst(rt).dev;
- err = dev_queue_xmit(skb);
+ xmit_err = dev_queue_xmit(skb);
- if (likely(net_xmit_eval(err) == 0))
+ if (likely(net_xmit_eval(xmit_err) == 0))
sent_len += orig_len;
}
} else
return sent_len;
}
-static int set_config(const void *config, const struct tnl_ops *tnl_ops,
- const struct vport *cur_vport,
- struct tnl_mutable_config *mutable)
+static int tnl_set_config(const void *config, const struct tnl_ops *tnl_ops,
+ const struct vport *cur_vport,
+ struct tnl_mutable_config *mutable)
{
const struct vport *old_vport;
const struct tnl_mutable_config *old_mutable;
{
struct vport *vport;
struct tnl_vport *tnl_vport;
+ struct tnl_mutable_config *mutable;
int initial_frag_id;
int err;
strcpy(tnl_vport->name, parms->name);
tnl_vport->tnl_ops = tnl_ops;
- tnl_vport->mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
- if (!tnl_vport->mutable) {
+ mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
+ if (!mutable) {
err = -ENOMEM;
goto error_free_vport;
}
- vport_gen_rand_ether_addr(tnl_vport->mutable->eth_addr);
- tnl_vport->mutable->mtu = ETH_DATA_LEN;
+ vport_gen_rand_ether_addr(mutable->eth_addr);
+ mutable->mtu = ETH_DATA_LEN;
get_random_bytes(&initial_frag_id, sizeof(int));
atomic_set(&tnl_vport->frag_id, initial_frag_id);
- err = set_config(parms->config, tnl_ops, NULL, tnl_vport->mutable);
+ err = tnl_set_config(parms->config, tnl_ops, NULL, mutable);
if (err)
goto error_free_mutable;
#ifdef NEED_CACHE_TIMEOUT
tnl_vport->cache_exp_interval = MAX_CACHE_EXP -
- (net_random() % (MAX_CACHE_EXP / 2));
+ (net_random() % (MAX_CACHE_EXP / 2));
#endif
+ rcu_assign_pointer(tnl_vport->mutable, mutable);
+
err = add_port(vport);
if (err)
goto error_free_mutable;
return vport;
error_free_mutable:
- kfree(tnl_vport->mutable);
+ kfree(mutable);
error_free_vport:
vport_free(vport);
error:
struct tnl_mutable_config *mutable;
int err;
- mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
+ mutable = kmemdup(rtnl_dereference(tnl_vport->mutable),
+ sizeof(struct tnl_mutable_config), GFP_KERNEL);
if (!mutable) {
err = -ENOMEM;
goto error;
}
- err = set_config(port->config, tnl_vport->tnl_ops, vport, mutable);
+ err = tnl_set_config(port->config, tnl_vport->tnl_ops, vport, mutable);
if (err)
goto error_free;
static void free_port_rcu(struct rcu_head *rcu)
{
- struct tnl_vport *tnl_vport = container_of(rcu, struct tnl_vport, rcu);
-
- spin_lock_bh(&tnl_vport->cache_lock);
- free_cache(tnl_vport->cache);
- spin_unlock_bh(&tnl_vport->cache_lock);
+ struct tnl_vport *tnl_vport = container_of(rcu,
+ struct tnl_vport, rcu);
- kfree(tnl_vport->mutable);
+ free_cache((struct tnl_cache __force *)tnl_vport->cache);
+ kfree((struct tnl_mutable_config __force *)tnl_vport->mutable);
vport_free(tnl_vport_to_vport(tnl_vport));
}
int tnl_destroy(struct vport *vport)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- const struct tnl_mutable_config *old_mutable;
+ const struct tnl_mutable_config *mutable, *old_mutable;
+
+ mutable = rtnl_dereference(tnl_vport->mutable);
- if (vport == tnl_find_port(tnl_vport->mutable->port_config.saddr,
- tnl_vport->mutable->port_config.daddr,
- tnl_vport->mutable->port_config.in_key,
- tnl_vport->mutable->tunnel_type,
- &old_mutable))
+ if (vport == tnl_find_port(mutable->port_config.saddr,
+ mutable->port_config.daddr, mutable->port_config.in_key,
+ mutable->tunnel_type, &old_mutable))
del_port(vport);
call_rcu(&tnl_vport->rcu, free_port_rcu);
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
struct tnl_mutable_config *mutable;
- mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
+ mutable = kmemdup(rtnl_dereference(tnl_vport->mutable),
+ sizeof(struct tnl_mutable_config), GFP_KERNEL);
if (!mutable)
return -ENOMEM;
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
struct tnl_mutable_config *mutable;
- mutable = kmemdup(tnl_vport->mutable, sizeof(struct tnl_mutable_config), GFP_KERNEL);
+ mutable = kmemdup(rtnl_dereference(tnl_vport->mutable),
+ sizeof(struct tnl_mutable_config), GFP_KERNEL);
if (!mutable)
return -ENOMEM;
const unsigned char *tnl_get_addr(const struct vport *vport)
{
const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- return rcu_dereference(tnl_vport->mutable)->eth_addr;
+ return rcu_dereference_rtnl(tnl_vport->mutable)->eth_addr;
+}
+
+void tnl_get_config(const struct vport *vport, void *config)
+{
+ const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
+ struct tnl_port_config *port_config;
+
+ port_config = &rcu_dereference_rtnl(tnl_vport->mutable)->port_config;
+ memcpy(config, port_config, sizeof(*port_config));
}
int tnl_get_mtu(const struct vport *vport)
{
const struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- return rcu_dereference(tnl_vport->mutable)->mtu;
+ return rcu_dereference_rtnl(tnl_vport->mutable)->mtu;
}
void tnl_free_linked_skbs(struct sk_buff *skb)