#include <net/xfrm.h>
#include "actions.h"
+#include "checksum.h"
#include "datapath.h"
#include "table.h"
#include "tunnel.h"
#define CACHE_DATA_ALIGN 16
/* Protected by RCU. */
-static struct tbl *port_table;
+static struct tbl *port_table __read_mostly;
static void cache_cleaner(struct work_struct *work);
-DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
+static DECLARE_DELAYED_WORK(cache_cleaner_wq, cache_cleaner);
/*
* These are just used as an optimization: they don't require any kind of
* synchronization because we could have just as easily read the value before
* the port change happened.
*/
-static unsigned int key_local_remote_ports;
-static unsigned int key_remote_ports;
-static unsigned int local_remote_ports;
-static unsigned int remote_ports;
+static unsigned int key_local_remote_ports __read_mostly;
+static unsigned int key_remote_ports __read_mostly;
+static unsigned int local_remote_ports __read_mostly;
+static unsigned int remote_ports __read_mostly;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
#define rt_dst(rt) (rt->dst)
}
}
-enum lookup_key {
- LOOKUP_TUNNEL_TYPE = 0,
- LOOKUP_SADDR = 1,
- LOOKUP_DADDR = 2,
- LOOKUP_KEY = 3,
-};
-
struct port_lookup_key {
- u32 vals[4]; /* Contains enum lookup_key keys. */
 const struct tnl_mutable_config *mutable;
+ /* The (key, tunnel_type, saddr, daddr) tuple that port_hash() mixes
+  * and port_cmp() matches on; replaces the old untyped vals[] array so
+  * the 64-bit key and the endian-annotated addresses keep their types. */
+ __be64 key;
+ u32 tunnel_type;
+ __be32 saddr;
+ __be32 daddr;
};
/*
lookup->mutable = rcu_dereference(tnl_vport->mutable);
- return (lookup->mutable->tunnel_type == lookup->vals[LOOKUP_TUNNEL_TYPE]) &&
- lookup->mutable->port_config.daddr == lookup->vals[LOOKUP_DADDR] &&
- lookup->mutable->port_config.in_key == lookup->vals[LOOKUP_KEY] &&
- lookup->mutable->port_config.saddr == lookup->vals[LOOKUP_SADDR];
+ return (lookup->mutable->tunnel_type == lookup->tunnel_type &&
+ lookup->mutable->port_config.daddr == lookup->daddr &&
+ lookup->mutable->port_config.in_key == lookup->key &&
+ lookup->mutable->port_config.saddr == lookup->saddr);
}
-static u32 port_hash(struct port_lookup_key *lookup)
+static u32 port_hash(struct port_lookup_key *k)
{
- return jhash2(lookup->vals, ARRAY_SIZE(lookup->vals), 0);
+ /* Mix the three 32-bit fields first, then fold in both halves of the
+  * 64-bit key.  Byte order of saddr/daddr/key does not matter for
+  * hashing as long as it is consistent between insert and lookup.
+  * NOTE(review): k->key is __be64 being shifted/truncated to u32 --
+  * presumably fine, but may need (__force u64)/(__force u32) casts to
+  * keep sparse quiet; confirm against the tree's annotation style. */
+ u32 x = jhash_3words(k->saddr, k->daddr, k->tunnel_type, 0);
+ return jhash_2words(k->key >> 32, k->key, x);
}
static u32 mutable_hash(const struct tnl_mutable_config *mutable)
{
 struct port_lookup_key lookup;
- lookup.vals[LOOKUP_SADDR] = mutable->port_config.saddr;
- lookup.vals[LOOKUP_DADDR] = mutable->port_config.daddr;
- lookup.vals[LOOKUP_KEY] = mutable->port_config.in_key;
- lookup.vals[LOOKUP_TUNNEL_TYPE] = mutable->tunnel_type;
+ lookup.saddr = mutable->port_config.saddr;
+ lookup.daddr = mutable->port_config.daddr;
+ lookup.key = mutable->port_config.in_key;
+ lookup.tunnel_type = mutable->tunnel_type;
+ /* Hash a port's own config exactly the way tnl_find_port() hashes a
+  * lookup key, so insertions and lookups land in the same bucket. */
 return port_hash(&lookup);
}
err = tbl_insert(port_table, &tnl_vport->tbl_node, hash);
if (err) {
+ (*find_port_pool(tnl_vport->mutable))--;
check_table_empty();
return err;
}
table_updated:
+ (*find_port_pool(tnl_vport->mutable))--;
assign_config_rcu(vport, new_mutable);
+ (*find_port_pool(tnl_vport->mutable))++;
return 0;
}
return 0;
}
-struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be32 key,
+struct vport *tnl_find_port(__be32 saddr, __be32 daddr, __be64 key,
int tunnel_type,
const struct tnl_mutable_config **mutable)
{
if (unlikely(!table))
return NULL;
- lookup.vals[LOOKUP_SADDR] = saddr;
- lookup.vals[LOOKUP_DADDR] = daddr;
+ lookup.saddr = saddr;
+ lookup.daddr = daddr;
if (tunnel_type & TNL_T_KEY_EXACT) {
- lookup.vals[LOOKUP_KEY] = key;
- lookup.vals[LOOKUP_TUNNEL_TYPE] = tunnel_type & ~TNL_T_KEY_MATCH;
+ lookup.key = key;
+ lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_MATCH;
if (key_local_remote_ports) {
tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
}
if (key_remote_ports) {
- lookup.vals[LOOKUP_SADDR] = 0;
+ lookup.saddr = 0;
tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
if (tbl_node)
goto found;
- lookup.vals[LOOKUP_SADDR] = saddr;
+ lookup.saddr = saddr;
}
}
if (tunnel_type & TNL_T_KEY_MATCH) {
- lookup.vals[LOOKUP_KEY] = 0;
- lookup.vals[LOOKUP_TUNNEL_TYPE] = tunnel_type & ~TNL_T_KEY_EXACT;
+ lookup.key = 0;
+ lookup.tunnel_type = tunnel_type & ~TNL_T_KEY_EXACT;
if (local_remote_ports) {
tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
}
if (remote_ports) {
- lookup.vals[LOOKUP_SADDR] = 0;
+ lookup.saddr = 0;
tbl_node = tbl_lookup(table, &lookup, port_hash(&lookup), port_cmp);
if (tbl_node)
static inline void ecn_decapsulate(struct sk_buff *skb)
{
- u8 tos = ip_hdr(skb)->tos;
-
- if (INET_ECN_is_ce(tos)) {
+ /* This is accessing the outer IP header of the tunnel, which we've
+ * already validated to be OK. skb->data is currently set to the start
+ * of the inner Ethernet header, and we've validated ETH_HLEN.
+ */
+ if (unlikely(INET_ECN_is_ce(ip_hdr(skb)->tos))) {
__be16 protocol = skb->protocol;
- unsigned int nw_header = skb_network_offset(skb);
+
+ skb_set_network_header(skb, ETH_HLEN);
if (skb->protocol == htons(ETH_P_8021Q)) {
if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
return;
protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
- nw_header += VLAN_HLEN;
+ skb_set_network_header(skb, VLAN_ETH_HLEN);
}
if (protocol == htons(ETH_P_IP)) {
- if (unlikely(!pskb_may_pull(skb, nw_header
+ if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
+ sizeof(struct iphdr))))
return;
- IP_ECN_set_ce((struct iphdr *)(skb->data + nw_header));
+ IP_ECN_set_ce(ip_hdr(skb));
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (protocol == htons(ETH_P_IPV6)) {
- if (unlikely(!pskb_may_pull(skb, nw_header
+ if (unlikely(!pskb_may_pull(skb, skb_network_offset(skb)
+ sizeof(struct ipv6hdr))))
return;
- IP6_ECN_set_ce((struct ipv6hdr *)(skb->data + nw_header));
+ IP6_ECN_set_ce(ipv6_hdr(skb));
}
#endif
}
/* Called with rcu_read_lock. */
void tnl_rcv(struct vport *vport, struct sk_buff *skb)
{
- skb->pkt_type = PACKET_HOST;
- skb->protocol = eth_type_trans(skb, skb->dev);
+ /* Packets received by this function are in the following state:
+ * - skb->data points to the inner Ethernet header.
+ * - The inner Ethernet header is in the linear data area.
+ * - skb->csum does not include the inner Ethernet header.
+ * - The layer pointers point at the outer headers.
+ */
+
+ struct ethhdr *eh = (struct ethhdr *)skb->data;
+
+ if (likely(ntohs(eh->h_proto) >= 1536))
+ skb->protocol = eh->h_proto;
+ else
+ skb->protocol = htons(ETH_P_802_2);
skb_dst_drop(skb);
nf_reset(skb);
secpath_reset(skb);
- skb_reset_network_header(skb);
ecn_decapsulate(skb);
-
- skb_push(skb, ETH_HLEN);
compute_ip_summed(skb, false);
vport_receive(vport, skb);
#endif /* IPv6 */
bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
- struct sk_buff *skb, unsigned int mtu, __be32 flow_key)
+ struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
{
unsigned int eth_hdr_len = ETH_HLEN;
unsigned int total_length = 0, header_length = 0, payload_length;
schedule_cache_cleaner();
rcu_read_lock();
- tbl_foreach(port_table, cache_cleaner_cb, NULL);
+ tbl_foreach(rcu_dereference(port_table), cache_cleaner_cb, NULL);
rcu_read_unlock();
}
#endif
if (is_internal_dev(rt_dst(rt).dev)) {
- int err;
+ struct odp_flow_key flow_key;
+ struct tbl_node *flow_node;
struct vport *vport;
- struct dp_port *dp_port;
struct sk_buff *skb;
bool is_frag;
- struct odp_flow_key flow_key;
- struct tbl_node *flow_node;
+ int err;
vport = internal_dev_get_vport(rt_dst(rt).dev);
if (!vport)
goto done;
- dp_port = vport_get_dp_port(vport);
- if (!dp_port)
- goto done;
-
skb = alloc_skb(cache->len, GFP_ATOMIC);
if (!skb)
goto done;
__skb_put(skb, cache->len);
memcpy(skb->data, get_cached_header(cache), cache->len);
- err = flow_extract(skb, dp_port->port_no, &flow_key, &is_frag);
+ err = flow_extract(skb, vport->port_no, &flow_key, &is_frag);
kfree_skb(skb);
if (err || is_frag)
goto done;
- flow_node = tbl_lookup(rcu_dereference(dp_port->dp->table),
+ flow_node = tbl_lookup(rcu_dereference(vport->dp->table),
&flow_key, flow_hash(&flow_key),
flow_cmp);
if (flow_node) {
int frag_len = skb->len - mutable->tunnel_hlen;
skb->next = NULL;
-
- memset(&IPCB(skb)->opt, 0, sizeof(IPCB(skb)->opt));
- IPCB(skb)->flags = 0;
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
err = ip_local_out(skb);
if (likely(net_xmit_eval(err) == 0))
return sent_len;
}
-static int set_config(const void __user *uconfig, const struct tnl_ops *tnl_ops,
+static int set_config(const void *config, const struct tnl_ops *tnl_ops,
const struct vport *cur_vport,
struct tnl_mutable_config *mutable)
{
const struct vport *old_vport;
const struct tnl_mutable_config *old_mutable;
- if (copy_from_user(&mutable->port_config, uconfig, sizeof(struct tnl_port_config)))
- return -EFAULT;
+ mutable->port_config = *(struct tnl_port_config *)config;
if (mutable->port_config.daddr == 0)
return -EINVAL;
return 0;
}
-struct vport *tnl_create(const char *name, const void __user *config,
+struct vport *tnl_create(const struct vport_parms *parms,
const struct vport_ops *vport_ops,
const struct tnl_ops *tnl_ops)
{
int initial_frag_id;
int err;
- vport = vport_alloc(sizeof(struct tnl_vport), vport_ops);
+ vport = vport_alloc(sizeof(struct tnl_vport), vport_ops, parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
goto error;
tnl_vport = tnl_vport_priv(vport);
- strcpy(tnl_vport->name, name);
+ strcpy(tnl_vport->name, parms->name);
tnl_vport->tnl_ops = tnl_ops;
tnl_vport->mutable = kzalloc(sizeof(struct tnl_mutable_config), GFP_KERNEL);
get_random_bytes(&initial_frag_id, sizeof(int));
atomic_set(&tnl_vport->frag_id, initial_frag_id);
- err = set_config(config, tnl_ops, NULL, tnl_vport->mutable);
+ err = set_config(parms->config, tnl_ops, NULL, tnl_vport->mutable);
if (err)
goto error_free_mutable;
return ERR_PTR(err);
}
-int tnl_modify(struct vport *vport, const void __user *config)
+int tnl_modify(struct vport *vport, struct odp_port *port)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
struct tnl_mutable_config *mutable;
goto error;
}
- err = set_config(config, tnl_vport->tnl_ops, vport, mutable);
+ err = set_config(port->config, tnl_vport->tnl_ops, vport, mutable);
if (err)
goto error_free;