#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
+#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/in_route.h>
+#include <linux/inetdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#define rt_dst(rt) (rt->u.dst)
#endif
-static inline struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
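+/* As of 3.1 the hardware header cache lives in the neighbour entry
+ * rather than in the route, and is only usable once the neighbour has
+ * been resolved, so rt_hh() papers over the difference. */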
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)
+static struct hh_cache *rt_hh(struct rtable *rt)
+{
+ struct neighbour *neigh = dst_get_neighbour(&rt->dst);
+ if (!neigh || !(neigh->nud_state & NUD_CONNECTED) ||
+ !neigh->hh.hh_len)
+ return NULL;
+ return &neigh->hh;
+}
+#else
+#define rt_hh(rt) (rt_dst(rt).hh)
+#endif
+
+static struct vport *tnl_vport_to_vport(const struct tnl_vport *tnl_vport)
{
return vport_from_priv(tnl_vport);
}
/* This is analogous to rtnl_dereference for the tunnel cache. It checks that
* cache_lock is held, so it is only for update side code.
*/
-static inline struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
+static struct tnl_cache *cache_dereference(struct tnl_vport *tnl_vport)
{
return rcu_dereference_protected(tnl_vport->cache,
- lockdep_is_held(&tnl_vport->cache_lock));
+ lockdep_is_held(&tnl_vport->cache_lock));
}
-static inline void schedule_cache_cleaner(void)
+static void schedule_cache_cleaner(void)
{
schedule_delayed_work(&cache_cleaner_wq, CACHE_CLEANER_INTERVAL);
}
free_cache(c);
}
+/* Frees the portion of 'mutable' that requires RTNL and thus can't happen
+ * within an RCU callback. Fortunately this part doesn't require waiting for
+ * an RCU grace period.
+ */
+static void free_mutable_rtnl(struct tnl_mutable_config *mutable)
+{
+ ASSERT_RTNL();
+ if (ipv4_is_multicast(mutable->key.daddr) && mutable->mlink) {
+ struct in_device *in_dev;
+ in_dev = inetdev_by_index(&init_net, mutable->mlink);
+ if (in_dev)
+ ip_mc_dec_group(in_dev, mutable->key.daddr);
+ }
+}
+
static void assign_config_rcu(struct vport *vport,
struct tnl_mutable_config *new_config)
{
old_config = rtnl_dereference(tnl_vport->mutable);
rcu_assign_pointer(tnl_vport->mutable, new_config);
+
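+ /* Only the RTNL-protected part of the old config is torn down here;
+ * the rest is freed by the RCU callback below. */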
+ free_mutable_rtnl(old_config);
call_rcu(&old_config->rcu, free_config_rcu);
}
static u32 port_hash(const struct port_lookup_key *key)
{
- return jhash2((u32*)key, (PORT_KEY_LEN / sizeof(u32)), 0);
+ return jhash2((u32 *)key, (PORT_KEY_LEN / sizeof(u32)), 0);
}
-static inline struct hlist_head *find_bucket(u32 hash)
+static struct hlist_head *find_bucket(u32 hash)
{
return &port_table[(hash & (PORT_TABLE_SIZE - 1))];
}
struct hlist_node *n;
struct hlist_head *bucket;
u32 hash = port_hash(key);
- struct tnl_vport * tnl_vport;
+ struct tnl_vport *tnl_vport;
bucket = find_bucket(hash);
struct port_lookup_key lookup;
struct vport *vport;
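+ /* A packet sent to a multicast group arrives with the group as its
+ * local endpoint, but multicast tunnels are configured with the group
+ * as the remote (daddr) and no local address, so look up on the group
+ * alone. */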
+ if (ipv4_is_multicast(saddr)) {
+ lookup.saddr = 0;
+ lookup.daddr = saddr;
+ if (key_remote_ports) {
+ lookup.tunnel_type = tunnel_type | TNL_T_KEY_EXACT;
+ lookup.in_key = key;
+ vport = port_table_lookup(&lookup, mutable);
+ if (vport)
+ return vport;
+ }
+ if (remote_ports) {
+ lookup.tunnel_type = tunnel_type | TNL_T_KEY_MATCH;
+ lookup.in_key = 0;
+ vport = port_table_lookup(&lookup, mutable);
+ if (vport)
+ return vport;
+ }
+ return NULL;
+ }
+
lookup.saddr = saddr;
lookup.daddr = daddr;
}
#endif /* IPv6 */
-bool tnl_frag_needed(struct vport *vport, const struct tnl_mutable_config *mutable,
+bool tnl_frag_needed(struct vport *vport,
+ const struct tnl_mutable_config *mutable,
struct sk_buff *skb, unsigned int mtu, __be64 flow_key)
{
unsigned int eth_hdr_len = ETH_HLEN;
tnl_vport->tnl_ops->build_header(vport, mutable, iph + 1);
}
-static inline void *get_cached_header(const struct tnl_cache *cache)
+static void *get_cached_header(const struct tnl_cache *cache)
{
return (void *)cache + ALIGN(sizeof(struct tnl_cache), CACHE_DATA_ALIGN);
}
-static inline bool check_cache_valid(const struct tnl_cache *cache,
- const struct tnl_mutable_config *mutable)
+static bool check_cache_valid(const struct tnl_cache *cache,
+ const struct tnl_mutable_config *mutable)
{
struct hh_cache *hh;
if (!cache)
return false;
- hh = rt_dst(cache->rt).hh;
+ hh = rt_hh(cache->rt);
return hh &&
#ifdef NEED_CACHE_TIMEOUT
time_before(jiffies, cache->expiration) &&
rcu_read_unlock();
}
-static inline void create_eth_hdr(struct tnl_cache *cache,
- struct hh_cache *hh)
+static void create_eth_hdr(struct tnl_cache *cache, struct hh_cache *hh)
{
void *cache_data = get_cached_header(cache);
int hh_off;
* support hard header caching just fall back to the IP stack.
*/
- hh = rt_dst(rt).hh;
+ hh = rt_hh(rt);
if (!hh)
return NULL;
struct sw_flow_key flow_key;
struct vport *dst_vport;
struct sk_buff *skb;
- bool is_frag;
int err;
int flow_key_len;
struct sw_flow *flow;
memcpy(skb->data, get_cached_header(cache), cache->len);
err = flow_extract(skb, dst_vport->port_no, &flow_key,
- &flow_key_len, &is_frag);
+ &flow_key_len);
consume_skb(skb);
- if (err || is_frag)
+ if (err)
goto done;
flow = flow_tbl_lookup(rcu_dereference(dst_vport->dp->table),
return cache;
}
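+/* Routes a packet to 'mutable''s remote endpoint, hiding the
+ * flowi -> flowi4 API change that came in 2.6.39. */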
+static struct rtable *__find_route(const struct tnl_mutable_config *mutable,
+ u8 ipproto, u8 tos)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
+ struct flowi fl = { .nl_u = { .ip4_u = {
+ .daddr = mutable->key.daddr,
+ .saddr = mutable->key.saddr,
+ .tos = tos } },
+ .proto = ipproto };
+ struct rtable *rt;
+
+ if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
+ return ERR_PTR(-EADDRNOTAVAIL);
+
+ return rt;
+#else
+ struct flowi4 fl = { .daddr = mutable->key.daddr,
+ .saddr = mutable->key.saddr,
+ .flowi4_tos = tos,
+ .flowi4_proto = ipproto };
+
+ return ip_route_output_key(&init_net, &fl);
+#endif
+}
+
static struct rtable *find_route(struct vport *vport,
const struct tnl_mutable_config *mutable,
u8 tos, struct tnl_cache **cache)
*cache = NULL;
tos = RT_TOS(tos);
- if (likely(tos == mutable->tos && check_cache_valid(cur_cache, mutable))) {
+ if (likely(tos == mutable->tos &&
+ check_cache_valid(cur_cache, mutable))) {
*cache = cur_cache;
return cur_cache->rt;
} else {
struct rtable *rt;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)
- struct flowi fl = { .nl_u = { .ip4_u =
- { .daddr = mutable->key.daddr,
- .saddr = mutable->key.saddr,
- .tos = tos } },
- .proto = tnl_vport->tnl_ops->ipproto };
- if (unlikely(ip_route_output_key(&init_net, &rt, &fl)))
- return NULL;
-#else
- struct flowi4 fl = { .daddr = mutable->key.daddr,
- .saddr = mutable->key.saddr,
- .flowi4_tos = tos,
- .flowi4_proto = tnl_vport->tnl_ops->ipproto };
-
- rt = ip_route_output_key(&init_net, &fl);
+ rt = __find_route(mutable, tnl_vport->tnl_ops->ipproto, tos);
if (IS_ERR(rt))
return NULL;
-#endif
if (likely(tos == mutable->tos))
*cache = build_cache(vport, mutable, rt);
}
}
-static inline bool need_linearize(const struct sk_buff *skb)
+static bool need_linearize(const struct sk_buff *skb)
{
int i;
iph->frag_off = frag_off;
ip_select_ident(iph, &rt_dst(rt), NULL);
- skb = tnl_vport->tnl_ops->update_header(vport, mutable, &rt_dst(rt), skb);
+ skb = tnl_vport->tnl_ops->update_header(vport, mutable,
+ &rt_dst(rt), skb);
if (unlikely(!skb))
goto next;
if (likely(cache)) {
int orig_len = skb->len - cache->len;
- struct vport *cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
+ struct vport *cache_vport;
+ cache_vport = internal_dev_get_vport(rt_dst(rt).dev);
skb->protocol = htons(ETH_P_IP);
iph = ip_hdr(skb);
iph->tot_len = htons(skb->len - skb_network_offset(skb));
[OVS_TUNNEL_ATTR_TTL] = { .type = NLA_U8 },
};
-/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be zeroed. */
+/* Sets OVS_TUNNEL_ATTR_* fields in 'mutable', which must initially be
+ * zeroed. */
static int tnl_set_config(struct nlattr *options, const struct tnl_ops *tnl_ops,
const struct vport *cur_vport,
struct tnl_mutable_config *mutable)
mutable->flags = nla_get_u32(a[OVS_TUNNEL_ATTR_FLAGS]) & TNL_F_PUBLIC;
- if (a[OVS_TUNNEL_ATTR_SRC_IPV4])
- mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
mutable->key.daddr = nla_get_be32(a[OVS_TUNNEL_ATTR_DST_IPV4]);
+ if (a[OVS_TUNNEL_ATTR_SRC_IPV4]) {
+ if (ipv4_is_multicast(mutable->key.daddr))
+ return -EINVAL;
+ mutable->key.saddr = nla_get_be32(a[OVS_TUNNEL_ATTR_SRC_IPV4]);
+ }
if (a[OVS_TUNNEL_ATTR_TOS]) {
mutable->tos = nla_get_u8(a[OVS_TUNNEL_ATTR_TOS]);
if (old_vport && old_vport != cur_vport)
return -EEXIST;
+ mutable->mlink = 0;
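+ /* For a multicast remote, join the group on the device that routes
+ * toward it and remember the ifindex so that free_mutable_rtnl() can
+ * leave the group later. */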
+ if (ipv4_is_multicast(mutable->key.daddr)) {
+ struct net_device *dev;
+ struct in_device *in_dev;
+ struct rtable *rt;
+
+ rt = __find_route(mutable, tnl_ops->ipproto, mutable->tos);
+ if (IS_ERR(rt))
+ return -EADDRNOTAVAIL;
+ dev = rt_dst(rt).dev;
+ ip_rt_put(rt);
+
+ in_dev = __in_dev_get_rtnl(dev);
+ if (!in_dev)
+ return -EADDRNOTAVAIL;
+
+ mutable->mlink = dev->ifindex;
+ ip_mc_inc_group(in_dev, mutable->key.daddr);
+ }
+
return 0;
}
return vport;
error_free_mutable:
+ free_mutable_rtnl(mutable);
kfree(mutable);
error_free_vport:
vport_free(vport);
return 0;
error_free:
+ free_mutable_rtnl(mutable);
kfree(mutable);
error:
return err;
void tnl_destroy(struct vport *vport)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- const struct tnl_mutable_config *mutable;
+ struct tnl_mutable_config *mutable;
mutable = rtnl_dereference(tnl_vport->mutable);
port_table_remove_port(vport);
+ free_mutable_rtnl(mutable);
call_rcu(&tnl_vport->rcu, free_port_rcu);
}
int tnl_set_addr(struct vport *vport, const unsigned char *addr)
{
struct tnl_vport *tnl_vport = tnl_vport_priv(vport);
- struct tnl_mutable_config *mutable;
+ struct tnl_mutable_config *old_mutable, *mutable;
- mutable = kmemdup(rtnl_dereference(tnl_vport->mutable),
- sizeof(struct tnl_mutable_config), GFP_KERNEL);
+ old_mutable = rtnl_dereference(tnl_vport->mutable);
+ mutable = kmemdup(old_mutable, sizeof(struct tnl_mutable_config),
+ GFP_KERNEL);
if (!mutable)
return -ENOMEM;
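+ /* The new config inherits the multicast group membership, so clear
+ * mlink in the old one to keep free_mutable_rtnl() from dropping the
+ * group when the old config is released. */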
+ old_mutable->mlink = 0;
+
memcpy(mutable->eth_addr, addr, ETH_ALEN);
assign_config_rcu(vport, mutable);
int i;
for (i = 0; i < PORT_TABLE_SIZE; i++) {
- struct tnl_vport * tnl_vport;
+ struct tnl_vport *tnl_vport;
struct hlist_head *hash_head;
struct hlist_node *n;