*/
#include <linux/version.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0)
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}
+/* Return the transport-layer port pair (source and destination, packed
+ * into one __be32) for protocol @ip_proto, read at transport-header
+ * offset @thoff.  Returns 0 when the protocol carries no ports
+ * (proto_ports_offset() < 0) or the bytes are not present in the skb.
+ */
+__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
+{
+ int poff = proto_ports_offset(ip_proto);
+
+ if (poff >= 0) {
+ __be32 *ports, _ports;
+
+ ports = skb_header_pointer(skb, thoff + poff,
+ sizeof(_ports), &_ports);
+ if (ports)
+ return *ports;
+ }
+
+ return 0;
+}
+
static bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
{
- int poff, nhoff = skb_network_offset(skb);
+ int nhoff = skb_network_offset(skb);
u8 ip_proto;
__be16 proto = skb->protocol;
nhoff += sizeof(struct ipv6hdr);
break;
}
+ /* 802.1ad (QinQ) outer tags have the same layout as 802.1Q */
+ case __constant_htons(ETH_P_8021AD):
case __constant_htons(ETH_P_8021Q): {
const struct vlan_hdr *vlan;
struct vlan_hdr _vlan;
}
case IPPROTO_IPIP:
goto again;
+ /* IPv6 tunneled in IPv4: re-enter the IPv6 parse for the inner hdr */
+ case IPPROTO_IPV6:
+ proto = htons(ETH_P_IPV6);
+ goto ipv6;
default:
break;
}
flow->ip_proto = ip_proto;
- poff = proto_ports_offset(ip_proto);
- if (poff >= 0) {
- __be32 *ports, _ports;
-
- nhoff += poff;
- ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
- if (ports)
- flow->ports = *ports;
- }
-
+ /* port extraction factored out into skb_flow_get_ports() */
+ flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
flow->thoff = (u16) nhoff;
return true;
}
static u32 hashrnd __read_mostly;
+/* Lazily seed hashrnd exactly once, on first use, via the
+ * net_get_random_once() once-only helper.
+ */
+static __always_inline void __flow_hash_secret_init(void)
+{
+ net_get_random_once(&hashrnd, sizeof(hashrnd));
+}
-static void init_hashrnd(void)
+/* Jenkins-hash three 32-bit words, seeding hashrnd on first use.
+ * Replaces init_hashrnd(), whose unsynchronized check-then-seed
+ * could run get_random_bytes() concurrently on multiple CPUs.
+ */
+static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
{
- if (likely(hashrnd))
- return;
- get_random_bytes(&hashrnd, sizeof(hashrnd));
+ __flow_hash_secret_init();
+ return jhash_3words(a, b, c, hashrnd);
}
u32 __skb_get_rxhash(struct sk_buff *skb)
swap(keys.port16[0], keys.port16[1]);
}
- init_hashrnd();
-
- hash = jhash_3words((__force u32)keys.dst,
- (__force u32)keys.src,
- (__force u32)keys.ports, hashrnd);
+ /* replaces init_hashrnd() + open-coded jhash_3words(..., hashrnd) */
+ hash = __flow_hash_3words((__force u32)keys.dst,
+ (__force u32)keys.src,
+ (__force u32)keys.ports);
if (!hash)
hash = 1;
#define __LINUX_NET_WRAPPER_H 1
#include_next <linux/net.h>
+#include <linux/atomic.h>
#ifndef net_ratelimited_function
#define net_ratelimited_function(function, ...) \
net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
#endif
+#ifndef net_get_random_once
+/* Compat shim for kernels lacking net_get_random_once(): fill @buf with
+ * random bytes exactly once across all call sites of a given macro
+ * expansion.  The per-call-site statics make the common path a single
+ * atomic_read(); __net_get_random_once() is the serialized slow path.
+ * The expression evaluates to true only for the call that seeded @buf.
+ */
+bool __net_get_random_once(void *buf, int nbytes, bool *done,
+ atomic_t *done_key);
+
+#define ___NET_RANDOM_STATIC_KEY_INIT ATOMIC_INIT(0)
+
+
+#define net_get_random_once(buf, nbytes) \
+({ \
+ bool ___ret = false; \
+ static bool ___done = false; \
+ static atomic_t ___done_key = \
+ ___NET_RANDOM_STATIC_KEY_INIT; \
+ if (!atomic_read(&___done_key)) \
+ ___ret = __net_get_random_once(buf, \
+ nbytes, \
+ &___done, \
+ &___done_key); \
+ ___ret; \
+})
+#endif
+
#endif
csum_unfold(*sum)));
}
#endif
+
+/* Slow path of the net_get_random_once() compat macro: a spinlock
+ * serializes racing callers so @buf is seeded exactly once; *done
+ * short-circuits callers that lost the race.  @done_key is set only
+ * after @buf is fully written, so callers that skip the lock after
+ * seeing the key observe a seeded buffer.  Returns true for the one
+ * caller that performed the seeding, false for all others.
+ */
+bool __net_get_random_once(void *buf, int nbytes, bool *done,
+ atomic_t *done_key)
+{
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lock, flags);
+ if (*done) {
+ spin_unlock_irqrestore(&lock, flags);
+ return false;
+ }
+
+ get_random_bytes(buf, nbytes);
+ *done = true;
+ spin_unlock_irqrestore(&lock, flags);
+
+ atomic_set(done_key, 1);
+
+ return true;
+}