#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
+#include <linux/smp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
return cur_ms - idle_ms;
}
/* Extract the low 12 bits of the TCP header's flag word as a big-endian
 * 16-bit value: all flag bits (FIN..CWR plus the NS bit), unlike the old
 * byte-at-offset-13 scheme which masked to only 6 flags.
 * NOTE(review): relies on tcp_flag_word() yielding the 32-bit word that
 * contains doff/flags — defined in <linux/tcp.h>. */
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))

/* Fold one received packet into FLOW's statistics slot for this CPU.
 *
 * @flow: flow the packet matched; flow->stats[] is indexed by CPU id.
 * @skb:  the packet being accounted.
 *
 * Per-CPU slots mean the spin_lock here only contends with readers
 * (ovs_flow_stats_get/clear), not with updates on other CPUs.
 * NOTE(review): smp_processor_id() requires preemption to be disabled;
 * presumably guaranteed by the receive/datapath context of the caller —
 * confirm at call sites (not visible in this chunk). */
void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
{
	struct sw_flow_stats *stats = &flow->stats[smp_processor_id()];
	__be16 tcp_flags = 0;
	/* Collect TCP flags only for IPv4/IPv6 TCP, and only when the full
	 * TCP header is within the packet's claimed length. */
	if ((flow->key.eth.type == htons(ETH_P_IP) ||
	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
	    flow->key.ip.proto == IPPROTO_TCP &&
	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
		tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
	}
	spin_lock(&stats->lock);
	stats->used = jiffies;		/* last-use timestamp for flow aging */
	stats->packet_count++;
	stats->byte_count += skb->len;
	stats->tcp_flags |= tcp_flags;	/* union of flags seen over flow lifetime */
	spin_unlock(&stats->lock);
}
+
/* Aggregate FLOW's per-CPU statistics into *RES.
 *
 * @flow: flow whose per-CPU stats[] slots are summed.
 * @res:  output; zeroed first, then packet/byte counts are summed,
 *        tcp_flags are OR-ed, and 'used' becomes the most recent
 *        (time_after) timestamp across all CPUs.
 *
 * get_cpu() pins us to one CPU for the walk so cur_cpu stays valid.
 * Bottom halves are disabled only around the current CPU's slot: that is
 * the one slot the local softirq datapath (ovs_flow_stats_update) could
 * try to lock underneath us, which would deadlock on this CPU.  Remote
 * CPUs' slots are only ever locked by their own softirqs, which spin —
 * they do not deadlock with us. */
void ovs_flow_stats_get(struct sw_flow *flow, struct sw_flow_stats *res)
{
	int cpu, cur_cpu;

	memset(res, 0, sizeof(*res));

	cur_cpu = get_cpu();
	for_each_possible_cpu(cpu) {
		struct sw_flow_stats *stats = &flow->stats[cpu];

		if (cpu == cur_cpu)
			local_bh_disable();

		spin_lock(&stats->lock);
		/* 'used' is a jiffies value; time_after handles wraparound. */
		if (time_after(stats->used, res->used))
			res->used = stats->used;
		res->packet_count += stats->packet_count;
		res->byte_count += stats->byte_count;
		res->tcp_flags |= stats->tcp_flags;
		spin_unlock(&stats->lock);

		if (cpu == cur_cpu)
			local_bh_enable();
	}
	put_cpu();
}
+
/* Reset FLOW's statistics on every possible CPU to zero.
 *
 * @flow: flow whose per-CPU stats[] slots are cleared.
 *
 * Same locking discipline as ovs_flow_stats_get(): preemption is held
 * off with get_cpu() for the duration of the walk, and bottom halves are
 * disabled only while locking the current CPU's slot, since only the
 * local softirq datapath could otherwise deadlock against us on that
 * one lock. */
void ovs_flow_stats_clear(struct sw_flow *flow)
{
	int cpu, cur_cpu;

	cur_cpu = get_cpu();
	for_each_possible_cpu(cpu) {
		struct sw_flow_stats *stats = &flow->stats[cpu];

		if (cpu == cur_cpu)
			local_bh_disable();

		spin_lock(&stats->lock);
		stats->used = 0;
		stats->packet_count = 0;
		stats->byte_count = 0;
		stats->tcp_flags = 0;
		spin_unlock(&stats->lock);

		if (cpu == cur_cpu)
			local_bh_enable();
	}
	put_cpu();
}
static int check_header(struct sk_buff *skb, int len)
struct tcphdr *tcp = tcp_hdr(skb);
key->ipv4.tp.src = tcp->source;
key->ipv4.tp.dst = tcp->dest;
+ key->ipv4.tp.flags = TCP_FLAGS_BE16(tcp);
}
} else if (key->ip.proto == IPPROTO_UDP) {
if (udphdr_ok(skb)) {
struct tcphdr *tcp = tcp_hdr(skb);
key->ipv6.tp.src = tcp->source;
key->ipv6.tp.dst = tcp->dest;
+ key->ipv6.tp.flags = TCP_FLAGS_BE16(tcp);
}
} else if (key->ip.proto == NEXTHDR_UDP) {
if (udphdr_ok(skb)) {