        struct flow_stats *stats;
        __be16 tcp_flags = 0;

-       if (!flow->stats.is_percpu)
-               stats = flow->stats.stat;
-       else
-               stats = this_cpu_ptr(flow->stats.cpu_stats);
+       stats = this_cpu_ptr(flow->stats);

        if ((flow->key.eth.type == htons(ETH_P_IP) ||
             flow->key.eth.type == htons(ETH_P_IPV6)) &&
+           flow->key.ip.frag != OVS_FRAG_TYPE_LATER &&
            flow->key.ip.proto == IPPROTO_TCP &&
            likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
                tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));

        spin_unlock(&stats->lock);
}
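/*
 * For reference while reading the hunks below: the bucket being locked,
 * summed and reset here is struct flow_stats.  The layout sketched below
 * is only inferred from the fields this patch touches (used,
 * packet_count, byte_count, tcp_flags, lock); the exact definition and
 * field order in flow.h may differ.
 *
 *      struct flow_stats {
 *              u64 packet_count;       // packets matched by this flow
 *              u64 byte_count;         // bytes matched by this flow
 *              unsigned long used;     // last-used time, in jiffies
 *              spinlock_t lock;        // guards the counters above
 *              __be16 tcp_flags;       // union of TCP flags seen
 *      };
 */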
-static void stats_read(struct flow_stats *stats,
+static void stats_read(struct flow_stats *stats, bool lock_bh,
                       struct ovs_flow_stats *ovs_stats,
                       unsigned long *used, __be16 *tcp_flags)
{
-       spin_lock(&stats->lock);
+       if (lock_bh)
+               spin_lock_bh(&stats->lock);
+       else
+               spin_lock(&stats->lock);
+
        if (time_after(stats->used, *used))
                *used = stats->used;
        *tcp_flags |= stats->tcp_flags;
        ovs_stats->n_packets += stats->packet_count;
        ovs_stats->n_bytes += stats->byte_count;
-       spin_unlock(&stats->lock);
+
+       if (lock_bh)
+               spin_unlock_bh(&stats->lock);
+       else
+               spin_unlock(&stats->lock);
}
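/*
 * Why stats_read() grows a lock_bh argument: the same stats->lock is
 * taken by the flow-update path, which runs with bottom halves disabled
 * on the packet receive side (the old code disabled BHs around the
 * current CPU's bucket for the same reason).  If the dump path took the
 * local CPU's bucket with a plain spin_lock() while BHs stayed enabled,
 * a softirq arriving on that CPU could try to update the same bucket and
 * spin on a lock its own CPU already holds:
 *
 *      CPU n, process context          CPU n, softirq
 *      spin_lock(&stats->lock);
 *                                      ovs_flow_stats_update()
 *                                        spin_lock(&stats->lock);  <- never succeeds
 *
 * So the current CPU's bucket is taken with spin_lock_bh(), while remote
 * buckets only need spin_lock(): their softirq writers run on other CPUs
 * and simply wait.  (Rationale inferred from the code in this patch; the
 * commit message may phrase it differently.)
 */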
void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,

        *tcp_flags = 0;
        memset(ovs_stats, 0, sizeof(*ovs_stats));

-       if (!flow->stats.is_percpu) {
-               stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
-       } else {
-               cur_cpu = get_cpu();
-               for_each_possible_cpu(cpu) {
-                       struct flow_stats *stats;
+       cur_cpu = get_cpu();

-                       if (cpu == cur_cpu)
-                               local_bh_disable();
+       for_each_possible_cpu(cpu) {
+               struct flow_stats *stats;
+               bool lock_bh;

-                       stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
-                       stats_read(stats, ovs_stats, used, tcp_flags);
-
-                       if (cpu == cur_cpu)
-                               local_bh_enable();
-               }
-               put_cpu();
+               stats = per_cpu_ptr(flow->stats, cpu);
+               lock_bh = (cpu == cur_cpu);
+               stats_read(stats, lock_bh, ovs_stats, used, tcp_flags);
        }
+       put_cpu();
}
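/*
 * Two details of the loop above: get_cpu() returns the executing CPU id
 * with preemption disabled and put_cpu() re-enables it, so cur_cpu (and
 * therefore the lock_bh decision) cannot go stale mid-walk; and the walk
 * uses for_each_possible_cpu() rather than the online mask, presumably
 * so counts accumulated on CPUs that have since gone offline are not
 * lost.  A minimal consolidated sketch of the same read pattern, as a
 * hypothetical standalone helper (names here are illustrative, not part
 * of the patch):
 */
static u64 flow_packets_total(struct sw_flow *flow)
{
        u64 total = 0;
        int cpu, cur_cpu;

        cur_cpu = get_cpu();                    /* pin to this CPU */
        for_each_possible_cpu(cpu) {
                struct flow_stats *s = per_cpu_ptr(flow->stats, cpu);

                if (cpu == cur_cpu)
                        spin_lock_bh(&s->lock); /* local bucket: see note after stats_read() */
                else
                        spin_lock(&s->lock);    /* remote bucket: writer runs on another CPU */

                total += s->packet_count;

                if (cpu == cur_cpu)
                        spin_unlock_bh(&s->lock);
                else
                        spin_unlock(&s->lock);
        }
        put_cpu();

        return total;
}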
-static void stats_reset(struct flow_stats *stats)
+static void stats_reset(struct flow_stats *stats, bool lock_bh)
{
-       spin_lock(&stats->lock);
+       if (lock_bh)
+               spin_lock_bh(&stats->lock);
+       else
+               spin_lock(&stats->lock);
+
        stats->used = 0;
        stats->packet_count = 0;
        stats->byte_count = 0;
        stats->tcp_flags = 0;
-       spin_unlock(&stats->lock);
+
+       if (lock_bh)
+               spin_unlock_bh(&stats->lock);
+       else
+               spin_unlock(&stats->lock);
}

void ovs_flow_stats_clear(struct sw_flow *flow)
{
        int cpu, cur_cpu;

-       if (!flow->stats.is_percpu) {
-               stats_reset(flow->stats.stat);
-       } else {
-               cur_cpu = get_cpu();
-
-               for_each_possible_cpu(cpu) {
+       cur_cpu = get_cpu();

-                       if (cpu == cur_cpu)
-                               local_bh_disable();
+       for_each_possible_cpu(cpu) {
+               bool lock_bh;

-                       stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
-
-                       if (cpu == cur_cpu)
-                               local_bh_enable();
-               }
-               put_cpu();
+               lock_bh = (cpu == cur_cpu);
+               stats_reset(per_cpu_ptr(flow->stats, cpu), lock_bh);
        }
+       put_cpu();
}
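/*
 * Rough usage sketch of the two entry points above, as a hypothetical
 * caller might combine them; the real callers live elsewhere in the
 * datapath, and the names below are illustrative only:
 *
 *      struct ovs_flow_stats totals;
 *      unsigned long used;
 *      __be16 tcp_flags;
 *
 *      ovs_flow_stats_get(flow, &totals, &used, &tcp_flags);
 *      // report totals.n_packets, totals.n_bytes, used and tcp_flags
 *      ovs_flow_stats_clear(flow);     // e.g. if a stats reset was requested
 */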
static int check_header(struct sk_buff *skb, int len)