We currently document that BHs need to be disabled when handling
received packets. However, this is not generally the case in
practice (usually preemption is disabled but not BHs). Only one
place actually relies on BHs being disabled, so fix that and update
the documentation of our expectations.
-/* Must be called with rcu_read_lock and with bottom-halves disabled. */
+/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb)
{
struct datapath *dp = p->dp;
struct dp_stats_percpu *stats;
void dp_process_received_packet(struct dp_port *p, struct sk_buff *skb)
{
struct datapath *dp = p->dp;
struct dp_stats_percpu *stats;
struct odp_flow_key key;
struct tbl_node *flow_node;
struct odp_flow_key key;
struct tbl_node *flow_node;
OVS_CB(skb)->dp_port = p;
OVS_CB(skb)->dp_port = p;
- /* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
- stats = percpu_ptr(dp->stats_percpu, smp_processor_id());
-
if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
if (dp->drop_frags) {
kfree_skb(skb);
if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
if (dp->drop_frags) {
kfree_skb(skb);
- stats->n_frags++;
- return;
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
+ goto out;
flow_used(flow, skb);
execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
GFP_ATOMIC);
flow_used(flow, skb);
execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
GFP_ATOMIC);
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
}
dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
}
+
+out:
+ local_bh_disable();
+ stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
+ (*(u64 *)((u8 *)stats + stats_counter_off))++;
+ local_bh_enable();
}
#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
}
#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
-/* Called with rcu_read_lock and bottom-halves disabled. */
+/* Called with rcu_read_lock. */
static void
gre_err(struct sk_buff *skb, u32 info)
{
static void
gre_err(struct sk_buff *skb, u32 info)
{
skb->protocol = htons(ETH_P_IP);
}
skb->protocol = htons(ETH_P_IP);
}
-/* Called with rcu_read_lock and bottom-halves disabled. */
+/* Called with rcu_read_lock. */
static int
gre_rcv(struct sk_buff *skb)
{
static int
gre_rcv(struct sk_buff *skb)
{
* @vport: vport that received the packet
* @skb: skb that was received
*
* @vport: vport that received the packet
* @skb: skb that was received
*
- * Must be called with rcu_read_lock and bottom halves disabled. The packet
- * cannot be shared and skb->data should point to the Ethernet header. The
- * caller must have already called compute_ip_summed() to initialize the
- * checksumming fields.
+ * Must be called with rcu_read_lock. The packet cannot be shared and
+ * skb->data should point to the Ethernet header. The caller must have already
+ * called compute_ip_summed() to initialize the checksumming fields.
*/
void
vport_receive(struct vport *vport, struct sk_buff *skb)
*/
void
vport_receive(struct vport *vport, struct sk_buff *skb)