 	sk->sk_error_report(sk);
 	tcp_done(sk);
-	NET_INC_STATS_BH(TCPAbortOnTimeout);
+	NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT);
 }
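
Every hunk in this patch makes the same substitution: a bare counter name becomes a member of a single LINUX_MIB_* enum. A minimal user-space sketch of that enum-indexed counter pattern follows; the enum members, array, and macros below are illustrative stand-ins, not the kernel's actual SNMP definitions.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's MIB enum and counter
 * array; the real definitions live in the kernel's SNMP headers. */
enum {
	LINUX_MIB_TCPABORTONTIMEOUT,
	LINUX_MIB_TCPABORTONMEMORY,
	LINUX_MIB_DELAYEDACKS,
	__LINUX_MIB_MAX
};

static unsigned long net_statistics[__LINUX_MIB_MAX];

/* Stand-ins for NET_INC_STATS_BH()/NET_ADD_STATS_BH(): bump a
 * counter selected by enum index rather than by bare name. */
#define NET_INC_STATS_BH(field)		(net_statistics[field]++)
#define NET_ADD_STATS_BH(field, amt)	(net_statistics[field] += (amt))

int main(void)
{
	NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT);
	NET_ADD_STATS_BH(LINUX_MIB_DELAYEDACKS, 3);
	printf("TCPAbortOnTimeout=%lu DelayedACKs=%lu\n",
	       net_statistics[LINUX_MIB_TCPABORTONTIMEOUT],
	       net_statistics[LINUX_MIB_DELAYEDACKS]);
	return 0;
}
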
 /* Do not allow orphaned sockets to eat all our resources. */
 		if (do_reset)
 			tcp_send_active_reset(sk, GFP_ATOMIC);
 		tcp_done(sk);
-		NET_INC_STATS_BH(TCPAbortOnMemory);
+		NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
 		return 1;
 	}
 	return 0;
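
This hunk instruments the path where an orphaned socket is killed to free resources. A rough sketch of the decision shape only; the limits and variables here are invented stand-ins, while the kernel consults its own orphan accounting and memory-pressure state.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for orphan accounting and memory pressure;
 * only the shape of the check is the point. */
static int tcp_orphan_count;
static int tcp_max_orphans = 8192;
static bool tcp_memory_pressure;

/* True when an orphaned socket should be aborted to free
 * resources, mirroring the role of tcp_out_of_resources(). */
static bool out_of_resources(void)
{
	return tcp_orphan_count > tcp_max_orphans || tcp_memory_pressure;
}

int main(void)
{
	tcp_orphan_count = 10000;	/* simulate too many orphans */
	printf("abort orphan? %d\n", out_of_resources());
	return 0;
}
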
 	if (sock_owned_by_user(sk)) {
 		/* Try again later. */
 		tp->ack.blocked = 1;
-		NET_INC_STATS_BH(DelayedACKLocked);
+		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
 		sk_reset_timer(sk, &tp->delack_timer, jiffies + TCP_DELACK_MIN);
 		goto out_unlock;
 	}
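
The counter renamed here records delayed-ACK timers that fired while user context held the socket lock. A small pthread-based sketch of that defer-when-locked pattern; all names are invented for illustration.

#include <pthread.h>
#include <stdio.h>

/* Miniature of the pattern above: timer work that finds the lock
 * held by "user context" records that it was blocked, counts the
 * event, and re-arms instead of touching shared state. */
struct fake_sock {
	pthread_mutex_t lock;		/* stands in for the socket lock */
	int ack_blocked;
	unsigned long delayed_ack_locked;	/* the counter's role */
};

static void delack_timer(struct fake_sock *sk)
{
	if (pthread_mutex_trylock(&sk->lock)) {
		sk->ack_blocked = 1;		/* remember work is pending */
		sk->delayed_ack_locked++;	/* count the lost race */
		/* real code re-arms the timer here and returns */
		return;
	}
	/* ... safe to send the delayed ACK here ... */
	pthread_mutex_unlock(&sk->lock);
}

int main(void)
{
	struct fake_sock sk = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	pthread_mutex_lock(&sk.lock);	/* simulate user context holding it */
	delack_timer(&sk);
	pthread_mutex_unlock(&sk.lock);
	printf("blocked=%d locked_events=%lu\n",
	       sk.ack_blocked, sk.delayed_ack_locked);
	return 0;
}
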
 		if (skb_queue_len(&tp->ucopy.prequeue)) {
 			struct sk_buff *skb;
-			NET_ADD_STATS_BH(TCPSchedulerFailed,
-					 skb_queue_len(&tp->ucopy.prequeue));
+			NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED,
+					 skb_queue_len(&tp->ucopy.prequeue));
 			while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 				sk->sk_backlog_rcv(sk, skb);
 			tp->ack.ato = TCP_ATO_MIN;
 		}
 		tcp_send_ack(sk);
-		NET_INC_STATS_BH(DelayedACKs);
+		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
 	}
 	TCP_CHECK_TIMER(sk);
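
Here the renamed counters wrap the prequeue drain: the backlog length is added to the scheduler-failure statistic in one batch, the queued buffers are pushed through the normal receive path, and only then is the ACK sent. A toy version of that count-drain-ack sequence; the queue type and helpers are invented.

#include <stdio.h>
#include <stdlib.h>

/* Packets the "scheduler" failed to hand to the user get counted
 * in one batch, then drained through the receive path before the
 * ACK goes out. */
struct pkt {
	struct pkt *next;
	int id;
};

static unsigned long tcp_scheduler_failed;

static void backlog_rcv(struct pkt *p)	/* sk_backlog_rcv() analogue */
{
	printf("processing packet %d\n", p->id);
	free(p);
}

static void drain_and_ack(struct pkt **queue, int qlen)
{
	struct pkt *p;

	tcp_scheduler_failed += qlen;	/* NET_ADD_STATS_BH() analogue */
	while ((p = *queue) != NULL) {	/* __skb_dequeue() analogue */
		*queue = p->next;
		backlog_rcv(p);
	}
	printf("sending ACK (scheduler failures so far: %lu)\n",
	       tcp_scheduler_failed);
}

int main(void)
{
	struct pkt *q = NULL;
	int qlen = 0;

	for (int id = 1; id <= 2; id++, qlen++) {
		struct pkt *p = malloc(sizeof(*p));
		p->id = id;
		p->next = q;
		q = p;
	}
	drain_and_ack(&q, qlen);
	return 0;
}
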
 		if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) {
 			if (tp->sack_ok) {
 				if (tp->ca_state == TCP_CA_Recovery)
-					NET_INC_STATS_BH(TCPSackRecoveryFail);
+					NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
 				else
-					NET_INC_STATS_BH(TCPSackFailures);
+					NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES);
 			} else {
 				if (tp->ca_state == TCP_CA_Recovery)
-					NET_INC_STATS_BH(TCPRenoRecoveryFail);
+					NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL);
 				else
-					NET_INC_STATS_BH(TCPRenoFailures);
+					NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES);
 			}
 		} else if (tp->ca_state == TCP_CA_Loss) {
-			NET_INC_STATS_BH(TCPLossFailures);
+			NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES);
 		} else {
-			NET_INC_STATS_BH(TCPTimeouts);
+			NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS);
 		}
 	}
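
The chain above picks which failure statistic a retransmission timeout bumps, based on congestion state and SACK availability. The same classification as a small standalone function; the enum and names are stand-ins for the kernel's.

#include <stdio.h>

/* Which failure counter a retransmission timeout bumps depends on
 * the congestion state and on whether SACK is in use. */
enum ca_state { CA_Open, CA_Disorder, CA_Recovery, CA_Loss };

static const char *timeout_counter(enum ca_state state, int sack_ok)
{
	if (state == CA_Disorder || state == CA_Recovery) {
		if (sack_ok)
			return state == CA_Recovery ?
				"TCPSackRecoveryFail" : "TCPSackFailures";
		return state == CA_Recovery ?
			"TCPRenoRecoveryFail" : "TCPRenoFailures";
	}
	if (state == CA_Loss)
		return "TCPLossFailures";
	return "TCPTimeouts";	/* plain timeout outside recovery */
}

int main(void)
{
	printf("%s\n", timeout_counter(CA_Recovery, 1));	/* TCPSackRecoveryFail */
	printf("%s\n", timeout_counter(CA_Open, 0));		/* TCPTimeouts */
	return 0;
}
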