VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
[linux-2.6.git] / net / ipv4 / tcp_timer.c
index cab2678..72a5a50 100644 (file)
@@ -83,7 +83,7 @@ static void tcp_write_err(struct sock *sk)
        sk->sk_error_report(sk);
 
        tcp_done(sk);
-       NET_INC_STATS_BH(TCPAbortOnTimeout);
+       NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT);
 }
 
 /* Do not allow orphaned sockets to eat all our resources.
@@ -126,7 +126,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
                if (do_reset)
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                tcp_done(sk);
-               NET_INC_STATS_BH(TCPAbortOnMemory);
+               NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
                return 1;
        }
        return 0;
@@ -212,12 +212,12 @@ static void tcp_delack_timer(unsigned long data)
        if (sock_owned_by_user(sk)) {
                /* Try again later. */
                tp->ack.blocked = 1;
-               NET_INC_STATS_BH(DelayedACKLocked);
+               NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
                sk_reset_timer(sk, &tp->delack_timer, jiffies + TCP_DELACK_MIN);
                goto out_unlock;
        }
 
-       tcp_mem_reclaim(sk);
+       sk_stream_mem_reclaim(sk);
 
        if (sk->sk_state == TCP_CLOSE || !(tp->ack.pending & TCP_ACK_TIMER))
                goto out;
@@ -231,8 +231,8 @@ static void tcp_delack_timer(unsigned long data)
        if (skb_queue_len(&tp->ucopy.prequeue)) {
                struct sk_buff *skb;
 
-               NET_ADD_STATS_BH(TCPSchedulerFailed,
-                                 skb_queue_len(&tp->ucopy.prequeue));
+               NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED,
+                                skb_queue_len(&tp->ucopy.prequeue));
 
                while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
                        sk->sk_backlog_rcv(sk, skb);
@@ -252,13 +252,13 @@ static void tcp_delack_timer(unsigned long data)
                        tp->ack.ato = TCP_ATO_MIN;
                }
                tcp_send_ack(sk);
-               NET_INC_STATS_BH(DelayedACKs);
+               NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
        }
        TCP_CHECK_TIMER(sk);
 
 out:
        if (tcp_memory_pressure)
-               tcp_mem_reclaim(sk);
+               sk_stream_mem_reclaim(sk);
 out_unlock:
        bh_unlock_sock(sk);
        sock_put(sk);
@@ -269,7 +269,7 @@ static void tcp_probe_timer(struct sock *sk)
        struct tcp_opt *tp = tcp_sk(sk);
        int max_probes;
 
-       if (tp->packets_out || !tp->send_head) {
+       if (tp->packets_out || !sk->sk_send_head) {
                tp->probes_out = 0;
                return;
        }
@@ -353,19 +353,19 @@ static void tcp_retransmit_timer(struct sock *sk)
                if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) {
                        if (tp->sack_ok) {
                                if (tp->ca_state == TCP_CA_Recovery)
-                                       NET_INC_STATS_BH(TCPSackRecoveryFail);
+                                       NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
                                else
-                                       NET_INC_STATS_BH(TCPSackFailures);
+                                       NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES);
                        } else {
                                if (tp->ca_state == TCP_CA_Recovery)
-                                       NET_INC_STATS_BH(TCPRenoRecoveryFail);
+                                       NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL);
                                else
-                                       NET_INC_STATS_BH(TCPRenoFailures);
+                                       NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES);
                        }
                } else if (tp->ca_state == TCP_CA_Loss) {
-                       NET_INC_STATS_BH(TCPLossFailures);
+                       NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES);
                } else {
-                       NET_INC_STATS_BH(TCPTimeouts);
+                       NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS);
                }
        }
 
@@ -448,7 +448,7 @@ static void tcp_write_timer(unsigned long data)
        TCP_CHECK_TIMER(sk);
 
 out:
-       tcp_mem_reclaim(sk);
+       sk_stream_mem_reclaim(sk);
 out_unlock:
        bh_unlock_sock(sk);
        sock_put(sk);
@@ -606,7 +606,7 @@ static void tcp_keepalive_timer (unsigned long data)
        elapsed = keepalive_time_when(tp);
 
        /* It is alive without keepalive 8) */
-       if (tp->packets_out || tp->send_head)
+       if (tp->packets_out || sk->sk_send_head)
                goto resched;
 
        elapsed = tcp_time_stamp - tp->rcv_tstamp;
@@ -633,7 +633,7 @@ static void tcp_keepalive_timer (unsigned long data)
        }
 
        TCP_CHECK_TIMER(sk);
-       tcp_mem_reclaim(sk);
+       sk_stream_mem_reclaim(sk);
 
 resched:
        tcp_reset_keepalive_timer (sk, elapsed);