X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=net%2Fipv4%2Ftcp.c;h=14d0bc3da84b98420614a865cb3c061a455ecbad;hb=5d0dd51ddb446e7c058023420f5b7d4404501980;hp=c99f5ceb98198a4f82a0d0aa34b98686cec29b2a;hpb=a91482bdcc2e0f6035702e46f1b99043a0893346;p=linux-2.6.git

diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c99f5ceb9..14d0bc3da 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -281,7 +281,6 @@ kmem_cache_t *tcp_timewait_cachep;
 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
 
 int sysctl_tcp_default_win_scale = 7;
-
 int sysctl_tcp_mem[3];
 int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
 int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
@@ -309,7 +308,7 @@ EXPORT_SYMBOL(tcp_memory_pressure);
 void tcp_enter_memory_pressure(void)
 {
 	if (!tcp_memory_pressure) {
-		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
+		NET_INC_STATS(TCPMemoryPressures);
 		tcp_memory_pressure = 1;
 	}
 }
@@ -1097,7 +1096,7 @@ static int tcp_recv_urg(struct sock *sk, long timeo,
  * calculation of whether or not we must ACK for the sake of
  * a window update.
  */
-void cleanup_rbuf(struct sock *sk, int copied)
+static void cleanup_rbuf(struct sock *sk, int copied)
 {
 	struct tcp_opt *tp = tcp_sk(sk);
 	int time_to_ack = 0;
@@ -1156,7 +1155,7 @@ static void tcp_prequeue_process(struct sock *sk)
 	struct sk_buff *skb;
 	struct tcp_opt *tp = tcp_sk(sk);
 
-	NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));
+	NET_ADD_STATS_USER(TCPPrequeued, skb_queue_len(&tp->ucopy.prequeue));
 
 	/* RX process wants to run with disabled BHs, though it is not
 	 * necessary */
@@ -1439,7 +1438,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			/* __ Restore normal policy in scheduler __ */
 
 			if ((chunk = len - tp->ucopy.len) != 0) {
-				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
+				NET_ADD_STATS_USER(TCPDirectCopyFromBacklog, chunk);
 				len -= chunk;
 				copied += chunk;
 			}
@@ -1450,7 +1449,7 @@ do_prequeue:
 				tcp_prequeue_process(sk);
 
 				if ((chunk = len - tp->ucopy.len) != 0) {
-					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+					NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
 					len -= chunk;
 					copied += chunk;
 				}
@@ -1535,7 +1534,7 @@ skip_copy:
 			tcp_prequeue_process(sk);
 
 			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
-				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+				NET_ADD_STATS_USER(TCPDirectCopyFromPrequeue, chunk);
 				len -= chunk;
 				copied += chunk;
 			}
@@ -1706,13 +1705,13 @@ void tcp_close(struct sock *sk, long timeout)
 	 */
 	if (data_was_unread) {
 		/* Unread data was tossed, zap the connection. */
-		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
+		NET_INC_STATS_USER(TCPAbortOnClose);
 		tcp_set_state(sk, TCP_CLOSE);
 		tcp_send_active_reset(sk, GFP_KERNEL);
 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
 		/* Check zero linger _after_ checking for unread data. */
 		sk->sk_prot->disconnect(sk, 0);
-		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
+		NET_INC_STATS_USER(TCPAbortOnData);
 	} else if (tcp_close_state(sk)) {
 		/* We FIN if the application ate all the data before
 		 * zapping the connection.
@@ -1778,7 +1777,7 @@ adjudge_to_death:
 		if (tp->linger2 < 0) {
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
-			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
+			NET_INC_STATS_BH(TCPAbortOnLinger);
 		} else {
 			int tmo = tcp_fin_time(tp);
 
@@ -1801,7 +1800,7 @@ adjudge_to_death:
 			       "sockets\n");
 			tcp_set_state(sk, TCP_CLOSE);
 			tcp_send_active_reset(sk, GFP_ATOMIC);
-			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
+			NET_INC_STATS_BH(TCPAbortOnMemory);
 		}
 	}
 	atomic_inc(&tcp_orphan_count);
@@ -2496,4 +2495,3 @@ EXPORT_SYMBOL(tcp_setsockopt);
 EXPORT_SYMBOL(tcp_shutdown);
 EXPORT_SYMBOL(tcp_statistics);
 EXPORT_SYMBOL(tcp_timewait_cachep);
-EXPORT_SYMBOL_GPL(cleanup_rbuf);
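
Note: aside from making cleanup_rbuf() static (and dropping its EXPORT_SYMBOL_GPL)
and removing one blank line near the sysctl declarations, every hunk above is a
mechanical rename of netstat counter identifiers: mainline's LINUX_MIB_* enum
spelling is switched back to the older named spelling (TCPMemoryPressures,
TCPAbortOnClose, ...) that this tree's snmp.h uses. The sketch below is
illustrative only, not kernel code -- the real macros update per-CPU SNMP mibs
with separate BH/USER halves, and the older scheme addressed struct linux_mib
fields rather than enum slots -- but it shows the counter-bumping pattern both
spellings reduce to, and why call sites are otherwise untouched:

/*
 * mib_demo.c -- standalone sketch, NOT the kernel implementation.
 * A flat array and the __MIB_MAX sentinel are simplifications made
 * up for this demo; only the counter names come from the patch.
 * Build with: cc -o mib_demo mib_demo.c
 */
#include <stdio.h>

enum {
	TCPMemoryPressures,	/* spelling used by this tree */
	TCPAbortOnClose,
	TCPPrequeued,
	__MIB_MAX		/* demo-only sentinel */
};

static unsigned long net_statistics[__MIB_MAX];

/* Same shape as the kernel macros: name a counter, bump its slot. */
#define NET_INC_STATS(field)		(net_statistics[field]++)
#define NET_ADD_STATS_USER(field, n)	(net_statistics[field] += (n))

int main(void)
{
	/* Mirror the kinds of call sites touched by the hunks above. */
	NET_INC_STATS(TCPMemoryPressures);
	NET_ADD_STATS_USER(TCPPrequeued, 4);

	printf("TCPMemoryPressures = %lu\n", net_statistics[TCPMemoryPressures]);
	printf("TCPPrequeued       = %lu\n", net_statistics[TCPPrequeued]);
	return 0;
}

Since only the identifier naming differs between the two spellings, the
counters' semantics are unchanged; the rename carries no behavioral risk.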