*
* Version: $Id: tcp_timer.c,v 1.88 2002/02/01 22:01:04 davem Exp $
*
- * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Mark Evans, <evansmp@uhura.aston.ac.uk>
* Corey Minyard <wf-rch!minyard@relay.EU.net>
static void tcp_delack_timer(unsigned long);
static void tcp_keepalive_timer(unsigned long data);
-const char timer_bug_msg[] = KERN_DEBUG "tcpbug: unknown timer value\n";
+#ifdef TCP_DEBUG
+const char tcp_timer_bug_msg[] = KERN_DEBUG "tcpbug: unknown timer value\n";
+EXPORT_SYMBOL(tcp_timer_bug_msg);
+#endif
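/*
 * Aside (assumed context, not part of this patch): the inline timer
 * helpers in include/net/tcp.h print this message when handed an
 * unknown timer kind, roughly
 *
 *	default:
 *	#ifdef TCP_DEBUG
 *		printk(tcp_timer_bug_msg);
 *	#endif
 *
 * and since those helpers are inlined into modules, the symbol has to
 * be exported.
 */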
/*
 * Using different timers for retransmit, delayed acks and probes
 * (we may wish to use just one timer maintaining a list of expire
 * jiffies to optimize).
 */
void tcp_init_xmit_timers(struct sock *sk)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
init_timer(&tp->retransmit_timer);
tp->retransmit_timer.function = &tcp_write_timer;
void tcp_clear_xmit_timers(struct sock *sk)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
tp->pending = 0;
- if (timer_pending(&tp->retransmit_timer) &&
- del_timer(&tp->retransmit_timer))
- __sock_put(sk);
+ sk_stop_timer(sk, &tp->retransmit_timer);
tp->ack.pending = 0;
tp->ack.blocked = 0;
- if (timer_pending(&tp->delack_timer) &&
- del_timer(&tp->delack_timer))
- __sock_put(sk);
+ sk_stop_timer(sk, &tp->delack_timer);
- if (timer_pending(&sk->sk_timer) && del_timer(&sk->sk_timer))
- __sock_put(sk);
+ sk_stop_timer(sk, &sk->sk_timer);
}
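/*
 * The hunks above fold the open-coded timer_pending()/del_timer()/
 * refcount dance into the generic socket timer helpers. A minimal
 * sketch of what they do (assumed from net/core/sock.c of this era,
 * matching the lines removed above):
 *
 *	void sk_stop_timer(struct sock *sk, struct timer_list *timer)
 *	{
 *		if (timer_pending(timer) && del_timer(timer))
 *			__sock_put(sk);		// drop the timer's ref
 *	}
 *
 *	void sk_reset_timer(struct sock *sk, struct timer_list *timer,
 *			    unsigned long expires)
 *	{
 *		if (!mod_timer(timer, expires))
 *			sock_hold(sk);		// timer was idle: take a ref
 *	}
 *
 * The reference keeps the socket alive until the timer fires or is
 * cancelled, which is why every handler below ends in sock_put().
 */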
static void tcp_write_err(struct sock *sk)
sk->sk_error_report(sk);
tcp_done(sk);
- NET_INC_STATS_BH(TCPAbortOnTimeout);
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT);
}
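/*
 * Elided just above (assumed): tcp_write_err() first latches the error
 * code, promoting any pending soft error,
 *
 *	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
 *
 * so the user sees an ICMP-derived error if one was recorded and
 * ETIMEDOUT otherwise.
 */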
/* Do not allow orphaned sockets to eat all our resources.
*/
static int tcp_out_of_resources(struct sock *sk, int do_reset)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
int orphans = atomic_read(&tcp_orphan_count);
/* If peer does not open window for a long time, or did not transmit
 * anything for a long time, penalize it. */
if (do_reset)
tcp_send_active_reset(sk, GFP_ATOMIC);
tcp_done(sk);
- NET_INC_STATS_BH(TCPAbortOnMemory);
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
return 1;
}
return 0;
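/*
 * Sketch of the elided kill test (assumed shape): the orphan is reset
 * when the administrative limit is exceeded or memory pressure is
 * severe, roughly
 *
 *	if (orphans >= sysctl_tcp_max_orphans ||
 *	    (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
 *	     atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]))
 *		... take the reset/tcp_done() path shown above ...
 */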
/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
int retry_until;
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
static void tcp_delack_timer(unsigned long data)
{
struct sock *sk = (struct sock *)data;
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
/* Try again later. */
tp->ack.blocked = 1;
- NET_INC_STATS_BH(DelayedACKLocked);
- if (!mod_timer(&tp->delack_timer, jiffies + TCP_DELACK_MIN))
- sock_hold(sk);
+ NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED);
+ sk_reset_timer(sk, &tp->delack_timer, jiffies + TCP_DELACK_MIN);
goto out_unlock;
}
- tcp_mem_reclaim(sk);
+ sk_stream_mem_reclaim(sk);
if (sk->sk_state == TCP_CLOSE || !(tp->ack.pending & TCP_ACK_TIMER))
goto out;
if (time_after(tp->ack.timeout, jiffies)) {
- if (!mod_timer(&tp->delack_timer, tp->ack.timeout))
- sock_hold(sk);
+ sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout);
goto out;
}
tp->ack.pending &= ~TCP_ACK_TIMER;
if (skb_queue_len(&tp->ucopy.prequeue)) {
struct sk_buff *skb;
- NET_ADD_STATS_BH(TCPSchedulerFailed,
- skb_queue_len(&tp->ucopy.prequeue));
+ NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED,
+ skb_queue_len(&tp->ucopy.prequeue));
while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
sk->sk_backlog_rcv(sk, skb);
tp->ack.ato = TCP_ATO_MIN;
}
tcp_send_ack(sk);
- NET_INC_STATS_BH(DelayedACKs);
+ NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS);
}
TCP_CHECK_TIMER(sk);
out:
if (tcp_memory_pressure)
- tcp_mem_reclaim(sk);
+ sk_stream_mem_reclaim(sk);
out_unlock:
bh_unlock_sock(sk);
sock_put(sk);
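/*
 * For context (assumed arming side in tcp_output.c, not part of this
 * hunk): tcp_send_delayed_ack() sets the state this handler checks,
 * roughly
 *
 *	tp->ack.pending |= TCP_ACK_SCHED | TCP_ACK_TIMER;
 *	tp->ack.timeout = timeout;
 *	sk_reset_timer(sk, &tp->delack_timer, timeout);
 *
 * which is why the handler rechecks TCP_ACK_TIMER and tp->ack.timeout:
 * the ack may already have been sent, or the deadline moved, by the
 * time the timer fires.
 */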
static void tcp_probe_timer(struct sock *sk)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
int max_probes;
- if (tp->packets_out || !tp->send_head) {
+ if (tp->packets_out || !sk->sk_send_head) {
tp->probes_out = 0;
return;
}
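/*
 * Sketch of the elided remainder of tcp_probe_timer() (assumed):
 * max_probes defaults to sysctl_tcp_retries2, orphans get
 * tcp_orphan_retries() plus a tcp_out_of_resources() check, and then
 *
 *	if (tp->probes_out > max_probes)
 *		tcp_write_err(sk);
 *	else
 *		tcp_send_probe0(sk);
 *
 * i.e. either give up on the connection or send another zero-window
 * probe.
 */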
static void tcp_retransmit_timer(struct sock *sk)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
- if (tp->packets_out == 0)
+ if (!tp->packets_out)
goto out;
BUG_TRAP(!skb_queue_empty(&sk->sk_write_queue));
/* Receiver dastardly shrinks window. Our retransmits
 * become zero probes, but we should not timeout this
 * connection. If the socket is an orphan, time it out,
 * we cannot allow such beasts to hang infinitely.
 */
#ifdef TCP_DEBUG
if (net_ratelimit()) {
- struct inet_opt *inet = inet_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
printk(KERN_DEBUG "TCP: Treason uncloaked! Peer %u.%u.%u.%u:%u/%u shrinks window %u:%u. Repaired.\n",
NIPQUAD(inet->daddr), ntohs(inet->dport),
inet->num, tp->snd_una, tp->snd_nxt);
if (tp->retransmits == 0) {
if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) {
- if (tp->sack_ok) {
+ if (tp->rx_opt.sack_ok) {
if (tp->ca_state == TCP_CA_Recovery)
- NET_INC_STATS_BH(TCPSackRecoveryFail);
+ NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
else
- NET_INC_STATS_BH(TCPSackFailures);
+ NET_INC_STATS_BH(LINUX_MIB_TCPSACKFAILURES);
} else {
if (tp->ca_state == TCP_CA_Recovery)
- NET_INC_STATS_BH(TCPRenoRecoveryFail);
+ NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERYFAIL);
else
- NET_INC_STATS_BH(TCPRenoFailures);
+ NET_INC_STATS_BH(LINUX_MIB_TCPRENOFAILURES);
}
} else if (tp->ca_state == TCP_CA_Loss) {
- NET_INC_STATS_BH(TCPLossFailures);
+ NET_INC_STATS_BH(LINUX_MIB_TCPLOSSFAILURES);
} else {
- NET_INC_STATS_BH(TCPTimeouts);
+ NET_INC_STATS_BH(LINUX_MIB_TCPTIMEOUTS);
}
}
static void tcp_write_timer(unsigned long data)
{
struct sock *sk = (struct sock *)data;
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
int event;
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
/* Try again later */
- if (!mod_timer(&tp->retransmit_timer, jiffies + (HZ/20)))
- sock_hold(sk);
+ sk_reset_timer(sk, &tp->retransmit_timer, jiffies + (HZ / 20));
goto out_unlock;
}
goto out;
if (time_after(tp->timeout, jiffies)) {
- if (!mod_timer(&tp->retransmit_timer, tp->timeout))
- sock_hold(sk);
+ sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout);
goto out;
}
TCP_CHECK_TIMER(sk);
out:
- tcp_mem_reclaim(sk);
+ sk_stream_mem_reclaim(sk);
out_unlock:
bh_unlock_sock(sk);
sock_put(sk);
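/*
 * The elided dispatch in tcp_write_timer() (assumed shape): tp->pending
 * encodes which logical timer is due, and the handler fans out as
 *
 *	event = tp->pending;
 *	tp->pending = 0;
 *
 *	switch (event) {
 *	case TCP_TIME_RETRANS:
 *		tcp_retransmit_timer(sk);
 *		break;
 *	case TCP_TIME_PROBE0:
 *		tcp_probe_timer(sk);
 *		break;
 *	}
 *
 * so a single kernel timer serves both retransmits and zero-window
 * probes.
 */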
static void tcp_synack_timer(struct sock *sk)
{
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
struct tcp_listen_opt *lopt = tp->listen_opt;
int max_retries = tp->syn_retries ? : sysctl_tcp_synack_retries;
int thresh = max_retries;
void tcp_delete_keepalive_timer(struct sock *sk)
{
- if (timer_pending(&sk->sk_timer) && del_timer (&sk->sk_timer))
- __sock_put(sk);
+ sk_stop_timer(sk, &sk->sk_timer);
}
void tcp_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
- if (!mod_timer(&sk->sk_timer, jiffies + len))
- sock_hold(sk);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
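/*
 * Usage sketch (assumed helper from include/net/tcp.h): callers derive
 * the interval from the per-socket setting with a sysctl fallback,
 *
 *	static inline int keepalive_time_when(const struct tcp_sock *tp)
 *	{
 *		return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
 *	}
 *
 *	tcp_reset_keepalive_timer(sk, keepalive_time_when(tp));
 *
 * tcp_set_keepalive() below arms or cancels sk->sk_timer the same way
 * when SO_KEEPALIVE is toggled.
 */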
void tcp_set_keepalive(struct sock *sk, int val)
static void tcp_keepalive_timer(unsigned long data)
{
struct sock *sk = (struct sock *)data;
- struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
__u32 elapsed;
/* Only process if socket is not in use. */
elapsed = keepalive_time_when(tp);
/* It is alive without keepalive 8) */
- if (tp->packets_out || tp->send_head)
+ if (tp->packets_out || sk->sk_send_head)
goto resched;
elapsed = tcp_time_stamp - tp->rcv_tstamp;
}
TCP_CHECK_TIMER(sk);
- tcp_mem_reclaim(sk);
+ sk_stream_mem_reclaim(sk);
resched:
tcp_reset_keepalive_timer(sk, elapsed);