tp->snd_cwnd_stamp = tcp_time_stamp;
}
-static void init_bictcp(struct tcp_opt *tp)
-{
- tp->bictcp.cnt = 0;
-
- tp->bictcp.last_max_cwnd = 0;
- tp->bictcp.last_cwnd = 0;
- tp->bictcp.last_stamp = 0;
-}
-
/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk, struct tcp_opt *tp)
{
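The hunk above removes the init_bictcp() helper that zeroed the per-connection BIC state; later hunks drop its call sites as well (when entering the Loss state, and alongside init_westwood() in the TCP_SYN_SENT setup paths), so the static function would otherwise be left as dead code. For orientation, the cached bictcp.cnt this state feeds is consumed by the classic Linux congestion-avoidance increment: cwnd grows by one segment only after cnt ACKs have been counted. A minimal standalone sketch of that pattern, with hypothetical struct stand-ins whose field names mirror the patch:

    #include <stdint.h>

    /* Hypothetical stand-ins for the kernel state touched here. */
    struct bictcp_sketch { uint32_t cnt, last_max_cwnd, last_cwnd, last_stamp; };
    struct tcp_sketch { uint32_t snd_cwnd, snd_cwnd_cnt; struct bictcp_sketch bictcp; };

    /* Classic increment: BIC tunes growth speed purely by choosing cnt.
     * A small cnt means fast growth, a large cnt means slow growth. */
    static void cong_avoid_sketch(struct tcp_sketch *tp, uint32_t cnt)
    {
        if (tp->snd_cwnd_cnt >= cnt) {
            tp->snd_cwnd++;          /* counted a full cnt of ACKs */
            tp->snd_cwnd_cnt = 0;
        } else {
            tp->snd_cwnd_cnt++;      /* just count this ACK */
        }
    }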
/* This exciting event is worth to be remembered. 8) */
if (ts)
- NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
+ NET_INC_STATS_BH(TCPTSReorder);
else if (IsReno(tp))
- NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
+ NET_INC_STATS_BH(TCPRenoReorder);
else if (IsFack(tp))
- NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
+ NET_INC_STATS_BH(TCPFACKReorder);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
+ NET_INC_STATS_BH(TCPSACKReorder);
#if FASTRETRANS_DEBUG > 1
printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
tp->sack_ok, tp->ca_state,
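This hunk and every NET_INC_STATS_BH change below are the same mechanical rename: the LINUX_MIB_* enum constants become the older identifiers, which were member names of struct linux_mib rather than indices into a counter array. A standalone sketch contrasting the two styles (the stand-in macros are hypothetical; the real kernel versions also select a per-CPU mib copy, with the _BH suffix marking bottom-half context):

    #include <stdio.h>

    /* Old style (restored by this patch): the argument is a member name. */
    struct linux_mib_sketch { unsigned long TCPTSReorder; };
    static struct linux_mib_sketch net_stats;
    #define NET_INC_STATS_OLD(field) (net_stats.field++)

    /* New style (being reverted): the argument indexes a counter array. */
    enum { LINUX_MIB_TCPTSREORDER, __LINUX_MIB_MAX };
    static unsigned long mibs[__LINUX_MIB_MAX];
    #define NET_INC_STATS_NEW(field) (mibs[field]++)

    int main(void)
    {
        NET_INC_STATS_OLD(TCPTSReorder);           /* member-name form */
        NET_INC_STATS_NEW(LINUX_MIB_TCPTSREORDER); /* enum-index form */
        printf("%lu %lu\n", net_stats.TCPTSReorder,
               mibs[LINUX_MIB_TCPTSREORDER]);
        return 0;
    }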
if (before(start_seq, ack)) {
dup_sack = 1;
tp->sack_ok |= 4;
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+ NET_INC_STATS_BH(TCPDSACKRecv);
} else if (num_sacks > 1 &&
!after(end_seq, ntohl(sp[1].end_seq)) &&
!before(start_seq, ntohl(sp[1].start_seq))) {
dup_sack = 1;
tp->sack_ok |= 4;
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+ NET_INC_STATS_BH(TCPDSACKOfoRecv);
}
/* D-SACK for already forgotten data...
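Both D-SACK branches above lean on the wrap-safe sequence comparisons from tcp.h: a first SACK block below the cumulative ACK, or one fully covered by the second block, marks reported data as a duplicate (per RFC 2883), and tp->sack_ok |= 4 records that the peer speaks D-SACK. before()/after() stay correct across 32-bit sequence wraparound by comparing signed differences; a standalone sketch of their shape:

    #include <assert.h>
    #include <stdint.h>

    /* seq1 is "before" seq2 when the signed distance is negative;
     * modulo-2^32 arithmetic keeps this valid across wraparound. */
    static int before(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) < 0;
    }
    #define after(seq2, seq1) before(seq1, seq2)

    int main(void)
    {
        assert(before(1, 2));
        /* Near the wrap point: 0xFFFFFFF0 still sorts before 0x10. */
        assert(before(0xFFFFFFF0u, 0x10u));
        assert(after(0x10u, 0xFFFFFFF0u));
        return 0;
    }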
tp->lost_out++;
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
flag |= FLAG_DATA_SACKED;
- NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
+ NET_INC_STATS_BH(TCPLostRetransmit);
}
}
}
tcp_set_ca_state(tp, TCP_CA_Loss);
tp->high_seq = tp->frto_highmark;
TCP_ECN_queue_cwr(tp);
-
- init_bictcp(tp);
}
void tcp_clear_retrans(struct tcp_opt *tp)
*/
if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
- NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
+ NET_INC_STATS_BH(TCPSACKReneging);
tcp_enter_loss(sk, 1);
tp->retransmits++;
DBGUNDO(sk, tp, tp->ca_state == TCP_CA_Loss ? "loss" : "retrans");
tcp_undo_cwr(tp, 1);
if (tp->ca_state == TCP_CA_Loss)
- NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+ NET_INC_STATS_BH(TCPLossUndo);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
+ NET_INC_STATS_BH(TCPFullUndo);
tp->undo_marker = 0;
}
if (tp->snd_una == tp->high_seq && IsReno(tp)) {
DBGUNDO(sk, tp, "D-SACK");
tcp_undo_cwr(tp, 1);
tp->undo_marker = 0;
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
+ NET_INC_STATS_BH(TCPDSACKUndo);
}
}
DBGUNDO(sk, tp, "Hoe");
tcp_undo_cwr(tp, 0);
- NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
+ NET_INC_STATS_BH(TCPPartialUndo);
/* So... Do not make Hoe's retransmit yet.
* If the first packet was delayed, the rest
tp->lost_out = 0;
tp->left_out = tp->sacked_out;
tcp_undo_cwr(tp, 1);
- NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
+ NET_INC_STATS_BH(TCPLossUndo);
tp->retransmits = 0;
tp->undo_marker = 0;
if (!IsReno(tp))
tp->ca_state != TCP_CA_Open &&
tp->fackets_out > tp->reordering) {
tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq);
- NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
+ NET_INC_STATS_BH(TCPLoss);
}
/* D. Synchronize left_out to current state. */
/* Otherwise enter Recovery state */
if (IsReno(tp))
- NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY);
+ NET_INC_STATS_BH(TCPRenoRecovery);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY);
+ NET_INC_STATS_BH(TCPSackRecovery);
tp->high_seq = tp->snd_nxt;
tp->prior_ssthresh = 0;
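The Reno/FACK/SACK splits in this hunk and in the reordering hunk earlier key off tp->sack_ok, which doubles as a small feature bit-field; the D-SACK hunk above sets bit 2 via tp->sack_ok |= 4. The predicates have roughly this shape in this era's tcp.h (an assumed layout worth checking against the tree, not a quotation):

    /* Assumed layout: bit 0 = SACK negotiated, bit 1 = FACK in use,
     * bit 2 = peer has sent D-SACKs (the tp->sack_ok |= 4 above). */
    #define IsReno(tp)  ((tp)->sack_ok == 0)
    #define IsFack(tp)  ((tp)->sack_ok & 2)
    #define IsDSack(tp) ((tp)->sack_ok & 4)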
if (!sysctl_tcp_bic)
return tp->snd_cwnd;
- if (tp->bictcp.last_cwnd == tp->snd_cwnd &&
- (s32)(tcp_time_stamp - tp->bictcp.last_stamp) <= (HZ>>5))
- return tp->bictcp.cnt;
-
+ if (tp->bictcp.last_cwnd == tp->snd_cwnd)
+ return tp->bictcp.cnt; /* cwnd unchanged: reuse cached cnt */
+
tp->bictcp.last_cwnd = tp->snd_cwnd;
- tp->bictcp.last_stamp = tcp_time_stamp;
/* start off normal */
if (tp->snd_cwnd <= sysctl_tcp_bic_low_window)
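The removed clause above is a time-based expiry on the cached bictcp.cnt: even with an unchanged cwnd, the old code recomputed the count once the cache was older than HZ>>5 jiffies, about 31 ms at HZ=1000 (and about 30 ms at HZ=100, since 100>>5 is 3 jiffies). After the change the cache is keyed on cwnd alone and never ages out, which is also what lets last_stamp disappear from the state. The two gate conditions side by side in a compilable sketch (field names follow the hunk; the wrappers are hypothetical):

    #include <stdint.h>

    #define HZ 1000  /* assumed tick rate for the arithmetic above */

    struct bic_sketch { uint32_t last_cwnd, last_stamp, cnt; };

    /* Old gate (removed): reuse the cache only while it is fresh. */
    static int cache_valid_old(const struct bic_sketch *b,
                               uint32_t cwnd, uint32_t now)
    {
        return b->last_cwnd == cwnd &&
               (int32_t)(now - b->last_stamp) <= (HZ >> 5);
    }

    /* New gate: reuse the cache for as long as cwnd is unchanged. */
    static int cache_valid_new(const struct bic_sketch *b, uint32_t cwnd)
    {
        return b->last_cwnd == cwnd;
    }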
tcp_westwood_fast_bw(sk, skb);
flag |= FLAG_WIN_UPDATE;
- NET_INC_STATS_BH(LINUX_MIB_TCPHPACKS);
+ NET_INC_STATS_BH(TCPHPAcks);
} else {
if (ack_seq != TCP_SKB_CB(skb)->end_seq)
flag |= FLAG_DATA;
else
- NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
+ NET_INC_STATS_BH(TCPPureAcks);
flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq);
{
if (tp->sack_ok && sysctl_tcp_dsack) {
if (before(seq, tp->rcv_nxt))
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT);
+ NET_INC_STATS_BH(TCPDSACKOldSent);
else
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFOSENT);
+ NET_INC_STATS_BH(TCPDSACKOfoSent);
tp->dsack = 1;
tp->duplicate_sack[0].start_seq = seq;
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
- NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
+ NET_INC_STATS_BH(DelayedACKLost);
tcp_enter_quickack_mode(tp);
if (tp->sack_ok && sysctl_tcp_dsack) {
if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
/* A retransmit, 2nd most common case. Force an immediate ack. */
- NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
+ NET_INC_STATS_BH(DelayedACKLost);
tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
out_of_window:
struct sk_buff *next = skb->next;
__skb_unlink(skb, skb->list);
__kfree_skb(skb);
- NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+ NET_INC_STATS_BH(TCPRcvCollapsed);
skb = next;
continue;
}
struct sk_buff *next = skb->next;
__skb_unlink(skb, skb->list);
__kfree_skb(skb);
- NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
+ NET_INC_STATS_BH(TCPRcvCollapsed);
skb = next;
if (skb == tail || skb->h.th->syn || skb->h.th->fin)
return;
SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
- NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
+ NET_INC_STATS_BH(PruneCalled);
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
tcp_clamp_window(sk, tp);
/* First, purge the out_of_order queue. */
if (skb_queue_len(&tp->out_of_order_queue)) {
- NET_ADD_STATS_BH(LINUX_MIB_OFOPRUNED,
+ NET_ADD_STATS_BH(OfoPruned,
skb_queue_len(&tp->out_of_order_queue));
__skb_queue_purge(&tp->out_of_order_queue);
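Unlike the single-event counters elsewhere in this patch, pruning the out-of-order queue accounts one event per dropped skb, so it goes through the add-by-n form of the macro. A sketch of the pair with hypothetical stand-ins (the real macros also pick the current CPU's mib and assume bottom-half context):

    struct net_stats_sketch { unsigned long PruneCalled, OfoPruned; };
    static struct net_stats_sketch stats;

    #define NET_INC_STATS_SKETCH(field)    (stats.field += 1)
    #define NET_ADD_STATS_SKETCH(field, n) (stats.field += (n))

    /* Mirrors the hunk above: one PruneCalled event, plus one
     * OfoPruned count per skb about to be purged. */
    static void prune_accounting_sketch(unsigned int ofo_queue_len)
    {
        NET_INC_STATS_SKETCH(PruneCalled);
        NET_ADD_STATS_SKETCH(OfoPruned, ofo_queue_len);
    }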
* drop receive data on the floor. It will get retransmitted
* and hopefully then we'll have sufficient space.
*/
- NET_INC_STATS_BH(LINUX_MIB_RCVPRUNED);
+ NET_INC_STATS_BH(RcvPruned);
/* Massive buffer overcommit. */
tp->pred_flags = 0;
tcp_data_snd_check(sk);
return 0;
} else { /* Header too small */
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
goto discard;
}
} else {
__skb_pull(skb, tcp_header_len);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
- NET_INC_STATS_BH(LINUX_MIB_TCPHPHITSTOUSER);
+ NET_INC_STATS_BH(TCPHPHitsToUser);
eaten = 1;
}
}
if ((int)skb->truesize > sk->sk_forward_alloc)
goto step5;
- NET_INC_STATS_BH(LINUX_MIB_TCPHPHITS);
+ NET_INC_STATS_BH(TCPHPHits);
/* Bulk data transfer: receiver */
__skb_pull(skb,tcp_header_len);
if (tcp_fast_parse_options(skb, th, tp) && tp->saw_tstamp &&
tcp_paws_discard(tp, skb)) {
if (!th->rst) {
- NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+ NET_INC_STATS_BH(PAWSEstabRejected);
tcp_send_dupack(sk, skb);
goto discard;
}
tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+ TCP_INC_STATS_BH(TcpInErrs);
+ NET_INC_STATS_BH(TCPAbortOnSyn);
tcp_reset(sk);
return 1;
}
return 0;
csum_error:
- TCP_INC_STATS_BH(TCP_MIB_INERRS);
+ TCP_INC_STATS_BH(TcpInErrs);
discard:
__kfree_skb(skb);
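Two counter families are being renamed side by side here. TCP_INC_STATS_BH feeds the standard RFC 1213 TCP MIB, so TcpInErrs surfaces as InErrs on the Tcp: line of /proc/net/snmp, while NET_INC_STATS_BH feeds the Linux extension MIB shown on the TCPExt: line of /proc/net/netstat. The SYN-after-rcv_nxt hunk above bumps both, since a bogus in-window SYN is a protocol error as well as a Linux-tracked abort cause. A sketch of the split with stand-in tables (the real ones are per-CPU):

    /* Standard TCP MIB vs. Linux extension MIB, illustration only. */
    static struct { unsigned long TcpInErrs; } tcp_mib;      /* /proc/net/snmp */
    static struct { unsigned long TCPAbortOnSyn; } net_mib;  /* /proc/net/netstat */

    #define TCP_INC_STATS_SKETCH(field)  (tcp_mib.field++)
    #define NET_INC_STATS_SKETCH2(field) (net_mib.field++)

    static void abort_on_syn_accounting(void)
    {
        TCP_INC_STATS_SKETCH(TcpInErrs);
        NET_INC_STATS_SKETCH2(TCPAbortOnSyn);
    }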
if (tp->saw_tstamp && tp->rcv_tsecr &&
!between(tp->rcv_tsecr, tp->retrans_stamp,
tcp_time_stamp)) {
- NET_INC_STATS_BH(LINUX_MIB_PAWSACTIVEREJECTED);
+ NET_INC_STATS_BH(PAWSActiveRejected);
goto reset_and_undo;
}
return 1;
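The PAWS check on the active open above rejects an echoed timestamp unless it falls inside [retrans_stamp, tcp_time_stamp]. between() does the containment test with a single unsigned comparison that, like before()/after(), survives 32-bit wraparound; a standalone sketch of its shape (the kernel's helper lives in tcp.h):

    #include <assert.h>
    #include <stdint.h>

    /* Wrap-safe "seq2 <= seq1 <= seq3": measure both seq1 and seq3
     * as unsigned distances from seq2 and compare those. */
    static int between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
    {
        return seq3 - seq2 >= seq1 - seq2;
    }

    int main(void)
    {
        assert(between(5, 1, 10));               /* inside the window */
        assert(!between(11, 1, 10));             /* beyond the right edge */
        assert(between(2u, 0xFFFFFFF0u, 0x10u)); /* across the wrap point */
        return 0;
    }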
init_westwood(sk);
- init_bictcp(tp);
/* Now we have several options: In theory there is
* nothing else in the frame. KA9Q has an option to
case TCP_SYN_SENT:
init_westwood(sk);
- init_bictcp(tp);
queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
if (queued >= 0)
if (tcp_fast_parse_options(skb, th, tp) && tp->saw_tstamp &&
tcp_paws_discard(tp, skb)) {
if (!th->rst) {
- NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED);
+ NET_INC_STATS_BH(PAWSEstabRejected);
tcp_send_dupack(sk, skb);
goto discard;
}
* Check for a SYN in window.
*/
if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN);
+ NET_INC_STATS_BH(TCPAbortOnSyn);
tcp_reset(sk);
return 1;
}
(TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
tcp_done(sk);
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+ NET_INC_STATS_BH(TCPAbortOnData);
return 1;
}
if (sk->sk_shutdown & RCV_SHUTDOWN) {
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
- NET_INC_STATS_BH(LINUX_MIB_TCPABORTONDATA);
+ NET_INC_STATS_BH(TCPAbortOnData);
tcp_reset(sk);
return 1;
}