X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=net%2Fipv4%2Ftcp_output.c;fp=net%2Fipv4%2Ftcp_output.c;h=310f2e610582afdd1580a8289da7d5af3ea88a33;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=5dc3118dffef4162e22e5f148a415dddf1851f9f;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 5dc3118df..310f2e610 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -45,23 +45,12 @@ /* People can turn this off for buggy TCP's found in printers etc. */ int sysctl_tcp_retrans_collapse = 1; -/* People can turn this on to work with those rare, broken TCPs that - * interpret the window field as a signed quantity. - */ -int sysctl_tcp_workaround_signed_windows = 0; - /* This limits the percentage of the congestion window which we * will allow a single TSO frame to consume. Building TSO frames * which are too large can cause TCP streams to be bursty. */ int sysctl_tcp_tso_win_divisor = 3; -int sysctl_tcp_mtu_probing = 0; -int sysctl_tcp_base_mss = 512; - -/* By default, RFC2861 behavior. */ -int sysctl_tcp_slow_start_after_idle = 1; - static void update_send_head(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) { @@ -141,8 +130,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp, struct inet_connection_sock *icsk = inet_csk(sk); const u32 now = tcp_time_stamp; - if (sysctl_tcp_slow_start_after_idle && - (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)) + if (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto) tcp_cwnd_restart(sk, __sk_dst_get(sk)); tp->lsndtime = now; @@ -183,25 +171,18 @@ void tcp_select_initial_window(int __space, __u32 mss, space = (space / mss) * mss; /* NOTE: offering an initial window larger than 32767 - * will break some buggy TCP stacks. If the admin tells us - * it is likely we could be speaking with such a buggy stack - * we will truncate our initial window offering to 32K-1 - * unless the remote has sent us a window scaling option, - * which we interpret as a sign the remote TCP is not - * misinterpreting the window field as a signed quantity. + * will break some buggy TCP stacks. We try to be nice. + * If we are not window scaling, then this truncates + * our initial window offering to 32k. There should also + * be a sysctl option to stop being nice. */ - if (sysctl_tcp_workaround_signed_windows) - (*rcv_wnd) = min(space, MAX_TCP_WINDOW); - else - (*rcv_wnd) = space; - + (*rcv_wnd) = min(space, MAX_TCP_WINDOW); (*rcv_wscale) = 0; if (wscale_ok) { /* Set window scaling on max possible window * See RFC1323 for an explanation of the limit to 14 */ space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); - space = min_t(u32, space, *window_clamp); while (space > 65535 && (*rcv_wscale) < 14) { space >>= 1; (*rcv_wscale)++; @@ -254,7 +235,7 @@ static u16 tcp_select_window(struct sock *sk) /* Make sure we do not exceed the maximum possible * scaled window. 
*/ - if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows) + if (!tp->rx_opt.rcv_wscale) new_win = min(new_win, MAX_TCP_WINDOW); else new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); @@ -467,11 +448,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, if (skb->len != tcp_header_size) tcp_event_data_sent(tp, skb, sk); - if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) - TCP_INC_STATS(TCP_MIB_OUTSEGS); + TCP_INC_STATS(TCP_MIB_OUTSEGS); err = icsk->icsk_af_ops->queue_xmit(skb, 0); - if (likely(err <= 0)) + if (unlikely(err <= 0)) return err; tcp_enter_cwr(sk); @@ -512,21 +492,20 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now) { - if (skb->len <= mss_now || !sk_can_gso(sk)) { + if (skb->len <= mss_now || + !(sk->sk_route_caps & NETIF_F_TSO)) { /* Avoid the costly divide in the normal * non-TSO case. */ - skb_shinfo(skb)->gso_segs = 1; - skb_shinfo(skb)->gso_size = 0; - skb_shinfo(skb)->gso_type = 0; + skb_shinfo(skb)->tso_segs = 1; + skb_shinfo(skb)->tso_size = 0; } else { unsigned int factor; factor = skb->len + (mss_now - 1); factor /= mss_now; - skb_shinfo(skb)->gso_segs = factor; - skb_shinfo(skb)->gso_size = mss_now; - skb_shinfo(skb)->gso_type = sk->sk_gso_type; + skb_shinfo(skb)->tso_segs = factor; + skb_shinfo(skb)->tso_size = mss_now; } } @@ -540,7 +519,6 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *buff; int nsize, old_factor; - int nlen; u16 flags; BUG_ON(len > skb->len); @@ -560,10 +538,8 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss if (buff == NULL) return -ENOMEM; /* We'll just try again later. */ - sk_charge_skb(sk, buff); - nlen = skb->len - len - nsize; - buff->truesize += nlen; - skb->truesize -= nlen; + buff->truesize = skb->len - len; + skb->truesize -= buff->truesize; /* Correct the sequence numbers. */ TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; @@ -649,7 +625,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss * eventually). The difference is that pulled data not copied, but * immediately discarded. */ -static void __pskb_trim_head(struct sk_buff *skb, int len) +static unsigned char *__pskb_trim_head(struct sk_buff *skb, int len) { int i, k, eat; @@ -674,6 +650,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len) skb->tail = skb->data; skb->data_len -= len; skb->len = skb->data_len; + return skb->tail; } int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) @@ -682,11 +659,12 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) return -ENOMEM; - /* If len == headlen, we avoid __skb_pull to preserve alignment. */ - if (unlikely(len < skb_headlen(skb))) + if (len <= skb_headlen(skb)) { __skb_pull(skb, len); - else - __pskb_trim_head(skb, len - skb_headlen(skb)); + } else { + if (__pskb_trim_head(skb, len-skb_headlen(skb)) == NULL) + return -ENOMEM; + } TCP_SKB_CB(skb)->seq += len; skb->ip_summed = CHECKSUM_HW; @@ -705,62 +683,6 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) return 0; } -/* Not accounting for SACKs here. 
*/ -int tcp_mtu_to_mss(struct sock *sk, int pmtu) -{ - struct tcp_sock *tp = tcp_sk(sk); - struct inet_connection_sock *icsk = inet_csk(sk); - int mss_now; - - /* Calculate base mss without TCP options: - It is MMS_S - sizeof(tcphdr) of rfc1122 - */ - mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); - - /* Clamp it (mss_clamp does not include tcp options) */ - if (mss_now > tp->rx_opt.mss_clamp) - mss_now = tp->rx_opt.mss_clamp; - - /* Now subtract optional transport overhead */ - mss_now -= icsk->icsk_ext_hdr_len; - - /* Then reserve room for full set of TCP options and 8 bytes of data */ - if (mss_now < 48) - mss_now = 48; - - /* Now subtract TCP options size, not including SACKs */ - mss_now -= tp->tcp_header_len - sizeof(struct tcphdr); - - return mss_now; -} - -/* Inverse of above */ -int tcp_mss_to_mtu(struct sock *sk, int mss) -{ - struct tcp_sock *tp = tcp_sk(sk); - struct inet_connection_sock *icsk = inet_csk(sk); - int mtu; - - mtu = mss + - tp->tcp_header_len + - icsk->icsk_ext_hdr_len + - icsk->icsk_af_ops->net_header_len; - - return mtu; -} - -void tcp_mtup_init(struct sock *sk) -{ - struct tcp_sock *tp = tcp_sk(sk); - struct inet_connection_sock *icsk = inet_csk(sk); - - icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1; - icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + - icsk->icsk_af_ops->net_header_len; - icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss); - icsk->icsk_mtup.probe_size = 0; -} - /* This function synchronize snd mss to current pmtu/exthdr set. tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts @@ -788,12 +710,25 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); - int mss_now; + /* Calculate base mss without TCP options: + It is MMS_S - sizeof(tcphdr) of rfc1122 + */ + int mss_now = (pmtu - icsk->icsk_af_ops->net_header_len - + sizeof(struct tcphdr)); - if (icsk->icsk_mtup.search_high > pmtu) - icsk->icsk_mtup.search_high = pmtu; + /* Clamp it (mss_clamp does not include tcp options) */ + if (mss_now > tp->rx_opt.mss_clamp) + mss_now = tp->rx_opt.mss_clamp; - mss_now = tcp_mtu_to_mss(sk, pmtu); + /* Now subtract optional transport overhead */ + mss_now -= icsk->icsk_ext_hdr_len; + + /* Then reserve room for full set of TCP options and 8 bytes of data */ + if (mss_now < 48) + mss_now = 48; + + /* Now subtract TCP options size, not including SACKs */ + mss_now -= tp->tcp_header_len - sizeof(struct tcphdr); /* Bound mss with half of window */ if (tp->max_window && mss_now > (tp->max_window>>1)) @@ -801,8 +736,6 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) /* And store cached results */ icsk->icsk_pmtu_cookie = pmtu; - if (icsk->icsk_mtup.enabled) - mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); tp->mss_cache = mss_now; return mss_now; @@ -825,7 +758,9 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) mss_now = tp->mss_cache; - if (large_allowed && sk_can_gso(sk) && !tp->urg_mode) + if (large_allowed && + (sk->sk_route_caps & NETIF_F_TSO) && + !tp->urg_mode) doing_tso = 1; if (dst) { @@ -858,8 +793,6 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) return mss_now; } -EXPORT_SYMBOL_GPL(tcp_current_mss); - /* Congestion window validation. 
(RFC2861) */ static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp) @@ -917,7 +850,7 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int if (!tso_segs || (tso_segs > 1 && - tcp_skb_mss(skb) != mss_now)) { + skb_shinfo(skb)->tso_size != mss_now)) { tcp_set_skb_tso_segs(sk, skb, mss_now); tso_segs = tcp_skb_pcount(skb); } @@ -1047,8 +980,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, if (unlikely(buff == NULL)) return -ENOMEM; - sk_charge_skb(sk, buff); - buff->truesize += nlen; + buff->truesize = nlen; skb->truesize -= nlen; /* Correct the sequence numbers. */ @@ -1133,140 +1065,6 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_ return 1; } -/* Create a new MTU probe if we are ready. - * Returns 0 if we should wait to probe (no cwnd available), - * 1 if a probe was sent, - * -1 otherwise */ -static int tcp_mtu_probe(struct sock *sk) -{ - struct tcp_sock *tp = tcp_sk(sk); - struct inet_connection_sock *icsk = inet_csk(sk); - struct sk_buff *skb, *nskb, *next; - int len; - int probe_size; - unsigned int pif; - int copy; - int mss_now; - - /* Not currently probing/verifying, - * not in recovery, - * have enough cwnd, and - * not SACKing (the variable headers throw things off) */ - if (!icsk->icsk_mtup.enabled || - icsk->icsk_mtup.probe_size || - inet_csk(sk)->icsk_ca_state != TCP_CA_Open || - tp->snd_cwnd < 11 || - tp->rx_opt.eff_sacks) - return -1; - - /* Very simple search strategy: just double the MSS. */ - mss_now = tcp_current_mss(sk, 0); - probe_size = 2*tp->mss_cache; - if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { - /* TODO: set timer for probe_converge_event */ - return -1; - } - - /* Have enough data in the send queue to probe? */ - len = 0; - if ((skb = sk->sk_send_head) == NULL) - return -1; - while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb)) - skb = skb->next; - if (len < probe_size) - return -1; - - /* Receive window check. */ - if (after(TCP_SKB_CB(skb)->seq + probe_size, tp->snd_una + tp->snd_wnd)) { - if (tp->snd_wnd < probe_size) - return -1; - else - return 0; - } - - /* Do we need to wait to drain cwnd? */ - pif = tcp_packets_in_flight(tp); - if (pif + 2 > tp->snd_cwnd) { - /* With no packets in flight, don't stall. */ - if (pif == 0) - return -1; - else - return 0; - } - - /* We're allowed to probe. Build it now. */ - if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL) - return -1; - sk_charge_skb(sk, nskb); - - skb = sk->sk_send_head; - __skb_insert(nskb, skb->prev, skb, &sk->sk_write_queue); - sk->sk_send_head = nskb; - - TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; - TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; - TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK; - TCP_SKB_CB(nskb)->sacked = 0; - nskb->csum = 0; - if (skb->ip_summed == CHECKSUM_HW) - nskb->ip_summed = CHECKSUM_HW; - - len = 0; - while (len < probe_size) { - next = skb->next; - - copy = min_t(int, skb->len, probe_size - len); - if (nskb->ip_summed) - skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); - else - nskb->csum = skb_copy_and_csum_bits(skb, 0, - skb_put(nskb, copy), copy, nskb->csum); - - if (skb->len <= copy) { - /* We've eaten all the data from this skb. - * Throw it away. 
*/ - TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags; - __skb_unlink(skb, &sk->sk_write_queue); - sk_stream_free_skb(sk, skb); - } else { - TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags & - ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH); - if (!skb_shinfo(skb)->nr_frags) { - skb_pull(skb, copy); - if (skb->ip_summed != CHECKSUM_HW) - skb->csum = csum_partial(skb->data, skb->len, 0); - } else { - __pskb_trim_head(skb, copy); - tcp_set_skb_tso_segs(sk, skb, mss_now); - } - TCP_SKB_CB(skb)->seq += copy; - } - - len += copy; - skb = next; - } - tcp_init_tso_segs(sk, nskb, nskb->len); - - /* We're ready to send. If this fails, the probe will - * be resegmented into mss-sized pieces by tcp_write_xmit(). */ - TCP_SKB_CB(nskb)->when = tcp_time_stamp; - if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { - /* Decrement cwnd here because we are sending - * effectively two packets. */ - tp->snd_cwnd--; - update_send_head(sk, tp, nskb); - - icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); - tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; - tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; - - return 1; - } - - return -1; -} - - /* This routine writes packets to the network. It advances the * send_head. This happens as incoming acks open up the remote * window for us. @@ -1280,7 +1078,6 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) struct sk_buff *skb; unsigned int tso_segs, sent_pkts; int cwnd_quota; - int result; /* If we are closed, the bytes will have to remain here. * In time closedown will finish, we empty the write queue and all @@ -1290,14 +1087,6 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) return 0; sent_pkts = 0; - - /* Do MTU probing. */ - if ((result = tcp_mtu_probe(sk)) == 0) { - return 0; - } else if (result > 0) { - sent_pkts = 1; - } - while ((skb = sk->sk_send_head)) { unsigned int limit; @@ -1373,7 +1162,6 @@ void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp, tcp_check_probe_timer(sk, tp); } } -EXPORT_SYMBOL_GPL(__tcp_push_pending_frames); /* Send _single_ skb sitting at the send head. This function requires * true push pending frames to setup probe timer etc. @@ -1669,15 +1457,9 @@ void tcp_simple_retransmit(struct sock *sk) int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); - struct inet_connection_sock *icsk = inet_csk(sk); unsigned int cur_mss = tcp_current_mss(sk, 0); int err; - /* Inconslusive MTU probe */ - if (icsk->icsk_mtup.probe_size) { - icsk->icsk_mtup.probe_size = 0; - } - /* Do not sent more than we queued. 1/4 is reserved for possible * copying overhead: fragmentation, tunneling, mangling etc. */ @@ -1728,9 +1510,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { if (!pskb_trim(skb, 0)) { TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1; - skb_shinfo(skb)->gso_segs = 1; - skb_shinfo(skb)->gso_size = 0; - skb_shinfo(skb)->gso_type = 0; + skb_shinfo(skb)->tso_segs = 1; + skb_shinfo(skb)->tso_size = 0; skb->ip_summed = CHECKSUM_NONE; skb->csum = 0; } @@ -1935,9 +1716,8 @@ void tcp_send_fin(struct sock *sk) skb->csum = 0; TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN); TCP_SKB_CB(skb)->sacked = 0; - skb_shinfo(skb)->gso_segs = 1; - skb_shinfo(skb)->gso_size = 0; - skb_shinfo(skb)->gso_type = 0; + skb_shinfo(skb)->tso_segs = 1; + skb_shinfo(skb)->tso_size = 0; /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). 
*/ TCP_SKB_CB(skb)->seq = tp->write_seq; @@ -1969,9 +1749,8 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) skb->csum = 0; TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST); TCP_SKB_CB(skb)->sacked = 0; - skb_shinfo(skb)->gso_segs = 1; - skb_shinfo(skb)->gso_size = 0; - skb_shinfo(skb)->gso_type = 0; + skb_shinfo(skb)->tso_segs = 1; + skb_shinfo(skb)->tso_size = 0; /* Send it off. */ TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp); @@ -2046,15 +1825,16 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, memset(th, 0, sizeof(struct tcphdr)); th->syn = 1; th->ack = 1; + if (dst->dev->features&NETIF_F_TSO) + ireq->ecn_ok = 0; TCP_ECN_make_synack(req, th); th->source = inet_sk(sk)->sport; th->dest = ireq->rmt_port; TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn; TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; TCP_SKB_CB(skb)->sacked = 0; - skb_shinfo(skb)->gso_segs = 1; - skb_shinfo(skb)->gso_size = 0; - skb_shinfo(skb)->gso_type = 0; + skb_shinfo(skb)->tso_segs = 1; + skb_shinfo(skb)->tso_size = 0; th->seq = htonl(TCP_SKB_CB(skb)->seq); th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ @@ -2105,7 +1885,6 @@ static void tcp_connect_init(struct sock *sk) if (tp->rx_opt.user_mss) tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; tp->max_window = 0; - tcp_mtup_init(sk); tcp_sync_mss(sk, dst_mtu(dst)); if (!tp->window_clamp) @@ -2158,13 +1937,13 @@ int tcp_connect(struct sock *sk) TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN; TCP_ECN_send_syn(sk, tp, buff); TCP_SKB_CB(buff)->sacked = 0; - skb_shinfo(buff)->gso_segs = 1; - skb_shinfo(buff)->gso_size = 0; - skb_shinfo(buff)->gso_type = 0; + skb_shinfo(buff)->tso_segs = 1; + skb_shinfo(buff)->tso_size = 0; buff->csum = 0; - tp->snd_nxt = tp->write_seq; TCP_SKB_CB(buff)->seq = tp->write_seq++; TCP_SKB_CB(buff)->end_seq = tp->write_seq; + tp->snd_nxt = tp->write_seq; + tp->pushed_seq = tp->write_seq; /* Send it off. */ TCP_SKB_CB(buff)->when = tcp_time_stamp; @@ -2174,12 +1953,6 @@ int tcp_connect(struct sock *sk) sk_charge_skb(sk, buff); tp->packets_out += tcp_skb_pcount(buff); tcp_transmit_skb(sk, buff, 1, GFP_KERNEL); - - /* We change tp->snd_nxt after the tcp_transmit_skb() call - * in order to make this packet get counted in tcpOutSegs. - */ - tp->snd_nxt = tp->write_seq; - tp->pushed_seq = tp->write_seq; TCP_INC_STATS(TCP_MIB_ACTIVEOPENS); /* Timer for repeating the SYN until an answer. */ @@ -2269,9 +2042,8 @@ void tcp_send_ack(struct sock *sk) buff->csum = 0; TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK; TCP_SKB_CB(buff)->sacked = 0; - skb_shinfo(buff)->gso_segs = 1; - skb_shinfo(buff)->gso_size = 0; - skb_shinfo(buff)->gso_type = 0; + skb_shinfo(buff)->tso_segs = 1; + skb_shinfo(buff)->tso_size = 0; /* Send it off, this clears delayed acks for us. */ TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp); @@ -2306,9 +2078,8 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent) skb->csum = 0; TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK; TCP_SKB_CB(skb)->sacked = urgent; - skb_shinfo(skb)->gso_segs = 1; - skb_shinfo(skb)->gso_size = 0; - skb_shinfo(skb)->gso_type = 0; + skb_shinfo(skb)->tso_segs = 1; + skb_shinfo(skb)->tso_size = 0; /* Use a previous sequence. This should cause the other * end to send an ack. 
Don't queue or clone SKB, just
	 * send it.
@@ -2411,4 +2182,3 @@ EXPORT_SYMBOL(tcp_make_synack);
 EXPORT_SYMBOL(tcp_simple_retransmit);
 EXPORT_SYMBOL(tcp_sync_mss);
 EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor);
-EXPORT_SYMBOL(tcp_mtup_init);
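
The diff strips this tree's tcp_output.c of the MTU-probing machinery (tcp_mtu_to_mss(), tcp_mss_to_mtu(), tcp_mtup_init(), tcp_mtu_probe() and the tcp_mtu_probing/tcp_base_mss sysctls) along with the tcp_workaround_signed_windows and tcp_slow_start_after_idle sysctls, and reverts the GSO skb fields (gso_segs/gso_size/gso_type) to the older tso_segs/tso_size pair. The MSS derivation that tcp_mtu_to_mss() provided is folded back inline into tcp_sync_mss(): path MTU minus network and TCP header lengths, clamped to the peer's mss_clamp, minus extension-header overhead, floored at 48 bytes, then minus the fixed TCP option space. A minimal standalone sketch of that arithmetic follows; mtu_to_mss() and its parameters are illustrative stand-ins for the socket state, not kernel API:

#include <stdio.h>

/* Illustrative IPv4 values; in the kernel these come from
 * icsk->icsk_af_ops->net_header_len and sizeof(struct tcphdr). */
#define NET_HDR_LEN	20
#define TCP_HDR_LEN	20

/* Mirrors the calculation inlined into tcp_sync_mss() by this patch. */
static int mtu_to_mss(int pmtu, int mss_clamp, int ext_hdr_len,
		      int tcp_header_len)
{
	/* Base mss without TCP options: MMS_S - sizeof(tcphdr), RFC 1122. */
	int mss_now = pmtu - NET_HDR_LEN - TCP_HDR_LEN;

	/* Clamp it (mss_clamp does not include TCP options). */
	if (mss_now > mss_clamp)
		mss_now = mss_clamp;

	/* Subtract optional transport overhead, e.g. IPsec headers. */
	mss_now -= ext_hdr_len;

	/* Reserve room for a full set of TCP options and 8 bytes of data. */
	if (mss_now < 48)
		mss_now = 48;

	/* Finally subtract the fixed TCP option size, not counting SACKs. */
	mss_now -= tcp_header_len - TCP_HDR_LEN;

	return mss_now;
}

int main(void)
{
	/* 1500-byte Ethernet MTU, timestamps on: 12 bytes of options. */
	printf("mss = %d\n", mtu_to_mss(1500, 65535, 0, TCP_HDR_LEN + 12));
	return 0;
}

This prints 1448, the per-segment payload Linux actually uses on Ethernet with timestamps enabled. The deleted tcp_mtu_probe() relied on the same conversion, plus its inverse tcp_mss_to_mtu(), to bound its probe-size search between icsk_mtup.search_low and search_high.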