+/* There is one downside to this scheme. Although we keep the
+ * ACK clock ticking, adjusting packet counters and advancing
+ * congestion window, we do not liberate socket send buffer
+ * space.
+ *
+ * Mucking with skb->truesize and sk->sk_wmem_alloc et al.
+ * then making a write space wakeup callback is a possible
+ * future enhancement. WARNING: it is not trivial to make.
+ */
+/* Handle an ACK that covers only part of a TSO super-packet:
+ * count how many full MSS-sized sub-segments the ACK fully
+ * acknowledges, trim those bytes off the head of the skb, and
+ * adjust the per-connection packet accounting (retrans_out,
+ * sacked_out, lost_out, fackets_out, packets_out) to match.
+ *
+ * Returns a FLAG_* bitmask (0 if nothing was acked or the trim
+ * failed). May set *seq_rtt to an RTT sample in "now - when"
+ * units, or to -1 when the sample is unusable because the data
+ * was retransmitted (Karn's rule).
+ */
+static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
+ __u32 now, __s32 *seq_rtt)
+{
+ struct tcp_opt *tp = tcp_sk(sk);
+ struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
+ __u32 mss = tcp_skb_mss(skb);
+ __u32 snd_una = tp->snd_una;
+ __u32 orig_seq, seq;
+ __u32 packets_acked = 0;
+ int acked = 0;
+
+ /* If we get here, the whole TSO packet has not been
+ * acked.
+ */
+ BUG_ON(!after(scb->end_seq, snd_una));
+
+ /* Walk forward in whole-MSS steps and count only those
+ * sub-segments that end at or below snd_una, i.e. the ones
+ * this ACK fully acknowledges.
+ */
+ seq = orig_seq = scb->seq;
+ while (!after(seq + mss, snd_una)) {
+ packets_acked++;
+ seq += mss;
+ }
+
+ /* Chop the acked bytes off the front of the skb. NOTE(review):
+ * a nonzero return presumably indicates the trim failed (e.g.
+ * allocation failure) and we report nothing acked -- confirm
+ * tcp_trim_head()'s failure semantics against its definition.
+ */
+ if (tcp_trim_head(sk, skb, (seq - orig_seq)))
+ return 0;
+
+ if (packets_acked) {
+ __u8 sacked = scb->sacked;
+
+ acked |= FLAG_DATA_ACKED;
+ if (sacked) {
+ if (sacked & TCPCB_RETRANS) {
+ if (sacked & TCPCB_SACKED_RETRANS)
+ tcp_dec_pcount_explicit(&tp->retrans_out,
+ packets_acked);
+ acked |= FLAG_RETRANS_DATA_ACKED;
+ /* Retransmitted data yields no valid RTT
+ * sample (Karn's rule): mark it unusable.
+ */
+ *seq_rtt = -1;
+ } else if (*seq_rtt < 0)
+ *seq_rtt = now - scb->when;
+ if (sacked & TCPCB_SACKED_ACKED)
+ tcp_dec_pcount_explicit(&tp->sacked_out,
+ packets_acked);
+ if (sacked & TCPCB_LOST)
+ tcp_dec_pcount_explicit(&tp->lost_out,
+ packets_acked);
+ if (sacked & TCPCB_URG) {
+ /* Drop out of urgent mode once the data
+ * up to the urgent pointer (snd_up) has
+ * been acknowledged.
+ */
+ if (tp->urg_mode &&
+ !before(seq, tp->snd_up))
+ tp->urg_mode = 0;
+ }
+ } else if (*seq_rtt < 0)
+ *seq_rtt = now - scb->when;
+
+ /* Clamp the decrement with min() so fackets_out can
+ * never underflow.
+ */
+ if (tcp_get_pcount(&tp->fackets_out)) {
+ __u32 dval = min(tcp_get_pcount(&tp->fackets_out),
+ packets_acked);
+ tcp_dec_pcount_explicit(&tp->fackets_out, dval);
+ }
+ tcp_dec_pcount_explicit(&tp->packets_out, packets_acked);
+
+ /* The partial trim must leave a non-empty skb behind. */
+ BUG_ON(tcp_skb_pcount(skb) == 0);
+ BUG_ON(!before(scb->seq, scb->end_seq));
+ }
+
+ return acked;
+}
+
+