/* SCTP kernel reference Implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 *
 * This file is part of the SCTP kernel reference Implementation
 *
 * These functions implement the sctp_outq class.  The outqueue handles
 * bundling and queueing of outgoing SCTP chunks.
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Perry Melange         <pmelange@null.cc.uic.edu>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
#include <linux/types.h>
#include <linux/list.h>   /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <net/sock.h>	  /* For skb_set_owner_w */

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Declare internal functions here.  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   struct sctp_sackhdr *sack,
				   __u32 highest_new_tsn);

static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn,
			      int count_of_newacks);

static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
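
/* Editor's note (added): the queue helpers below cast freely between
 * struct sctp_chunk * and struct sk_buff *.  This is assumed to work
 * because struct sctp_chunk begins with the same queueing members an
 * sk_buff starts with (next, prev, list), letting the generic skb_*
 * queue primitives manage chunk lists; it is a layout invariant relied
 * on here, not something these helpers verify.
 */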
/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	__skb_queue_head(&q->out, (struct sk_buff *)ch);
	q->out_qlen += ch->skb->len;
}

/* Take data from the front of the queue. */
static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
{
	struct sctp_chunk *ch;
	ch = (struct sctp_chunk *)__skb_dequeue(&q->out);
	if (ch)
		q->out_qlen -= ch->skb->len;
	return ch;
}

/* Add data chunk to the end of the queue. */
static inline void sctp_outq_tail_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	__skb_queue_tail(&q->out, (struct sk_buff *)ch);
	q->out_qlen += ch->skb->len;
}

/* Insert a chunk behind chunk 'pos'. */
static inline void sctp_outq_insert_data(struct sctp_outq *q,
					 struct sctp_chunk *ch,
					 struct sctp_chunk *pos)
{
	__skb_insert((struct sk_buff *)ch, (struct sk_buff *)pos->prev,
		     (struct sk_buff *)pos, pos->list);
	q->out_qlen += ch->skb->len;
}
/*
 * SFR-CACC algorithm:
 * D) If count_of_newacks is greater than or equal to 2
 * and t was not sent to the current primary then the
 * sender MUST NOT increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
				       struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks >= 2 && transport != primary)
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * F) If count_of_newacks is less than 2, let d be the
 * destination to which t was sent. If cacc_saw_newack
 * is 0 for destination d, then the sender MUST NOT
 * increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks < 2 && !transport->cacc.cacc_saw_newack)
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
 * execute steps C, D, F.
 *
 * C has been implemented in sctp_outq_sack
 */
static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
				     struct sctp_transport *transport,
				     int count_of_newacks)
{
	if (!primary->cacc.cycling_changeover) {
		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
			return 1;
		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
			return 1;
	}
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
 * than next_tsn_at_change of the current primary, then
 * the sender MUST NOT increment missing report count
 * for t.
 */
static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
{
	if (primary->cacc.cycling_changeover &&
	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3) If the missing report count for TSN t is to be
 * incremented according to [RFC2960] and
 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
 * then the sender MUST further execute steps 3.1 and
 * 3.2 to determine if the missing report count for
 * TSN t SHOULD NOT be incremented.
 *
 * 3.3) If 3.1 and 3.2 do not dictate that the missing
 * report count for t should not be incremented, then
 * the sender SHOULD increment missing report count for
 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
 */
static inline int sctp_cacc_skip(struct sctp_transport *primary,
				 struct sctp_transport *transport,
				 int count_of_newacks,
				 __u32 tsn)
{
	if (primary->cacc.changeover_active &&
	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks)
	     || sctp_cacc_skip_3_2(primary, tsn)))
		return 1;
	return 0;
}
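
/* Illustrative summary (added): taken together, the helpers above skip
 * the missing-report increment only while a changeover is active, and
 * then only when (D) two or more destinations saw new acks but t was
 * not sent to the current primary, (F) fewer than two destinations saw
 * new acks and t's own destination saw none, or (3.2) we are cycling
 * through a changeover and t predates it.  The point is to suppress
 * fast retransmits triggered by cross-path reordering rather than by
 * actual loss.
 */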
/* Generate a new outqueue.  */
struct sctp_outq *sctp_outq_new(struct sctp_association *asoc)
{
	struct sctp_outq *q;

	q = t_new(struct sctp_outq, GFP_KERNEL);
	if (q) {
		sctp_outq_init(asoc, q);
		q->malloced = 1;
	}
	return q;
}

/* Initialize an existing sctp_outq.  This does the boring stuff.
 * You still need to define handlers if you really want to DO
 * something with this structure...
 */
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
	q->asoc = asoc;
	skb_queue_head_init(&q->out);
	skb_queue_head_init(&q->control);
	INIT_LIST_HEAD(&q->retransmit);
	INIT_LIST_HEAD(&q->sacked);
	INIT_LIST_HEAD(&q->abandoned);

	q->outstanding_bytes = 0;
	q->empty = 1;
	q->cork = 0;
	q->malloced = 0;
	q->out_qlen = 0;
}
/* Free the outqueue structure and any related pending chunks.
 */
void sctp_outq_teardown(struct sctp_outq *q)
{
	struct sctp_transport *transport;
	struct list_head *lchunk, *pos, *temp;
	struct sctp_chunk *chunk;

	/* Throw away unacknowledged chunks. */
	list_for_each(pos, &q->asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			/* Mark as part of a failed message. */
			sctp_chunk_fail(chunk, q->error);
			sctp_chunk_free(chunk);
		}
	}

	/* Throw away chunks that have been gap ACKed.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks in the retransmit queue. */
	list_for_each_safe(lchunk, temp, &q->retransmit) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks that are in the abandoned queue. */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover data chunks. */
	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {

		/* Mark as send failure. */
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	q->error = 0;

	/* Throw away any leftover control chunks. */
	while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)) != NULL)
		sctp_chunk_free(chunk);
}
/* Free the outqueue structure and any related pending chunks.  */
void sctp_outq_free(struct sctp_outq *q)
{
	/* Throw away leftover chunks. */
	sctp_outq_teardown(q);

	/* If we were kmalloc()'d, free the memory.  */
	if (q->malloced)
		kfree(q);
}
/* Put a new chunk in an sctp_outq.  */
int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	int error = 0;

	SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
			  q, chunk, chunk && chunk->chunk_hdr ?
			  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
			  : "Illegal Chunk");

	/* If it is data, queue it up, otherwise, send it
	 * immediately.
	 */
	if (SCTP_CID_DATA == chunk->chunk_hdr->type) {
		/* Is it OK to queue data chunks?  */
		/* From 9. Termination of Association
		 *
		 * When either endpoint performs a shutdown, the
		 * association on each peer will stop accepting new
		 * data from its user and only deliver data in queue
		 * at the time of sending or receiving the SHUTDOWN
		 * chunk.
		 */
		switch (q->asoc->state) {
		case SCTP_STATE_EMPTY:
		case SCTP_STATE_CLOSED:
		case SCTP_STATE_SHUTDOWN_PENDING:
		case SCTP_STATE_SHUTDOWN_SENT:
		case SCTP_STATE_SHUTDOWN_RECEIVED:
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			/* Cannot send after transport endpoint shutdown */
			error = -ESHUTDOWN;
			break;

		default:
			SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n",
					  q, chunk, chunk && chunk->chunk_hdr ?
					  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
					  : "Illegal Chunk");

			sctp_outq_tail_data(q, chunk);
			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				SCTP_INC_STATS(SctpOutUnorderChunks);
			else
				SCTP_INC_STATS(SctpOutOrderChunks);
			q->empty = 0;
			break;
		}
	} else {
		__skb_queue_tail(&q->control, (struct sk_buff *) chunk);
		SCTP_INC_STATS(SctpOutCtrlChunks);
	}

	if (error < 0)
		return error;

	if (!q->cork)
		error = sctp_outq_flush(q, 0);

	return error;
}
/* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
 * and the abandoned list are in ascending order.
 */
void sctp_insert_list(struct list_head *head, struct list_head *new)
{
	struct list_head *pos;
	struct sctp_chunk *nchunk, *lchunk;
	__u32 ntsn, ltsn;
	int done = 0;

	nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
	ntsn = ntohl(nchunk->subh.data_hdr->tsn);

	list_for_each(pos, head) {
		lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
		ltsn = ntohl(lchunk->subh.data_hdr->tsn);
		if (TSN_lt(ntsn, ltsn)) {
			list_add(new, pos->prev);
			done = 1;
			break;
		}
	}
	if (!done)
		list_add_tail(new, head);
}
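
/* Note on TSN comparison (added): TSN_lt()/TSN_lte() are assumed to use
 * 32-bit serial-number arithmetic, roughly
 *
 *	TSN_lt(a, b)  ==  ((s32)((a) - (b)) < 0)
 *
 * so the ordering used by sctp_insert_list() survives TSN wraparound:
 * TSN_lt(0xfffffffe, 0x00000001) is true because the unsigned
 * difference 0xfffffffd reinterprets as a negative s32.
 */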
/* Mark all the eligible packets on a transport for retransmission.  */
void sctp_retransmit_mark(struct sctp_outq *q,
			  struct sctp_transport *transport,
			  __u8 fast_retransmit)
{
	struct list_head *lchunk, *ltemp;
	struct sctp_chunk *chunk;

	/* Walk through the specified transmitted queue.  */
	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(lchunk);
			sctp_insert_list(&q->abandoned, lchunk);
			continue;
		}

		/* If we are doing retransmission due to a fast retransmit,
		 * only the chunks that are marked for fast retransmit
		 * should be added to the retransmit queue.  If we are doing
		 * retransmission due to a timeout or pmtu discovery, only the
		 * chunks that are not yet acked should be added to the
		 * retransmit queue.
		 */
		if ((fast_retransmit && chunk->fast_retransmit) ||
		    (!fast_retransmit && !chunk->tsn_gap_acked)) {
			/* RFC 2960 6.2.1 Processing a Received SACK
			 *
			 * C) Any time a DATA chunk is marked for
			 * retransmission (via either T3-rtx timer expiration
			 * (Section 6.3.3) or via fast retransmit
			 * (Section 7.2.4)), add the data size of those
			 * chunks to the rwnd.
			 */
			q->asoc->peer.rwnd += sctp_data_size(chunk);
			q->outstanding_bytes -= sctp_data_size(chunk);
			transport->flight_size -= sctp_data_size(chunk);

			/* sctpimpguide-05 Section 2.8.2
			 * M5) If a T3-rtx timer expires, the
			 * 'TSN.Missing.Report' of all affected TSNs is set
			 * to 0.
			 */
			chunk->tsn_missing_report = 0;

			/* If a chunk that is being used for RTT measurement
			 * has to be retransmitted, we cannot use this chunk
			 * anymore for RTT measurements. Reset rto_pending so
			 * that a new RTT measurement is started when a new
			 * data chunk is sent.
			 */
			if (chunk->rtt_in_progress) {
				chunk->rtt_in_progress = 0;
				transport->rto_pending = 0;
			}

			/* Move the chunk to the retransmit queue. The chunks
			 * on the retransmit queue are always kept in order.
			 */
			list_del_init(lchunk);
			sctp_insert_list(&q->retransmit, lchunk);
		}
	}

	SCTP_DEBUG_PRINTK("%s: transport: %p, fast_retransmit: %d, "
			  "cwnd: %d, ssthresh: %d, flight_size: %d, "
			  "pba: %d\n", __FUNCTION__,
			  transport, fast_retransmit,
			  transport->cwnd, transport->ssthresh,
			  transport->flight_size,
			  transport->partial_bytes_acked);
}
/* Mark all the eligible packets on a transport for retransmission and force
 * one packet out.
 */
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
		     sctp_retransmit_reason_t reason)
{
	int error = 0;
	__u8 fast_retransmit = 0;

	switch (reason) {
	case SCTP_RTXR_T3_RTX:
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
		/* Update the retran path if the T3-rtx timer has expired for
		 * the current retran path.
		 */
		if (transport == transport->asoc->peer.retran_path)
			sctp_assoc_update_retran_path(transport->asoc);
		break;
	case SCTP_RTXR_FAST_RTX:
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
		fast_retransmit = 1;
		break;
	case SCTP_RTXR_PMTUD:
	default:
		break;
	}

	sctp_retransmit_mark(q, transport, fast_retransmit);

	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
	 * following the procedures outlined in C1 - C5.
	 */
	sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);

	error = sctp_outq_flush(q, /* rtx_timeout */ 1);

	if (error)
		q->asoc->base.sk->sk_err = -error;
}
/*
 * Transmit DATA chunks on the retransmit queue.  Upon return from
 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
 * need to be transmitted by the caller.
 * We assume that pkt->transport has already been set.
 *
 * The return value is a normal kernel error return value.
 */
static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
			       int rtx_timeout, int *start_timer)
{
	struct list_head *lqueue;
	struct list_head *lchunk;
	struct sctp_transport *transport = pkt->transport;
	sctp_xmit_t status;
	struct sctp_chunk *chunk;
	struct sctp_association *asoc;
	int error = 0;

	asoc = q->asoc;
	lqueue = &q->retransmit;

	/* RFC 2960 6.3.3 Handle T3-rtx Expiration
	 *
	 * E3) Determine how many of the earliest (i.e., lowest TSN)
	 * outstanding DATA chunks for the address for which the
	 * T3-rtx has expired will fit into a single packet, subject
	 * to the MTU constraint for the path corresponding to the
	 * destination transport address to which the retransmission
	 * is being sent (this may be different from the address for
	 * which the timer expires [see Section 6.4]).  Call this value
	 * K. Bundle and retransmit those K DATA chunks in a single
	 * packet to the destination endpoint.
	 *
	 * [Just to be painfully clear, if we are retransmitting
	 * because a timeout just happened, we should send only ONE
	 * packet of retransmitted data.]
	 */
	lchunk = sctp_list_dequeue(lqueue);

	while (lchunk) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* Make sure that Gap Acked TSNs are not retransmitted.  A
		 * simple approach is just to move such TSNs out of the
		 * way and into a 'transmitted' queue and skip to the
		 * next chunk.
		 */
		if (chunk->tsn_gap_acked) {
			list_add_tail(lchunk, &transport->transmitted);
			lchunk = sctp_list_dequeue(lqueue);
			continue;
		}

		/* Attempt to append this chunk to the packet. */
		status = sctp_packet_append_chunk(pkt, chunk);

		switch (status) {
		case SCTP_XMIT_PMTU_FULL:
			/* Send this packet.  */
			if ((error = sctp_packet_transmit(pkt)) == 0)
				*start_timer = 1;

			/* If we are retransmitting, we should only
			 * send a single packet.
			 */
			if (rtx_timeout) {
				list_add(lchunk, lqueue);
				lchunk = NULL;
			}

			/* Bundle lchunk in the next round.  */
			break;

		case SCTP_XMIT_RWND_FULL:
			/* Send this packet. */
			if ((error = sctp_packet_transmit(pkt)) == 0)
				*start_timer = 1;

			/* Stop sending DATA as there is no more room
			 * at the receiver.
			 */
			list_add(lchunk, lqueue);
			lchunk = NULL;
			break;

		case SCTP_XMIT_NAGLE_DELAY:
			/* Send this packet. */
			if ((error = sctp_packet_transmit(pkt)) == 0)
				*start_timer = 1;

			/* Stop sending DATA because of nagle delay. */
			list_add(lchunk, lqueue);
			lchunk = NULL;
			break;

		default:
			/* The append was successful, so add this chunk to
			 * the transmitted list.
			 */
			list_add_tail(lchunk, &transport->transmitted);
			*start_timer = 1;
			q->empty = 0;

			/* Retrieve a new chunk to bundle. */
			lchunk = sctp_list_dequeue(lqueue);
			break;
		}
	}

	return error;
}
/* Uncork the outqueue: flush any chunks that were queued up while the
 * queue was corked.
 */
int sctp_outq_uncork(struct sctp_outq *q)
{
	int error = 0;
	if (q->cork) {
		q->cork = 0;
		error = sctp_outq_flush(q, 0);
	}
	return error;
}
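
/* Usage sketch (added): callers that generate several chunks for one
 * event typically set q->cork, feed each chunk through sctp_outq_tail()
 * (which skips flushing while corked), and finish with
 * sctp_outq_uncork() so the whole burst can be bundled into as few
 * packets as possible.
 */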
/*
 * Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
 *
 * Note: This function can be called from multiple contexts so appropriate
 * locking concerns must be made.  Today we use the sock lock to protect
 * this function.
 */
int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
	struct sctp_packet *packet;
	struct sctp_packet singleton;
	struct sctp_association *asoc = q->asoc;
	__u16 sport = asoc->base.bind_addr.port;
	__u16 dport = asoc->peer.port;
	__u32 vtag = asoc->peer.i.init_tag;
	struct sk_buff_head *queue;
	struct sctp_transport *transport = NULL;
	struct sctp_transport *new_transport;
	struct sctp_chunk *chunk;
	sctp_xmit_t status;
	int error = 0;
	int start_timer = 0;

	/* These transports have chunks to send. */
	struct list_head transport_list;
	struct list_head *ltransport;

	INIT_LIST_HEAD(&transport_list);
	packet = NULL;

	/*
	 * 6.10 Bundling
	 *   ...
	 *   When bundling control chunks with DATA chunks, an
	 *   endpoint MUST place control chunks first in the outbound
	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
	 *   within a SCTP packet in increasing order of TSN.
	 *   ...
	 */

	queue = &q->control;
	while ((chunk = (struct sctp_chunk *)skb_dequeue(queue)) != NULL) {
		/* Pick the right transport to use. */
		new_transport = chunk->transport;

		if (!new_transport) {
			new_transport = asoc->peer.active_path;
		} else if (!new_transport->active) {
			/* If the chunk is Heartbeat, send it to
			 * chunk->transport, even if it's inactive.
			 */
			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT)
				new_transport = asoc->peer.active_path;
		}

		/* Are we switching transports?
		 * Take care of transport locks.
		 */
		if (new_transport != transport) {
			transport = new_transport;
			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}
			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		}

		switch (chunk->chunk_hdr->type) {
		/*
		 * 6.10 Bundling
		 *   ...
		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
		 *   COMPLETE with any other chunks.  [Send them immediately.]
		 */
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		case SCTP_CID_SHUTDOWN_COMPLETE:
			sctp_packet_init(&singleton, transport, sport, dport);
			sctp_packet_config(&singleton, vtag, 0);
			sctp_packet_append_chunk(&singleton, chunk);
			error = sctp_packet_transmit(&singleton);
			if (error < 0)
				return error;
			break;

		case SCTP_CID_ABORT:
		case SCTP_CID_SACK:
		case SCTP_CID_HEARTBEAT:
		case SCTP_CID_HEARTBEAT_ACK:
		case SCTP_CID_SHUTDOWN:
		case SCTP_CID_SHUTDOWN_ACK:
		case SCTP_CID_ERROR:
		case SCTP_CID_COOKIE_ECHO:
		case SCTP_CID_COOKIE_ACK:
		case SCTP_CID_ECN_ECNE:
		case SCTP_CID_ECN_CWR:
		case SCTP_CID_ASCONF:
		case SCTP_CID_ASCONF_ACK:
		case SCTP_CID_FWD_TSN:
			sctp_packet_transmit_chunk(packet, chunk);
			break;

		default:
			/* We built a chunk with an illegal type! */
			BUG();
		}
	}

	/* Is it OK to send data chunks?  */
	switch (asoc->state) {
	case SCTP_STATE_COOKIE_ECHOED:
		/* Only allow bundling when this packet has a COOKIE-ECHO
		 * chunk.
		 */
		if (!packet || !packet->has_cookie_echo)
			break;

		/* fallthru */
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
		/*
		 * RFC 2960 6.1  Transmission of DATA Chunks
		 *
		 * C) When the time comes for the sender to transmit,
		 * before sending new DATA chunks, the sender MUST
		 * first transmit any outstanding DATA chunks which
		 * are marked for retransmission (limited by the
		 * current cwnd).
		 */
		if (!list_empty(&q->retransmit)) {
			if (transport == asoc->peer.retran_path)
				goto retran;

			/* Switch transports & prepare the packet.  */
			transport = asoc->peer.retran_path;

			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}

			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		retran:
			error = sctp_outq_flush_rtx(q, packet,
						    rtx_timeout, &start_timer);

			if (start_timer)
				sctp_transport_reset_timers(transport);

			/* This can happen on COOKIE-ECHO resend.  Only
			 * one chunk can get bundled with a COOKIE-ECHO.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;

			/* Don't send new data if there is still data
			 * waiting to retransmit.
			 */
			if (!list_empty(&q->retransmit))
				goto sctp_flush_out;
		}

		/* Finally, transmit new packets.  */
		start_timer = 0;
		queue = &q->out;

		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
			 * stream identifier.
			 */
			if (chunk->sinfo.sinfo_stream >=
			    asoc->c.sinit_num_ostreams) {

				/* Mark as failed send. */
				sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
				sctp_chunk_free(chunk);
				continue;
			}

			/* Has this chunk expired? */
			if (sctp_chunk_abandoned(chunk)) {
				sctp_chunk_fail(chunk, 0);
				sctp_chunk_free(chunk);
				continue;
			}

			/* If there is a specified transport, use it.
			 * Otherwise, we want to use the active path.
			 */
			new_transport = chunk->transport;
			if (!new_transport || !new_transport->active)
				new_transport = asoc->peer.active_path;

			/* Change packets if necessary.  */
			if (new_transport != transport) {
				transport = new_transport;

				/* Schedule to have this transport's
				 * packet flushed.
				 */
				if (list_empty(&transport->send_ready)) {
					list_add_tail(&transport->send_ready,
						      &transport_list);
				}

				packet = &transport->packet;
				sctp_packet_config(packet, vtag,
						   asoc->peer.ecn_capable);
			}

			SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ",
					  q, chunk,
					  chunk && chunk->chunk_hdr ?
					  sctp_cname(SCTP_ST_CHUNK(
						  chunk->chunk_hdr->type))
					  : "Illegal Chunk");

			SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head "
					  "%p skb->users %d.\n",
					  ntohl(chunk->subh.data_hdr->tsn),
					  chunk->skb ? chunk->skb->head : NULL,
					  chunk->skb ?
					  atomic_read(&chunk->skb->users) : -1);

			/* Add the chunk to the packet.  */
			status = sctp_packet_transmit_chunk(packet, chunk);

			switch (status) {
			case SCTP_XMIT_PMTU_FULL:
			case SCTP_XMIT_RWND_FULL:
			case SCTP_XMIT_NAGLE_DELAY:
				/* We could not append this chunk, so put
				 * the chunk back on the output queue.
				 */
				SCTP_DEBUG_PRINTK("sctp_outq_flush: could "
					"not transmit TSN: 0x%x, status: %d\n",
					ntohl(chunk->subh.data_hdr->tsn),
					status);
				sctp_outq_head_data(q, chunk);
				goto sctp_flush_out;

			case SCTP_XMIT_OK:
				break;

			default:
				BUG();
			}

			/* BUG: We assume that the sctp_packet_transmit()
			 * call below will succeed all the time and add the
			 * chunk to the transmitted list and restart the
			 * timers.
			 * It is possible that the call can fail under OOM
			 * conditions.
			 *
			 * Is this really a problem?  Won't this behave
			 * like a lost TSN?
			 */
			list_add_tail(&chunk->transmitted_list,
				      &transport->transmitted);

			sctp_transport_reset_timers(transport);

			q->empty = 0;

			/* Only let one DATA chunk get bundled with a
			 * COOKIE-ECHO chunk.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;
		}
		break;

	default:
		/* Do nothing. */
		break;
	}

sctp_flush_out:

	/* Before returning, examine all the transports touched in
	 * this call.  Right now, we bluntly force clear all the
	 * transports.  Things might change after we implement Nagle.
	 * But such an examination is still required.
	 *
	 * --xguo
	 */
	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
		struct sctp_transport *t = list_entry(ltransport,
						      struct sctp_transport,
						      send_ready);
		packet = &t->packet;
		if (!sctp_packet_empty(packet))
			error = sctp_packet_transmit(packet);
	}

	return error;
}
/* Update unack_data based on the incoming SACK chunk */
static void sctp_sack_update_unack_data(struct sctp_association *assoc,
					struct sctp_sackhdr *sack)
{
	sctp_sack_variable_t *frags;
	__u16 unack_data;
	int i;

	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;

	frags = sack->variable;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
		unack_data -= ((ntohs(frags[i].gab.end) -
				ntohs(frags[i].gab.start) + 1));
	}

	assoc->unack_data = unack_data;
}
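
/* Worked example for the computation above (added): with next_tsn = 111
 * and ctsn_ack_point = 102, TSNs 103..110 are unacknowledged, so
 * unack_data starts at 111 - 102 - 1 = 8.  One Gap Ack Block
 * {start = 2, end = 3} (offsets relative to the Cumulative TSN Ack)
 * covers TSNs 104..105 and removes 3 - 2 + 1 = 2, leaving
 * unack_data = 6.
 */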
/* Return the highest new tsn that is acknowledged by the given SACK chunk. */
static __u32 sctp_highest_new_tsn(struct sctp_sackhdr *sack,
				  struct sctp_association *asoc)
{
	struct list_head *ltransport, *lchunk;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	__u32 highest_new_tsn, tsn;
	struct list_head *transport_list = &asoc->peer.transport_addr_list;

	highest_new_tsn = ntohl(sack->cum_tsn_ack);

	list_for_each(ltransport, transport_list) {
		transport = list_entry(ltransport, struct sctp_transport,
				       transports);
		list_for_each(lchunk, &transport->transmitted) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			tsn = ntohl(chunk->subh.data_hdr->tsn);

			if (!chunk->tsn_gap_acked &&
			    TSN_lt(highest_new_tsn, tsn) &&
			    sctp_acked(sack, tsn))
				highest_new_tsn = tsn;
		}
	}

	return highest_new_tsn;
}
/* This is where we REALLY process a SACK.
 *
 * Process the SACK against the outqueue.  Mostly, this just frees
 * things off the transmitted queue.
 */
int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_transport *transport;
	struct sctp_chunk *tchunk = NULL;
	struct list_head *lchunk, *transport_list, *pos, *temp;
	sctp_sack_variable_t *frags = sack->variable;
	__u32 sack_ctsn, ctsn, tsn;
	__u32 highest_tsn, highest_new_tsn;
	__u32 sack_a_rwnd;
	unsigned outstanding;
	struct sctp_transport *primary = asoc->peer.primary_path;
	int count_of_newacks = 0;

	/* Grab the association's destination address list. */
	transport_list = &asoc->peer.transport_addr_list;

	sack_ctsn = ntohl(sack->cum_tsn_ack);

	/*
	 * SFR-CACC algorithm:
	 * On receipt of a SACK the sender SHOULD execute the
	 * following statements.
	 *
	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
	 * all destinations.
	 */
	if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
		primary->cacc.changeover_active = 0;
		list_for_each(pos, transport_list) {
			transport = list_entry(pos, struct sctp_transport,
					       transports);
			transport->cacc.cycling_changeover = 0;
		}
	}

	/*
	 * SFR-CACC algorithm:
	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
	 * is set the receiver of the SACK MUST take the following actions:
	 *
	 * A) Initialize the cacc_saw_newack to 0 for all destination
	 * addresses.
	 */
	if (sack->num_gap_ack_blocks > 0 &&
	    primary->cacc.changeover_active) {
		list_for_each(pos, transport_list) {
			transport = list_entry(pos, struct sctp_transport,
					       transports);
			transport->cacc.cacc_saw_newack = 0;
		}
	}

	/* Get the highest TSN in the sack. */
	highest_tsn = sack_ctsn;
	if (sack->num_gap_ack_blocks)
		highest_tsn +=
		    ntohs(frags[ntohs(sack->num_gap_ack_blocks) - 1].gab.end);

	if (TSN_lt(asoc->highest_sacked, highest_tsn)) {
		highest_new_tsn = highest_tsn;
		asoc->highest_sacked = highest_tsn;
	} else {
		highest_new_tsn = sctp_highest_new_tsn(sack, asoc);
	}

	/* Run through the retransmit queue.  Credit bytes received
	 * and free those chunks that we can.
	 */
	sctp_check_transmitted(q, &q->retransmit, NULL, sack, highest_new_tsn);
	sctp_mark_missing(q, &q->retransmit, NULL, highest_new_tsn, 0);

	/* Run through the transmitted queue.
	 * Credit bytes received and free those chunks which we can.
	 *
	 * This is a MASSIVE candidate for optimization.
	 */
	list_for_each(pos, transport_list) {
		transport = list_entry(pos, struct sctp_transport,
				       transports);
		sctp_check_transmitted(q, &transport->transmitted,
				       transport, sack, highest_new_tsn);
		/*
		 * SFR-CACC algorithm:
		 * C) Let count_of_newacks be the number of
		 * destinations for which cacc_saw_newack is set.
		 */
		if (transport->cacc.cacc_saw_newack)
			count_of_newacks++;
	}

	list_for_each(pos, transport_list) {
		transport = list_entry(pos, struct sctp_transport,
				       transports);
		sctp_mark_missing(q, &transport->transmitted, transport,
				  highest_new_tsn, count_of_newacks);
	}

	/* Move the Cumulative TSN Ack Point if appropriate.  */
	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn))
		asoc->ctsn_ack_point = sack_ctsn;

	/* Update unack_data field in the assoc. */
	sctp_sack_update_unack_data(asoc, sack);

	ctsn = asoc->ctsn_ack_point;

	/* Throw away stuff rotting on the sack queue.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);
		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (TSN_lte(tsn, ctsn))
			sctp_chunk_free(tchunk);
	}

	/* ii) Set rwnd equal to the newly received a_rwnd minus the
	 *     number of bytes still outstanding after processing the
	 *     Cumulative TSN Ack and the Gap Ack Blocks.
	 */
	sack_a_rwnd = ntohl(sack->a_rwnd);
	outstanding = q->outstanding_bytes;

	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
	else
		sack_a_rwnd = 0;

	asoc->peer.rwnd = sack_a_rwnd;
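
	/* Worked example (added): a SACK carrying a_rwnd = 65535 while
	 * 1500 bytes remain outstanding yields rwnd = 64035; if
	 * outstanding ever exceeded a_rwnd, the window clamps to zero
	 * above instead of wrapping negative.
	 */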
	sctp_generate_fwdtsn(q, sack_ctsn);

	SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n",
			  __FUNCTION__, sack_ctsn);
	SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, "
			  "%p is 0x%x. Adv peer ack point: 0x%x\n",
			  __FUNCTION__, asoc, ctsn, asoc->adv_peer_ack_point);

	/* See if all chunks are acked.
	 * Make sure the empty queue handler will get run later.
	 */
	q->empty = skb_queue_empty(&q->out) && skb_queue_empty(&q->control) &&
			list_empty(&q->retransmit);
	if (!q->empty)
		goto finish;

	list_for_each(pos, transport_list) {
		transport = list_entry(pos, struct sctp_transport,
				       transports);
		q->empty = q->empty && list_empty(&transport->transmitted);
		if (!q->empty)
			goto finish;
	}

	SCTP_DEBUG_PRINTK("sack queue is empty.\n");
finish:
	return q->empty;
}
/* Is the outqueue empty?  */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
	return q->empty;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/
/* Go through a transport's transmitted list or the association's retransmit
 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
 * The retransmit list will not have an associated transport.
 *
 * I added coherent debug information output.	--xguo
 *
 * Instead of printing 'sacked' or 'kept' for each TSN on the
 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
 * KEPT TSN6-TSN7, etc.
 */
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   struct sctp_sackhdr *sack,
				   __u32 highest_new_tsn_in_sack)
{
	struct list_head *lchunk;
	struct sctp_chunk *tchunk;
	struct list_head tlist;
	__u32 tsn;
	__u32 sack_ctsn;
	__u32 rtt;
	__u8 restart_timer = 0;
	int bytes_acked = 0;

	/* These state variables are for coherent debug output. --xguo */

#if SCTP_DEBUG
	__u32 dbg_ack_tsn = 0;	     /* An ACKed TSN range starts here... */
	__u32 dbg_last_ack_tsn = 0;  /* ...and finishes here.	     */
	__u32 dbg_kept_tsn = 0;	     /* An un-ACKed range starts here...  */
	__u32 dbg_last_kept_tsn = 0; /* ...and finishes here.	     */

	/* 0 : The last TSN was ACKed.
	 * 1 : The last TSN was NOT ACKed (i.e. KEPT).
	 * -1: We need to initialize.
	 */
	int dbg_prt_state = -1;
#endif /* SCTP_DEBUG */

	sack_ctsn = ntohl(sack->cum_tsn_ack);

	INIT_LIST_HEAD(&tlist);

	/* The while loop will skip empty transmitted queues. */
	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);

		if (sctp_chunk_abandoned(tchunk)) {
			/* Move the chunk to abandoned list. */
			sctp_insert_list(&q->abandoned, lchunk);
			continue;
		}

		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (sctp_acked(sack, tsn)) {
			/* If this queue is the retransmit queue, the
			 * retransmit timer has already reclaimed
			 * the outstanding bytes for this chunk, so only
			 * count bytes associated with a transport.
			 */
			if (transport) {
				/* If this chunk is being used for RTT
				 * measurement, calculate the RTT and update
				 * the RTO using this value.
				 *
				 * 6.3.1 C5) Karn's algorithm: RTT measurements
				 * MUST NOT be made using packets that were
				 * retransmitted (and thus for which it is
				 * ambiguous whether the reply was for the
				 * first instance of the packet or a later
				 * instance).
				 */
				if (!tchunk->tsn_gap_acked &&
				    !tchunk->resent &&
				    tchunk->rtt_in_progress) {
					rtt = jiffies - tchunk->sent_at;
					sctp_transport_update_rto(transport,
								  rtt);
				}
			}
			if (TSN_lte(tsn, sack_ctsn)) {
				/* RFC 2960  6.3.2 Retransmission Timer Rules
				 *
				 * R3) Whenever a SACK is received
				 * that acknowledges the DATA chunk
				 * with the earliest outstanding TSN
				 * for that address, restart T3-rtx
				 * timer for that address with its
				 * current RTO.
				 */
				restart_timer = 1;

				if (!tchunk->tsn_gap_acked) {
					tchunk->tsn_gap_acked = 1;
					bytes_acked += sctp_data_size(tchunk);
					/*
					 * SFR-CACC algorithm:
					 * 2) If the SACK contains gap acks
					 * and the flag CHANGEOVER_ACTIVE is
					 * set the receiver of the SACK MUST
					 * take the following action:
					 *
					 * B) For each TSN t being acked that
					 * has not been acked in any SACK so
					 * far, set cacc_saw_newack to 1 for
					 * the destination that the TSN was
					 * sent to.
					 */
					if (transport &&
					    sack->num_gap_ack_blocks &&
					    q->asoc->peer.primary_path->cacc.
					    changeover_active)
						transport->cacc.cacc_saw_newack
							= 1;
				}

				list_add_tail(&tchunk->transmitted_list,
					      &q->sacked);
			} else {
				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
				 * M2) Each time a SACK arrives reporting
				 * 'Stray DATA chunk(s)' record the highest TSN
				 * reported as newly acknowledged, call this
				 * value 'HighestTSNinSack'. A newly
				 * acknowledged DATA chunk is one not
				 * previously acknowledged in a SACK.
				 *
				 * When the SCTP sender of data receives a SACK
				 * chunk that acknowledges, for the first time,
				 * the receipt of a DATA chunk, all the still
				 * unacknowledged DATA chunks whose TSN is
				 * older than that newly acknowledged DATA
				 * chunk, are qualified as 'Stray DATA chunks'.
				 */
				if (!tchunk->tsn_gap_acked) {
					tchunk->tsn_gap_acked = 1;
					bytes_acked += sctp_data_size(tchunk);
				}
				list_add_tail(lchunk, &tlist);
			}

#if SCTP_DEBUG
			switch (dbg_prt_state) {
			case 0:	/* last TSN was ACKed */
				if (dbg_last_ack_tsn + 1 == tsn) {
					/* This TSN belongs to the
					 * current ACK range.
					 */
					break;
				}

				if (dbg_last_ack_tsn != dbg_ack_tsn) {
					/* Display the end of the
					 * current range.
					 */
					SCTP_DEBUG_PRINTK("-%08x",
							  dbg_last_ack_tsn);
				}

				/* Start a new range.  */
				SCTP_DEBUG_PRINTK(",%08x", tsn);
				dbg_ack_tsn = tsn;
				break;

			case 1:	/* The last TSN was NOT ACKed. */
				if (dbg_last_kept_tsn != dbg_kept_tsn) {
					/* Display the end of current range. */
					SCTP_DEBUG_PRINTK("-%08x",
							  dbg_last_kept_tsn);
				}

				SCTP_DEBUG_PRINTK("\n");

				/* FALL THROUGH... */
			default:
				/* This is the first-ever TSN we examined.  */
				/* Start a new range of ACK-ed TSNs.  */
				SCTP_DEBUG_PRINTK("ACKed: %08x", tsn);
				dbg_prt_state = 0;
				dbg_ack_tsn = tsn;
			}

			dbg_last_ack_tsn = tsn;
#endif /* SCTP_DEBUG */

		} else {
			if (tchunk->tsn_gap_acked) {
				SCTP_DEBUG_PRINTK("%s: Receiver reneged on "
						  "data TSN: 0x%x\n",
						  __FUNCTION__, tsn);
				tchunk->tsn_gap_acked = 0;

				bytes_acked -= sctp_data_size(tchunk);

				/* RFC 2960 6.3.2 Retransmission Timer Rules
				 *
				 * R4) Whenever a SACK is received missing a
				 * TSN that was previously acknowledged via a
				 * Gap Ack Block, start T3-rtx for the
				 * destination address to which the DATA
				 * chunk was originally
				 * transmitted if it is not already running.
				 */
				restart_timer = 1;
			}

			list_add_tail(lchunk, &tlist);

#if SCTP_DEBUG
			/* See the above comments on ACK-ed TSNs. */
			switch (dbg_prt_state) {
			case 1:
				if (dbg_last_kept_tsn + 1 == tsn)
					break;

				if (dbg_last_kept_tsn != dbg_kept_tsn)
					SCTP_DEBUG_PRINTK("-%08x",
							  dbg_last_kept_tsn);

				SCTP_DEBUG_PRINTK(",%08x", tsn);
				dbg_kept_tsn = tsn;
				break;

			case 0:
				if (dbg_last_ack_tsn != dbg_ack_tsn)
					SCTP_DEBUG_PRINTK("-%08x",
							  dbg_last_ack_tsn);
				SCTP_DEBUG_PRINTK("\n");

				/* FALL THROUGH... */
			default:
				SCTP_DEBUG_PRINTK("KEPT: %08x", tsn);
				dbg_prt_state = 1;
				dbg_kept_tsn = tsn;
			}

			dbg_last_kept_tsn = tsn;
#endif /* SCTP_DEBUG */
		}
	}

#if SCTP_DEBUG
	/* Finish off the last range, displaying its ending TSN.  */
	switch (dbg_prt_state) {
	case 0:
		if (dbg_last_ack_tsn != dbg_ack_tsn) {
			SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_ack_tsn);
		} else {
			SCTP_DEBUG_PRINTK("\n");
		}
		break;

	case 1:
		if (dbg_last_kept_tsn != dbg_kept_tsn) {
			SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_kept_tsn);
		} else {
			SCTP_DEBUG_PRINTK("\n");
		}
	}
#endif /* SCTP_DEBUG */
	if (transport) {
		if (bytes_acked) {
			/* 8.2. When an outstanding TSN is acknowledged,
			 * the endpoint shall clear the error counter of
			 * the destination transport address to which the
			 * DATA chunk was last sent.
			 * The association's overall error counter is
			 * also cleared.
			 */
			transport->error_count = 0;
			transport->asoc->overall_error_count = 0;

			/* Mark the destination transport address as
			 * active if it is not so marked.
			 */
			if (!transport->active) {
				sctp_assoc_control_transport(
					transport->asoc,
					transport,
					SCTP_TRANSPORT_UP,
					SCTP_RECEIVED_SACK);
			}

			sctp_transport_raise_cwnd(transport, sack_ctsn,
						  bytes_acked);

			transport->flight_size -= bytes_acked;
			q->outstanding_bytes -= bytes_acked;
		} else {
			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
			 * When a sender is doing zero window probing, it
			 * should not timeout the association if it continues
			 * to receive new packets from the receiver. The
			 * reason is that the receiver MAY keep its window
			 * closed for an indefinite time.
			 * A sender is doing zero window probing when the
			 * receiver's advertised window is zero, and there is
			 * only one data chunk in flight to the receiver.
			 */
			if (!q->asoc->peer.rwnd &&
			    !list_empty(&tlist) &&
			    (sack_ctsn+2 == q->asoc->next_tsn)) {
				SCTP_DEBUG_PRINTK("%s: SACK received for zero "
						  "window probe: %u\n",
						  __FUNCTION__, sack_ctsn);
				q->asoc->overall_error_count = 0;
				transport->error_count = 0;
			}
		}

		/* RFC 2960 6.3.2 Retransmission Timer Rules
		 *
		 * R2) Whenever all outstanding data sent to an address have
		 * been acknowledged, turn off the T3-rtx timer of that
		 * address.
		 */
		if (!transport->flight_size) {
			if (timer_pending(&transport->T3_rtx_timer) &&
			    del_timer(&transport->T3_rtx_timer)) {
				sctp_transport_put(transport);
			}
		} else if (restart_timer) {
			if (!mod_timer(&transport->T3_rtx_timer,
				       jiffies + transport->rto))
				sctp_transport_hold(transport);
		}
	}

	list_splice(&tlist, transmitted_queue);
}
/* Mark chunks as missing and, consequently, candidates for
 * retransmission.
 */
static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn_in_sack,
			      int count_of_newacks)
{
	struct sctp_chunk *chunk;
	struct list_head *pos;
	__u32 tsn;
	char do_fast_retransmit = 0;
	struct sctp_transport *primary = q->asoc->peer.primary_path;

	list_for_each(pos, transmitted_queue) {

		chunk = list_entry(pos, struct sctp_chunk, transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
		 * 'Unacknowledged TSN's', if the TSN number of an
		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
		 * value, increment the 'TSN.Missing.Report' count on that
		 * chunk if it has NOT been fast retransmitted or marked for
		 * fast retransmit already.
		 */
		if (!chunk->fast_retransmit &&
		    !chunk->tsn_gap_acked &&
		    TSN_lt(tsn, highest_new_tsn_in_sack)) {

			/* SFR-CACC may require us to skip marking
			 * this chunk as missing.
			 */
			if (!transport || !sctp_cacc_skip(primary, transport,
					    count_of_newacks, tsn)) {
				chunk->tsn_missing_report++;

				SCTP_DEBUG_PRINTK(
					"%s: TSN 0x%x missing counter: %d\n",
					__FUNCTION__, tsn,
					chunk->tsn_missing_report);
			}
		}
		/*
		 * M4) If any DATA chunk is found to have a
		 * 'TSN.Missing.Report' value larger than or equal to 4,
		 * mark that chunk for retransmission and start the fast
		 * retransmit procedure.
		 */
		if (chunk->tsn_missing_report >= 4) {
			chunk->fast_retransmit = 1;
			do_fast_retransmit = 1;
		}
	}

	if (transport) {
		if (do_fast_retransmit)
			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);

		SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
				  "ssthresh: %d, flight_size: %d, pba: %d\n",
				  __FUNCTION__, transport, transport->cwnd,
				  transport->ssthresh, transport->flight_size,
				  transport->partial_bytes_acked);
	}
}
/* Is the given TSN acked by this packet?  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
{
	int i;
	sctp_sack_variable_t *frags;
	__u16 gap;
	__u32 ctsn = ntohl(sack->cum_tsn_ack);

	if (TSN_lte(tsn, ctsn))
		goto pass;

	/* 3.3.4 Selective Acknowledgement (SACK) (3):
	 *
	 * Gap Ack Blocks:
	 *  These fields contain the Gap Ack Blocks. They are repeated
	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
	 *  defined in the Number of Gap Ack Blocks field. All DATA
	 *  chunks with TSNs greater than or equal to (Cumulative TSN
	 *  Ack + Gap Ack Block Start) and less than or equal to
	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
	 *  Block are assumed to have been received correctly.
	 */

	frags = sack->variable;
	gap = tsn - ctsn;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
		if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
		    TSN_lte(gap, ntohs(frags[i].gab.end)))
			goto pass;
	}

	return 0;
pass:
	return 1;
}
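
/* Worked example (added): with cum_tsn_ack = 100, TSN 104 gives
 * gap = 4, and a Gap Ack Block {start = 3, end = 5} covers TSNs
 * 103..105, so 104 takes the 'pass' branch (3 <= 4 <= 5) while
 * TSN 106 (gap = 6) does not.
 */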
/* Find the position of 'stream' in the skip list, or return 'nskips'
 * (one past the last used slot) if the stream is not yet listed.
 */
static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
				    int nskips, __u16 stream)
{
	int i;
	for (i = 0; i < nskips; i++) {
		if (skiplist[i].stream == stream)
			return i;
	}
	return i;
}
/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
	int nskips = 0;
	int skip_pos = 0;
	__u32 tsn;
	struct sctp_chunk *chunk;
	struct list_head *lchunk, *temp;

	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
	 * received SACK.
	 *
	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
	 */
	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
	 * the chunk next in the out-queue space is marked as "abandoned" as
	 * shown in the following example:
	 *
	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
	 * and the Advanced.Peer.Ack.Point is updated to this value:
	 *
	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
	 *   normal SACK processing         local advancement
	 *                ...                           ...
	 *   Adv.Ack.Pt-> 102 acked                     102 acked
	 *                103 abandoned                 103 abandoned
	 *                104 abandoned     Adv.Ack.P-> 104 abandoned
	 *                105                           105
	 *                106 acked                     106 acked
	 *
	 * In this example, the data sender successfully advanced the
	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
	 */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* Remove any chunks in the abandoned queue that are acked by
		 * the ctsn.
		 */
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			if (!chunk->tsn_gap_acked) {
				chunk->transport->flight_size -=
					sctp_data_size(chunk);
				q->outstanding_bytes -= sctp_data_size(chunk);
			}
			sctp_chunk_free(chunk);
		} else {
			if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
				asoc->adv_peer_ack_point = tsn;
				if (chunk->chunk_hdr->flags &
					 SCTP_DATA_UNORDERED)
					continue;
				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
						nskips,
						chunk->subh.data_hdr->stream);
				ftsn_skip_arr[skip_pos].stream =
					chunk->subh.data_hdr->stream;
				ftsn_skip_arr[skip_pos].ssn =
					chunk->subh.data_hdr->ssn;
				if (skip_pos == nskips)
					nskips++;
				if (nskips == 10)
					break;
			} else
				break;
		}
	}

	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
	 * is greater than the Cumulative TSN ACK carried in the received
	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
	 * chunk containing the latest value of the
	 * "Advanced.Peer.Ack.Point".
	 *
	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
	 * list each stream and sequence number in the forwarded TSN. This
	 * information will enable the receiver to easily find any
	 * stranded TSN's waiting on stream reorder queues. Each stream
	 * SHOULD only be reported once; this means that if multiple
	 * abandoned messages occur in the same stream then only the
	 * highest abandoned stream sequence number is reported. If the
	 * total size of the FORWARD TSN does NOT fit in a single MTU then
	 * the sender of the FORWARD TSN SHOULD lower the
	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
	 * single MTU.
	 */
	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
					      nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		__skb_queue_tail(&q->control, (struct sk_buff *)ftsn_chunk);
		SCTP_INC_STATS(SctpOutCtrlChunks);
	}
}