/* Throw away unacknowledged chunks. */
list_for_each(pos, &q->asoc->peer.transport_addr_list) {
transport = list_entry(pos, struct sctp_transport, transports);
- while ((lchunk = sctp_list_dequeue(&transport->transmitted))) {
+ while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
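/* The explicit NULL comparison documents that the assignment inside
 * the loop condition is intentional, rather than a mistyped '=='.
 */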
chunk = list_entry(lchunk, struct sctp_chunk,
transmitted_list);
/* Mark as part of a failed message. */
}
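/* Each chunk dequeued above is reported to the upper layer as a failed
 * send and then released; this excerpt shows only the list bookkeeping.
 */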
/* Throw away any leftover data chunks. */
- while ((chunk = sctp_outq_dequeue_data(q))) {
+ while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
/* Mark as send failure. */
sctp_chunk_fail(chunk, q->error);
q->error = 0;
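/* q->error held the cause code passed to sctp_chunk_fail() above; it
 * is cleared once the pending data has been purged.
 */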
/* Throw away any leftover control chunks. */
- while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)))
+ while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)) != NULL)
sctp_chunk_free(chunk);
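/* The cast between sk_buff and sctp_chunk works because struct
 * sctp_chunk lays out its first fields to match struct sk_buff,
 * letting chunks sit directly on an sk_buff_head queue.
 */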
}
sctp_outq_tail_data(q, chunk);
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
- SCTP_INC_STATS(SctpOutUnorderChunks);
+ SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS);
else
- SCTP_INC_STATS(SctpOutOrderChunks);
+ SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS);
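/* The U bit in the DATA chunk flags (RFC 2960) marks data that the
 * receiver may deliver without stream reordering, so ordered and
 * unordered chunks are counted in separate MIB objects.
 */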
q->empty = 0;
break;
};
} else {
__skb_queue_tail(&q->control, (struct sk_buff *) chunk);
- SCTP_INC_STATS(SctpOutCtrlChunks);
+ SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
}
if (error < 0)
int rtx_timeout, int *start_timer)
{
struct list_head *lqueue;
- struct list_head *lchunk;
+ struct list_head *lchunk, *lchunk1;
struct sctp_transport *transport = pkt->transport;
sctp_xmit_t status;
- struct sctp_chunk *chunk;
+ struct sctp_chunk *chunk, *chunk1;
struct sctp_association *asoc;
int error = 0;
* the transmitted list.
*/
list_add_tail(lchunk, &transport->transmitted);
+
+ /* Mark the chunk as ineligible for fast retransmit
+ * after it is retransmitted.
+ */
+ chunk->fast_retransmit = 0;
+
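/* A chunk is back in flight on this transport, so tell the caller to
 * restart the T3-rtx retransmission timer.
 */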
*start_timer = 1;
q->empty = 0;
lchunk = sctp_list_dequeue(lqueue);
break;
};
+
+ /* If we are here due to a retransmit timeout or a fast
+ * retransmit and if there are any chunks left in the retransmit
+ * queue that could not fit in the PMTU sized packet, they need
+ * to be marked as ineligible for a subsequent fast retransmit.
+ */
+ if (rtx_timeout && !lchunk) {
+ list_for_each(lchunk1, lqueue) {
+ chunk1 = list_entry(lchunk1, struct sctp_chunk,
+ transmitted_list);
+ chunk1->fast_retransmit = 0;
+ }
+ }
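/* Clearing the flag keeps a later SACK from fast-retransmitting chunks
 * that never made it into this single PMTU-sized packet; they wait for
 * the retransmission timer instead.
 */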
}
return error;
*/
queue = &q->control;
- while ((chunk = (struct sctp_chunk *)skb_dequeue(queue))) {
+ while ((chunk = (struct sctp_chunk *)skb_dequeue(queue)) != NULL) {
/* Pick the right transport to use. */
new_transport = chunk->transport;
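/* Control chunks are not always bound to a peer address, so
 * chunk->transport can be NULL here; such chunks fall back to the
 * association's active path.
 */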
start_timer = 0;
queue = &q->out;
- while ((chunk = sctp_outq_dequeue_data(q))) {
+ while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
* stream identifier.
*/
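/* Chunks destined for a stream id at or beyond the negotiated number
 * of outbound streams are failed with an invalid-stream error rather
 * than transmitted; the check itself is elided from this excerpt.
 */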
SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head "
"%p skb->users %d.\n",
ntohl(chunk->subh.data_hdr->tsn),
- chunk->skb ?chunk->skb->head : 0,
+ chunk->skb ? chunk->skb->head : NULL,
chunk->skb ?
atomic_read(&chunk->skb->users) : -1);
if (ftsn_chunk) {
__skb_queue_tail(&q->control, (struct sk_buff *)ftsn_chunk);
- SCTP_INC_STATS(SctpOutCtrlChunks);
+ SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
}
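/* A FORWARD TSN chunk (RFC 3758) is a control chunk, so it rides the
 * control queue and is counted with the other outbound control chunks.
 */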
}