diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 02272a7..fba567a 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -75,7 +75,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
 static inline void sctp_outq_head_data(struct sctp_outq *q,
                                        struct sctp_chunk *ch)
 {
-       __skb_queue_head(&q->out, (struct sk_buff *)ch);
+       list_add(&ch->list, &q->out_chunk_list);
        q->out_qlen += ch->skb->len;
        return;
 }
@@ -83,31 +83,26 @@ static inline void sctp_outq_head_data(struct sctp_outq *q,
 /* Take data from the front of the queue. */
 static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
 {
-       struct sctp_chunk *ch;
-       ch = (struct sctp_chunk *)__skb_dequeue(&q->out);
-       if (ch)
+       struct sctp_chunk *ch = NULL;
+
+       if (!list_empty(&q->out_chunk_list)) {
+               struct list_head *entry = q->out_chunk_list.next;
+
+               ch = list_entry(entry, struct sctp_chunk, list);
+               list_del_init(entry);
                q->out_qlen -= ch->skb->len;
+       }
        return ch;
 }
 /* Add data chunk to the end of the queue. */
 static inline void sctp_outq_tail_data(struct sctp_outq *q,
                                       struct sctp_chunk *ch)
 {
-       __skb_queue_tail(&q->out, (struct sk_buff *)ch);
+       list_add_tail(&ch->list, &q->out_chunk_list);
        q->out_qlen += ch->skb->len;
        return;
 }
 
-/* Insert a chunk behind chunk 'pos'. */
-static inline void sctp_outq_insert_data(struct sctp_outq *q,
-                                        struct sctp_chunk *ch,
-                                        struct sctp_chunk *pos)
-{
-       __skb_insert((struct sk_buff *)ch, (struct sk_buff *)pos->prev,
-                    (struct sk_buff *)pos, pos->list);
-       q->out_qlen += ch->skb->len;
-}
-
 /*
  * SFR-CACC algorithm:
  * D) If count_of_newacks is greater than or equal to 2
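The hunks above replace the sk_buff-cast queueing of data chunks with a list_head embedded in struct sctp_chunk, recovered via list_entry(). A minimal userspace sketch of that intrusive-list pattern, using hypothetical stand-in types rather than the kernel's <linux/list.h> API:

    #include <stddef.h>
    #include <stdio.h>

    struct list_node {                 /* stand-in for struct list_head */
            struct list_node *prev, *next;
    };

    /* Recover the enclosing structure from the embedded node, as
     * list_entry()/container_of() do in the kernel. */
    #define node_entry(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct chunk {
            int tsn;
            struct list_node list;     /* link into the outqueue */
    };

    static void list_init(struct list_node *h) { h->prev = h->next = h; }

    static void list_add_tail_node(struct list_node *n, struct list_node *h)
    {
            n->prev = h->prev;
            n->next = h;
            h->prev->next = n;
            h->prev = n;
    }

    int main(void)
    {
            struct list_node out;
            struct chunk a = { .tsn = 1 }, b = { .tsn = 2 };
            struct list_node *pos;

            list_init(&out);
            list_add_tail_node(&a.list, &out);
            list_add_tail_node(&b.list, &out);

            for (pos = out.next; pos != &out; pos = pos->next)
                    printf("tsn %d\n", node_entry(pos, struct chunk, list)->tsn);
            return 0;
    }

Because the link lives inside the chunk itself, no allocation or sk_buff casting is needed to move a chunk between queues.
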
@@ -200,19 +195,6 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary,
        return 0;
 }
 
-/* Generate a new outqueue.  */
-struct sctp_outq *sctp_outq_new(struct sctp_association *asoc)
-{
-       struct sctp_outq *q;
-
-       q = t_new(struct sctp_outq, GFP_KERNEL);
-       if (q) {
-               sctp_outq_init(asoc, q);
-               q->malloced = 1;
-       }
-       return q;
-}
-
 /* Initialize an existing sctp_outq.  This does the boring stuff.
  * You still need to define handlers if you really want to DO
  * something with this structure...
@@ -220,8 +202,8 @@ struct sctp_outq *sctp_outq_new(struct sctp_association *asoc)
 void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
 {
        q->asoc = asoc;
-       skb_queue_head_init(&q->out);
-       skb_queue_head_init(&q->control);
+       INIT_LIST_HEAD(&q->out_chunk_list);
+       INIT_LIST_HEAD(&q->control_chunk_list);
        INIT_LIST_HEAD(&q->retransmit);
        INIT_LIST_HEAD(&q->sacked);
        INIT_LIST_HEAD(&q->abandoned);
@@ -240,12 +222,12 @@ void sctp_outq_teardown(struct sctp_outq *q)
 {
        struct sctp_transport *transport;
        struct list_head *lchunk, *pos, *temp;
-       struct sctp_chunk *chunk;
+       struct sctp_chunk *chunk, *tmp;
 
        /* Throw away unacknowledged chunks. */
        list_for_each(pos, &q->asoc->peer.transport_addr_list) {
                transport = list_entry(pos, struct sctp_transport, transports);
-               while ((lchunk = sctp_list_dequeue(&transport->transmitted))) {
+               while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
                        chunk = list_entry(lchunk, struct sctp_chunk,
                                           transmitted_list);
                        /* Mark as part of a failed message. */
@@ -282,7 +264,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
        }
 
        /* Throw away any leftover data chunks. */
-       while ((chunk = sctp_outq_dequeue_data(q))) {
+       while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
 
                /* Mark as send failure. */
                sctp_chunk_fail(chunk, q->error);
@@ -292,8 +274,10 @@ void sctp_outq_teardown(struct sctp_outq *q)
        q->error = 0;
 
        /* Throw away any leftover control chunks. */
-       while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)))
+       list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
+               list_del_init(&chunk->list);
                sctp_chunk_free(chunk);
+       }
 }
 
 /* Free the outqueue structure and any related pending chunks.  */
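The teardown hunk above frees every control chunk with list_for_each_entry_safe(), which caches the next element before the current one is deleted. A standalone sketch of why the "safe" form matters when the loop body releases the node it is visiting (plain singly linked list, hypothetical names):

    #include <stdlib.h>

    struct node {
            struct node *next;
    };

    static void free_all(struct node **head)
    {
            struct node *cur = *head, *tmp;

            while (cur) {
                    tmp = cur->next;    /* remember the successor first... */
                    free(cur);          /* ...so releasing cur cannot break the walk */
                    cur = tmp;
            }
            *head = NULL;
    }

    int main(void)
    {
            struct node *head = NULL, *n;
            int i;

            for (i = 0; i < 3; i++) {
                    n = malloc(sizeof(*n));
                    if (!n)
                            break;
                    n->next = head;
                    head = n;
            }
            free_all(&head);
            return 0;
    }
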
@@ -349,15 +333,15 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
 
                        sctp_outq_tail_data(q, chunk);
                        if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
-                               SCTP_INC_STATS(SctpOutUnorderChunks);
+                               SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS);
                        else
-                               SCTP_INC_STATS(SctpOutOrderChunks);
+                               SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS);
                        q->empty = 0;
                        break;
                };
        } else {
-               __skb_queue_tail(&q->control, (struct sk_buff *) chunk);
-               SCTP_INC_STATS(SctpOutCtrlChunks);
+               list_add_tail(&chunk->list, &q->control_chunk_list);
+               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
        }
 
        if (error < 0)
@@ -372,7 +356,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
 /* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
  * and the abandoned list are in ascending order.
  */
-void sctp_insert_list(struct list_head *head, struct list_head *new)
+static void sctp_insert_list(struct list_head *head, struct list_head *new)
 {
        struct list_head *pos;
        struct sctp_chunk *nchunk, *lchunk;
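sctp_insert_list(), now static, keeps the retransmit and abandoned lists sorted by TSN. A sketch of that kind of ordered insert, using serial-number comparison so a wrapped 32-bit TSN still sorts correctly (hypothetical types; the kernel uses TSN_lt()/TSN_lte() and struct list_head):

    #include <stdint.h>

    struct rtx_node {
            uint32_t tsn;
            struct rtx_node *prev, *next;   /* circular list with a dummy head */
    };

    /* Serial-number arithmetic: "a < b" even across 32-bit wraparound. */
    int tsn_lt(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) < 0;
    }

    void insert_sorted(struct rtx_node *head, struct rtx_node *item)
    {
            struct rtx_node *pos = head->next;

            /* Stop at the first entry whose TSN is not smaller than ours. */
            while (pos != head && tsn_lt(pos->tsn, item->tsn))
                    pos = pos->next;

            /* Link 'item' just before 'pos'. */
            item->prev = pos->prev;
            item->next = pos;
            pos->prev->next = item;
            pos->prev = item;
    }
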
@@ -422,7 +406,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                 * chunks that are not yet acked should be added to the
                 * retransmit queue.
                 */
-               if ((fast_retransmit && chunk->fast_retransmit) ||
+               if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
                   (!fast_retransmit && !chunk->tsn_gap_acked)) {
                        /* RFC 2960 6.2.1 Processing a Received SACK
                         *
@@ -432,7 +416,8 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                         * (Section 7.2.4)), add the data size of those
                         * chunks to the rwnd.
                         */
-                       q->asoc->peer.rwnd += sctp_data_size(chunk);
+                       q->asoc->peer.rwnd += (sctp_data_size(chunk) +
+                                               sizeof(struct sk_buff));
                        q->outstanding_bytes -= sctp_data_size(chunk);
                        transport->flight_size -= sctp_data_size(chunk);
 
@@ -483,6 +468,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
 
        switch(reason) {
        case SCTP_RTXR_T3_RTX:
+               SCTP_INC_STATS(SCTP_MIB_T3_RETRANSMITS);
                sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
                /* Update the retran path if the T3-rtx timer has expired for
                 * the current retran path.
@@ -491,12 +477,15 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
                        sctp_assoc_update_retran_path(transport->asoc);
                break;
        case SCTP_RTXR_FAST_RTX:
+               SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
                sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
                fast_retransmit = 1;
                break;
        case SCTP_RTXR_PMTUD:
-       default:
+               SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
                break;
+       default:
+               BUG();
        }
 
        sctp_retransmit_mark(q, transport, fast_retransmit);
@@ -525,10 +514,10 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
                               int rtx_timeout, int *start_timer)
 {
        struct list_head *lqueue;
-       struct list_head *lchunk;
+       struct list_head *lchunk, *lchunk1;
        struct sctp_transport *transport = pkt->transport;
        sctp_xmit_t status;
-       struct sctp_chunk *chunk;
+       struct sctp_chunk *chunk, *chunk1;
        struct sctp_association *asoc;
        int error = 0;
 
@@ -615,6 +604,13 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
                         * the transmitted list.
                         */
                        list_add_tail(lchunk, &transport->transmitted);
+
+                       /* Mark the chunk as ineligible for fast retransmit 
+                        * after it is retransmitted.
+                        */
+                       if (chunk->fast_retransmit > 0)
+                               chunk->fast_retransmit = -1;
+
                        *start_timer = 1;
                        q->empty = 0;
 
@@ -622,6 +618,19 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
                        lchunk = sctp_list_dequeue(lqueue);
                        break;
                };
+
+               /* If we are here due to a retransmit timeout or a fast
+                * retransmit and if there are any chunks left in the retransmit
+                * queue that could not fit in the PMTU sized packet, they need
+                * to be marked as ineligible for a subsequent fast retransmit.
+                */
+               if (rtx_timeout && !lchunk) {
+                       list_for_each(lchunk1, lqueue) {
+                               chunk1 = list_entry(lchunk1, struct sctp_chunk,
+                                                   transmitted_list);
+                               if (chunk1->fast_retransmit > 0)
+                                       chunk1->fast_retransmit = -1;
+                       }
+               }
        }
 
        return error;
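The two additions above use chunk->fast_retransmit as a small signed tri-state rather than a boolean. A sketch of that convention (illustrative enum; the kernel keeps it as a plain integer field on the chunk):

    /* 0: not marked, 1: marked for fast retransmit,
     * -1: already retransmitted, ineligible for another fast retransmit. */
    enum frtx_state {
            FRTX_INELIGIBLE = -1,
            FRTX_CLEAR      = 0,
            FRTX_MARKED     = 1,
    };

    struct chunk_state {
            enum frtx_state fast_retransmit;
    };

    /* Called once the chunk has gone out of the retransmit queue, so a later
     * string of SACK miss indications cannot fast-retransmit it again. */
    void mark_retransmitted(struct chunk_state *c)
    {
            if (c->fast_retransmit == FRTX_MARKED)
                    c->fast_retransmit = FRTX_INELIGIBLE;
    }
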
@@ -655,10 +664,9 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
        __u16 sport = asoc->base.bind_addr.port;
        __u16 dport = asoc->peer.port;
        __u32 vtag = asoc->peer.i.init_tag;
-       struct sk_buff_head *queue;
        struct sctp_transport *transport = NULL;
        struct sctp_transport *new_transport;
-       struct sctp_chunk *chunk;
+       struct sctp_chunk *chunk, *tmp;
        sctp_xmit_t status;
        int error = 0;
        int start_timer = 0;
@@ -680,18 +688,29 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
         *   ...
         */
 
-       queue = &q->control;
-       while ((chunk = (struct sctp_chunk *)skb_dequeue(queue))) {
+       list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
+               list_del_init(&chunk->list);
+
                /* Pick the right transport to use. */
                new_transport = chunk->transport;
 
                if (!new_transport) {
                        new_transport = asoc->peer.active_path;
-               } else if (!new_transport->active) {
-                       /* If the chunk is Heartbeat, send it to
-                        * chunk->transport, even it's inactive.
+               } else if ((new_transport->state == SCTP_INACTIVE) ||
+                          (new_transport->state == SCTP_UNCONFIRMED)) {
+                       /* If the chunk is Heartbeat or Heartbeat Ack,
+                        * send it to chunk->transport, even if it's
+                        * inactive.
+                        *
+                        * 3.3.6 Heartbeat Acknowledgement:
+                        * ...  
+                        * A HEARTBEAT ACK is always sent to the source IP
+                        * address of the IP datagram containing the
+                        * HEARTBEAT chunk to which this ack is responding.
+                        * ...  
                         */
-                       if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT)
+                       if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
+                           chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK)
                                new_transport = asoc->peer.active_path;
                }
 
@@ -810,9 +829,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 
                /* Finally, transmit new packets.  */
                start_timer = 0;
-               queue = &q->out;
-
-               while ((chunk = sctp_outq_dequeue_data(q))) {
+               while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
                        /* RFC 2960 6.5 Every DATA chunk MUST carry a valid
                         * stream identifier.
                         */
@@ -836,7 +853,9 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
                         * Otherwise, we want to use the active path.
                         */
                        new_transport = chunk->transport;
-                       if (!new_transport || !new_transport->active)
+                       if (!new_transport ||
+                           ((new_transport->state == SCTP_INACTIVE) ||
+                            (new_transport->state == SCTP_UNCONFIRMED)))
                                new_transport = asoc->peer.active_path;
 
                        /* Change packets if necessary.  */
@@ -866,7 +885,7 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
                        SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head "
                                        "%p skb->users %d.\n",
                                        ntohl(chunk->subh.data_hdr->tsn),
-                                       chunk->skb ?chunk->skb->head : 0,
+                                       chunk->skb ?chunk->skb->head : NULL,
                                        chunk->skb ?
                                        atomic_read(&chunk->skb->users) : -1);
 
@@ -1046,7 +1065,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
         * A) Initialize the cacc_saw_newack to 0 for all destination
         * addresses.
         */
-       if (sack->num_gap_ack_blocks > 0 &&
+       if (sack->num_gap_ack_blocks &&
            primary->cacc.changeover_active) {
                list_for_each(pos, transport_list) {
                        transport = list_entry(pos, struct sctp_transport,
@@ -1144,8 +1163,9 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
        /* See if all chunks are acked.
         * Make sure the empty queue handler will get run later.
         */
-       q->empty = skb_queue_empty(&q->out) && skb_queue_empty(&q->control) &&
-                       list_empty(&q->retransmit);
+       q->empty = (list_empty(&q->out_chunk_list) &&
+                   list_empty(&q->control_chunk_list) &&
+                   list_empty(&q->retransmit));
        if (!q->empty)
                goto finish;
 
@@ -1249,6 +1269,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
                                if (!tchunk->tsn_gap_acked &&
                                    !tchunk->resent &&
                                    tchunk->rtt_in_progress) {
+                                       tchunk->rtt_in_progress = 0;
                                        rtt = jiffies - tchunk->sent_at;
                                        sctp_transport_update_rto(transport,
                                                                  rtt);
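The added line above clears rtt_in_progress as soon as the sample is taken, so a chunk contributes at most one RTT measurement, and only if it was never retransmitted (Karn's rule). A sketch of that guard, with hypothetical names and a callback in place of sctp_transport_update_rto():

    #include <stdint.h>

    struct sample_chunk {
            int gap_acked;          /* already counted via a gap ack block */
            int resent;             /* ever retransmitted */
            int rtt_in_progress;    /* this chunk was chosen to time the RTT */
            uint64_t sent_at;
    };

    void maybe_update_rto(struct sample_chunk *c, uint64_t now,
                          void (*update_rto)(uint64_t rtt))
    {
            if (!c->gap_acked && !c->resent && c->rtt_in_progress) {
                    c->rtt_in_progress = 0;      /* consume the sample once */
                    update_rto(now - c->sent_at);
            }
    }
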
@@ -1450,7 +1471,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
                        /* Mark the destination transport address as
                         * active if it is not so marked.
                         */
-                       if (!transport->active) {
+                       if ((transport->state == SCTP_INACTIVE) ||
+                           (transport->state == SCTP_UNCONFIRMED)) {
                                sctp_assoc_control_transport(
                                        transport->asoc,
                                        transport,
@@ -1551,11 +1573,11 @@ static void sctp_mark_missing(struct sctp_outq *q,
                /*
                 * M4) If any DATA chunk is found to have a
                 * 'TSN.Missing.Report'
-                * value larger than or equal to 4, mark that chunk for
+                * value larger than or equal to 3, mark that chunk for
                 * retransmission and start the fast retransmit procedure.
                 */
 
-               if (chunk->tsn_missing_report >= 4) {
+               if (chunk->tsn_missing_report >= 3) {
                        chunk->fast_retransmit = 1;
                        do_fast_retransmit = 1;
                }
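The hunk above lowers the fast-retransmit trigger from four miss indications to three, matching the updated rule the comment now states. A sketch of the counter logic (field names are illustrative):

    struct missing_chunk {
            int tsn_missing_report;
            int fast_retransmit;
    };

    /* Called for each SACK that reports the chunk as missing; returns 1 once
     * the chunk crosses the three-report threshold and fast rtx should start. */
    int note_missing(struct missing_chunk *c)
    {
            if (++c->tsn_missing_report >= 3) {
                    c->fast_retransmit = 1;
                    return 1;
            }
            return 0;
    }
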
@@ -1610,7 +1632,7 @@ pass:
 }
 
 static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
-                                   int nskips, __u16 stream)
+                                   int nskips, __be16 stream)
 {
        int i;
 
@@ -1674,9 +1696,9 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
                if (TSN_lte(tsn, ctsn)) {
                        list_del_init(lchunk);
                        if (!chunk->tsn_gap_acked) {
-                       chunk->transport->flight_size -=
-                                                sctp_data_size(chunk);
-                       q->outstanding_bytes -= sctp_data_size(chunk);
+                               chunk->transport->flight_size -=
+                                       sctp_data_size(chunk);
+                               q->outstanding_bytes -= sctp_data_size(chunk);
                        }
                        sctp_chunk_free(chunk);
                } else {
@@ -1724,7 +1746,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
                                              nskips, &ftsn_skip_arr[0]); 
 
        if (ftsn_chunk) {
-               __skb_queue_tail(&q->control, (struct sk_buff *)ftsn_chunk);
-               SCTP_INC_STATS(SctpOutCtrlChunks);
+               list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
+               SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
        }
 }