packet->transport = transport;
packet->source_port = sport;
packet->destination_port = dport;
- skb_queue_head_init(&packet->chunks);
+ INIT_LIST_HEAD(&packet->chunk_list);
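+ /* Chunks bundled into this packet are linked through chunk->list onto packet->chunk_list. */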
if (asoc) {
- struct sctp_opt *sp = sctp_sk(asoc->base.sk);
+ struct sctp_sock *sp = sctp_sk(asoc->base.sk);
overhead = sp->pf->af->net_header_len;
} else {
overhead = sizeof(struct ipv6hdr);
/* Free a packet. */
void sctp_packet_free(struct sctp_packet *packet)
{
- struct sctp_chunk *chunk;
+ struct sctp_chunk *chunk, *tmp;
SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet);
- while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)))
+ list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
+ list_del_init(&chunk->list);
sctp_chunk_free(chunk);
+ }
if (packet->malloced)
kfree(packet);
goto finish;
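+ /* Use the association-wide path MTU when an association exists; otherwise fall back to the transport's own value. */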
pmtu = ((packet->transport->asoc) ?
- (packet->transport->asoc->pmtu) :
- (packet->transport->pmtu));
+ (packet->transport->asoc->pathmtu) :
+ (packet->transport->pathmtu));
too_big = (psize + chunk_len > pmtu);
packet->has_sack = 1;
/* It is OK to send this chunk. */
- __skb_queue_tail(&packet->chunks, (struct sk_buff *)chunk);
+ list_add_tail(&chunk->list, &packet->chunk_list);
packet->size += chunk_len;
chunk->transport = packet->transport;
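+ /* The chunk now belongs to this packet and is copied into the outbound skb at transmit time. */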
finish:
struct sctp_transport *tp = packet->transport;
struct sctp_association *asoc = tp->asoc;
struct sctphdr *sh;
- __u32 crc32;
+ __u32 crc32 = 0;
struct sk_buff *nskb;
- struct sctp_chunk *chunk;
+ struct sctp_chunk *chunk, *tmp;
struct sock *sk;
int err = 0;
int padding; /* How much padding do we need? */
__u8 has_data = 0;
- struct dst_entry *dst;
+ struct dst_entry *dst = tp->dst;
SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet);
/* Do NOT generate a chunkless packet. */
- chunk = (struct sctp_chunk *)skb_peek(&packet->chunks);
- if (unlikely(!chunk))
+ if (list_empty(&packet->chunk_list))
return err;
/* Set up convenience variables... */
+ chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
sk = chunk->skb->sk;
/* Allocate the new skb. */
- nskb = dev_alloc_skb(packet->size);
+ nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC);
if (!nskb)
goto nomem;
/* Make sure the outbound skb has enough header room reserved. */
- skb_reserve(nskb, packet->overhead);
+ skb_reserve(nskb, packet->overhead + LL_MAX_HEADER);
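+ /* LL_MAX_HEADER covers the largest possible link-layer header, so lower layers need not reallocate the skb. */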
/* Set the owning socket so that we know where to get the
* destination IP address.
*/
skb_set_owner_w(nskb, sk);
+ /* The 'obsolete' field of dst is set to 2 when a dst is freed. */
+ if (!dst || (dst->obsolete > 1)) {
+ dst_release(dst);
+ sctp_transport_route(tp, NULL, sctp_sk(sk));
+ if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
+ sctp_assoc_sync_pmtu(asoc);
+ }
+ }
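+ /* Attach the (possibly refreshed) route to the new skb; without a route the packet cannot be sent. */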
+ nskb->dst = dst_clone(tp->dst);
+ if (!nskb->dst)
+ goto no_route;
+ dst = nskb->dst;
+
/* Build the SCTP header. */
sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr));
sh->source = htons(packet->source_port);
* Note: Adler-32 is no longer applicable, as it has been replaced
* by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
*/
- crc32 = sctp_start_cksum((__u8 *)sh, sizeof(struct sctphdr));
+ if (!(dst->dev->features & NETIF_F_NO_CSUM))
+ crc32 = sctp_start_cksum((__u8 *)sh, sizeof(struct sctphdr));
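+ /* Devices advertising NETIF_F_NO_CSUM (e.g. loopback) need no checksum, so the software CRC32-C is skipped for them. */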
/**
* 6.10 Bundling
* [This whole comment explains WORD_ROUND() below.]
*/
SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n");
- while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks))) {
+ list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
+ list_del_init(&chunk->list);
if (sctp_chunk_is_data(chunk)) {
if (!chunk->has_tsn) {
if (padding)
memset(skb_put(chunk->skb, padding), 0, padding);
- crc32 = sctp_update_copy_cksum(skb_put(nskb, chunk->skb->len),
- chunk->skb->data,
- chunk->skb->len, crc32);
+ if (dst->dev->features & NETIF_F_NO_CSUM)
+ memcpy(skb_put(nskb, chunk->skb->len),
+ chunk->skb->data, chunk->skb->len);
+ else
+ crc32 = sctp_update_copy_cksum(skb_put(nskb,
+ chunk->skb->len),
+ chunk->skb->data,
+ chunk->skb->len, crc32);
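+ /* Either way the chunk payload is now in nskb; the CRC32-C is folded in only when a checksum is required. */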
SCTP_DEBUG_PRINTK("%s %p[%s] %s 0x%x, %s %d, %s %d, %s %d\n",
"*** Chunk", chunk,
}
/* Perform final transformation on checksum. */
- crc32 = sctp_end_cksum(crc32);
+ if (!(dst->dev->features & NETIF_F_NO_CSUM))
+ crc32 = sctp_end_cksum(crc32);
/* 3) Put the resultant value into the checksum field in the
* common header, and leave the rest of the bits unchanged.
}
}
- dst = tp->dst;
- /* The 'obsolete' field of dst is set to 2 when a dst is freed. */
- if (!dst || (dst->obsolete > 1)) {
- dst_release(dst);
- sctp_transport_route(tp, NULL, sctp_sk(sk));
- sctp_assoc_sync_pmtu(asoc);
- }
-
- nskb->dst = dst_clone(tp->dst);
- if (!nskb->dst)
- goto no_route;
-
SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb len %d\n",
nskb->len);
- (*tp->af_specific->sctp_xmit)(nskb, tp, packet->ipfragok);
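+ /* When PMTU discovery is disabled on this transport, allow IP fragmentation unconditionally. */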
+ if (tp->param_flags & SPP_PMTUD_ENABLE)
+ (*tp->af_specific->sctp_xmit)(nskb, tp, packet->ipfragok);
+ else
+ (*tp->af_specific->sctp_xmit)(nskb, tp, 1);
out:
packet->size = packet->overhead;
return err;
no_route:
kfree_skb(nskb);
- IP_INC_STATS_BH(IpOutNoRoutes);
+ IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
/* FIXME: Returning the 'err' will affect all the associations
* associated with a socket, although only one of the paths of the
* association is unreachable.
*/
/* Control chunks are unreliable, so just drop them.  DATA chunks
* will get resent or dropped later.
*/
- while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks))) {
+ list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
+ list_del_init(&chunk->list);
if (!sctp_chunk_is_data(chunk))
sctp_chunk_free(chunk);
}
struct sctp_transport *transport = packet->transport;
__u32 max_burst_bytes;
struct sctp_association *asoc = transport->asoc;
- struct sctp_opt *sp = sctp_sk(asoc->base.sk);
+ struct sctp_sock *sp = sctp_sk(asoc->base.sk);
struct sctp_outq *q = &asoc->outqueue;
/* RFC 2960 6.1 Transmission of DATA Chunks
* if ((flightsize + Max.Burst * MTU) < cwnd)
* cwnd = flightsize + Max.Burst * MTU
*/
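+ /* e.g. with Max.Burst at its usual default of 4 and a 1500-byte path MTU, cwnd is capped at flightsize + 6000 bytes. */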
- max_burst_bytes = asoc->max_burst * asoc->pmtu;
+ max_burst_bytes = asoc->max_burst * asoc->pathmtu;
if ((transport->flight_size + max_burst_bytes) < transport->cwnd) {
transport->cwnd = transport->flight_size + max_burst_bytes;
SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: "
* When a Fast Retransmit is being performed the sender SHOULD
* ignore the value of cwnd and SHOULD NOT delay retransmission.
*/
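+ /* Chunks already flagged for fast retransmit bypass the cwnd check below. */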
- if (!chunk->fast_retransmit)
+ if (chunk->fast_retransmit <= 0)
if (transport->flight_size >= transport->cwnd) {
retval = SCTP_XMIT_RWND_FULL;
goto finish;
* data will fit or delay in hopes of bundling a full
* sized packet.
*/
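+ /* asoc->frag_point already accounts for header overhead, so it is compared against the data length directly. */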
- if (len < asoc->pmtu - packet->overhead) {
+ if (len < asoc->frag_point) {
retval = SCTP_XMIT_NAGLE_DELAY;
goto finish;
}
/* Keep track of how many bytes are in flight to the receiver. */
asoc->outqueue.outstanding_bytes += datasize;
- /* Update our view of the receiver's rwnd. */
+ /* Update our view of the receiver's rwnd. Include sk_buff overhead
+ * while updating peer.rwnd so that it reduces the chances of a
+ * receiver running out of receive buffer space even when receive
+ * window is still open. This can happen when a sender is sending
+ * small messages.
+ */
+ datasize += sizeof(struct sk_buff);
if (datasize < rwnd)
rwnd -= datasize;
else