struct sock *sk = asoc->base.sk;
int amt = 0;
- amt = sk->sk_sndbuf - asoc->sndbuf_used;
+ if (asoc->ep->sndbuf_policy) {
+ /* make sure that no association uses more than sk_sndbuf */
+ amt = sk->sk_sndbuf - asoc->sndbuf_used;
+ } else {
+ /* do socket level accounting */
+ amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
+ }
+
if (amt < 0)
amt = 0;
+
return amt;
}
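
The two branches above implement two accounting policies: with sndbuf_policy set, each association may consume up to the full sk_sndbuf on its own; without it, all associations on the socket share sk_sndbuf through sk_wmem_alloc. A minimal user-space model of that check, purely illustrative (none of these names are kernel APIs):

#include <stdio.h>

/* Stand-ins for the socket and association fields used above. */
struct model_sock  { int sndbuf; int wmem_alloc; };
struct model_assoc { int sndbuf_used; int per_assoc_policy; };

static int model_wspace(const struct model_sock *sk,
			const struct model_assoc *asoc)
{
	int amt;

	if (asoc->per_assoc_policy)
		/* each association is checked against sndbuf on its own */
		amt = sk->sndbuf - asoc->sndbuf_used;
	else
		/* all associations share the socket-wide write allocation */
		amt = sk->sndbuf - sk->wmem_alloc;

	return amt < 0 ? 0 : amt;
}

int main(void)
{
	struct model_sock sk  = { .sndbuf = 65536, .wmem_alloc = 60000 };
	struct model_assoc a  = { .sndbuf_used = 10000, .per_assoc_policy = 0 };

	printf("shared accounting:    %d bytes free\n", model_wspace(&sk, &a));
	a.per_assoc_policy = 1;
	printf("per-assoc accounting: %d bytes free\n", model_wspace(&sk, &a));
	return 0;
}
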
/* The sndbuf space is tracked per association. */
sctp_association_hold(asoc);
+ skb_set_owner_w(chunk->skb, sk);
+
chunk->skb->destructor = sctp_wfree;
/* Save the chunk pointer in skb for sctp_wfree to use later. */
*((struct sctp_chunk **)(chunk->skb->cb)) = chunk;
- asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk);
- sk->sk_wmem_queued += SCTP_DATA_SNDSIZE(chunk);
+ asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
+ sizeof(struct sk_buff) +
+ sizeof(struct sctp_chunk);
+
+ sk->sk_wmem_queued += SCTP_DATA_SNDSIZE(chunk) +
+ sizeof(struct sk_buff) +
+ sizeof(struct sctp_chunk);
+
+ atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
}
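
With this change a queued chunk is charged for its metadata as well as its payload, so sndbuf_used and sk_wmem_queued reflect the real memory cost of the queued data. A rough user-space sketch of that bookkeeping follows; the overhead constants are placeholders standing in for the kernel's sizeof(struct sk_buff) and sizeof(struct sctp_chunk), whose actual values are kernel-internal:

#include <stdio.h>

#define SKB_OVERHEAD   240	/* stand-in for sizeof(struct sk_buff) */
#define CHUNK_OVERHEAD 200	/* stand-in for sizeof(struct sctp_chunk) */

struct model_acct {
	int sndbuf_used;	/* per-association charge */
	int wmem_queued;	/* per-socket charge */
};

/* Charge one data chunk: payload plus the metadata that carries it. */
static void charge_chunk(struct model_acct *acct, int payload)
{
	int cost = payload + SKB_OVERHEAD + CHUNK_OVERHEAD;

	acct->sndbuf_used += cost;
	acct->wmem_queued += cost;
}

int main(void)
{
	struct model_acct acct = { 0, 0 };

	charge_chunk(&acct, 1000);
	printf("one 1000-byte chunk charges %d bytes\n", acct.sndbuf_used);
	return 0;
}
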
/* Verify that this is a valid address. */
return -EINVAL;
/* Values corresponding to the specific association */
- if (assocparams.sasoc_assoc_id != 0) {
+ if (asoc) {
assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
assocparams.sasoc_local_rwnd = asoc->a_rwnd;
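
Because the check is now based on the looked-up association rather than a non-zero id, a one-to-one style socket can retrieve its association's live values while leaving sasoc_assoc_id at 0. A hedged user-space sketch, assuming the lksctp-tools headers are installed and with error handling kept minimal; in real use the socket would be connected first so the reported values come from the live association rather than the endpoint defaults:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Query association parameters; sasoc_assoc_id stays 0 and the kernel
 * resolves the one-to-one socket's single association itself.
 */
static int print_associnfo(int sd)
{
	struct sctp_assocparams params;
	socklen_t len = sizeof(params);

	memset(&params, 0, sizeof(params));
	if (getsockopt(sd, IPPROTO_SCTP, SCTP_ASSOCINFO, &params, &len) < 0) {
		perror("getsockopt(SCTP_ASSOCINFO)");
		return -1;
	}

	printf("max rtx %hu, peer rwnd %u, local rwnd %u\n",
	       params.sasoc_asocmaxrxt, params.sasoc_peer_rwnd,
	       params.sasoc_local_rwnd);
	return 0;
}

int main(void)
{
	int sd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

	if (sd < 0) {
		perror("socket");
		return 1;
	}
	return print_associnfo(sd) < 0;
}
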
chunk = *((struct sctp_chunk **)(skb->cb));
asoc = chunk->asoc;
sk = asoc->base.sk;
- asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk);
- sk->sk_wmem_queued -= SCTP_DATA_SNDSIZE(chunk);
+ asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
+ sizeof(struct sk_buff) +
+ sizeof(struct sctp_chunk);
+
+ sk->sk_wmem_queued -= SCTP_DATA_SNDSIZE(chunk) +
+ sizeof(struct sk_buff) +
+ sizeof(struct sctp_chunk);
+
+ atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
+
+ sock_wfree(skb);
__sctp_write_space(asoc);
sctp_association_put(asoc);
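
A note on how the work is split here: skb_set_owner_w() in the charge path already adds skb->truesize to sk_wmem_alloc and takes a reference on the socket, with sock_wfree() as the normal way to undo both. Since the destructor is then overridden to sctp_wfree(), that function has to call sock_wfree() itself to return the truesize charge and drop the reference, while the explicit atomic_add()/atomic_sub() pair covers only the additional struct sctp_chunk overhead that the generic helpers know nothing about.
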
struct sctp_endpoint *newep = newsp->ep;
struct sk_buff *skb, *tmp;
struct sctp_ulpevent *event;
+ int flags = 0;
/* Migrate socket buffer sizes and all the socket level options to the
* new socket.
sctp_sk(newsk)->bind_hash = pp;
inet_sk(newsk)->num = inet_sk(oldsk)->num;
+ /* Copy the bind_addr list from the original endpoint to the new
+ * endpoint so that we can handle restarts properly
+ */
+ if (assoc->peer.ipv4_address)
+ flags |= SCTP_ADDR4_PEERSUPP;
+ if (assoc->peer.ipv6_address)
+ flags |= SCTP_ADDR6_PEERSUPP;
+ sctp_bind_addr_copy(&newsp->ep->base.bind_addr,
+ &oldsp->ep->base.bind_addr,
+ SCTP_SCOPE_GLOBAL, GFP_KERNEL, flags);
+
/* Move any messages in the old socket's receive queue that are for the
* peeled off association to the new socket's receive queue.
*/
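
This migration is what backs the user-visible peel-off operation: the new socket inherits the old socket's bound addresses (filtered by the address families the peer supports, per the flags above) and takes over any queued messages for that association. A hedged user-space sketch using lksctp-tools (link with -lsctp); it assumes an existing one-to-many socket, and the association id would normally come from a received sctp_sndrcvinfo or an association-change notification:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Peel one association off a one-to-many (SOCK_SEQPACKET) SCTP socket.
 * The returned descriptor is a one-to-one style socket that has been
 * migrated as in the kernel code above.
 */
static int peel_off(int seqpacket_sd, sctp_assoc_t assoc_id)
{
	int peeled = sctp_peeloff(seqpacket_sd, assoc_id);

	if (peeled < 0) {
		perror("sctp_peeloff");
		return -1;
	}
	/* Messages already queued for this association now sit on the
	 * new socket's receive queue, not the old one's.
	 */
	return peeled;
}
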
.hash = sctp_hash,
.unhash = sctp_unhash,
.get_port = sctp_get_port,
- .slab_obj_size = sizeof(struct sctp_sock),
+ .obj_size = sizeof(struct sctp_sock),
};
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
.hash = sctp_hash,
.unhash = sctp_unhash,
.get_port = sctp_get_port,
- .slab_obj_size = sizeof(struct sctp6_sock),
+ .obj_size = sizeof(struct sctp6_sock),
};
#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */