const union sctp_addr *peer,
struct sctp_transport **pt);
+static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
+
/* Calculate the SCTP checksum of an SCTP packet. */
static inline int sctp_rcv_checksum(struct sk_buff *skb)
return 0;
}
-/* The free routine for skbuffs that sctp receives */
-static void sctp_rfree(struct sk_buff *skb)
-{
- atomic_sub(sizeof(struct sctp_chunk),&skb->sk->sk_rmem_alloc);
- sock_rfree(skb);
-}
-
-/* The ownership wrapper routine to do receive buffer accounting */
-static void sctp_rcv_set_owner_r(struct sk_buff *skb, struct sock *sk)
-{
- skb_set_owner_r(skb,sk);
- skb->destructor = sctp_rfree;
- atomic_add(sizeof(struct sctp_chunk),&sk->sk_rmem_alloc);
-}
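+/* Per-packet control block for SCTP, overlaid on skb->cb[].  The
+ * leading union preserves the layout that the IPv4/IPv6 input paths
+ * expect, and the chunk pointer lets the backlog code recover the
+ * chunk from a bare skb, since skb->cb persists while the skb sits
+ * on a socket backlog.
+ */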
+struct sctp_input_cb {
+ union {
+ struct inet_skb_parm h4;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ struct inet6_skb_parm h6;
+#endif
+ } header;
+ struct sctp_chunk *chunk;
+};
+#define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0]))
/*
* This is the routine which IP calls when receiving an SCTP packet.
union sctp_addr dest;
int family;
struct sctp_af *af;
- int ret = 0;
if (skb->pkt_type!=PACKET_HOST)
goto discard_it;
* IP broadcast addresses cannot be used in an SCTP transport
* address."
*/
- if (!af->addr_valid(&src, NULL) || !af->addr_valid(&dest, NULL))
+ if (!af->addr_valid(&src, NULL, skb) ||
+ !af->addr_valid(&dest, NULL, skb))
goto discard_it;
asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport);
*/
if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
{
- sock_put(sk);
if (asoc) {
sctp_association_put(asoc);
asoc = NULL;
sk = sctp_get_ctl_sock();
ep = sctp_sk(sk)->ep;
sctp_endpoint_hold(ep);
- sock_hold(sk);
rcvr = &ep->base;
}
- if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
- goto discard_release;
-
/*
* RFC 2960, 8.4 - Handle "Out of the blue" Packets.
* An SCTP packet is called an "out of the blue" (OOTB)
}
/* SCTP seems to always need a timestamp right now (FIXME) */
- if (skb->stamp.tv_sec == 0) {
- do_gettimeofday(&skb->stamp);
+ if (skb->tstamp.off_sec == 0) {
+ __net_timestamp(skb);
sock_enable_timestamp(sk);
}
if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
goto discard_release;
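+ /* Release any netfilter state (e.g. conntrack references) now;
+ * the skb may sit on a socket backlog long after the netfilter
+ * hooks have run.
+ */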
+ nf_reset(skb);
- ret = sk_filter(sk, skb, 1);
- if (ret)
+ if (sk_filter(sk, skb, 1))
goto discard_release;
/* Create an SCTP packet structure. */
chunk = sctp_chunkify(skb, asoc, sk);
- if (!chunk) {
- ret = -ENOMEM;
+ if (!chunk)
goto discard_release;
- }
-
- sctp_rcv_set_owner_r(skb,sk);
+ SCTP_INPUT_CB(skb)->chunk = chunk;
/* Remember what endpoint is to handle this packet. */
chunk->rcvr = rcvr;
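+ /* If user context holds the socket lock, park the skb on the
+ * backlog; sctp_add_backlog() takes a ref on the rcvr so that it
+ * cannot vanish while queued. Otherwise push the chunk straight
+ * into the receiver's inqueue.
+ */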
sctp_bh_lock_sock(sk);
if (sock_owned_by_user(sk))
- sk_add_backlog(sk, (struct sk_buff *) chunk);
+ sctp_add_backlog(sk, skb);
else
- sctp_backlog_rcv(sk, (struct sk_buff *) chunk);
+ sctp_inq_push(&chunk->rcvr->inqueue, chunk);
- /* Release the sock and any reference counts we took in the
- * lookup calls.
- */
sctp_bh_unlock_sock(sk);
+
+ /* Release the asoc/ep ref we took in the lookup calls. */
if (asoc)
sctp_association_put(asoc);
else
sctp_endpoint_put(ep);
- sock_put(sk);
- return ret;
+
+ return 0;
discard_it:
kfree_skb(skb);
- return ret;
+ return 0;
discard_release:
- /* Release any structures we may be holding. */
- sock_put(sk);
+ /* Release the asoc/ep ref we took in the lookup calls. */
if (asoc)
sctp_association_put(asoc);
else
goto discard_it;
}
-/* Handle second half of inbound skb processing. If the sock was busy,
- * we may have need to delay processing until later when the sock is
- * released (on the backlog). If not busy, we call this routine
- * directly from the bottom half.
+/* Process an skb from the socket backlog. Every skb on
+ * the backlog holds a ref on an association or endpoint.
+ * We hold this ref throughout the state machine to make
+ * sure that the structure we need is still around.
*/
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
- struct sctp_chunk *chunk;
- struct sctp_inq *inqueue;
+ struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+ struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
+ struct sctp_ep_common *rcvr = chunk->rcvr;
+ int backlogged = 0;
- /* One day chunk will live inside the skb, but for
- * now this works.
+ /* If the rcvr is dead then the association or endpoint
+ * has been deleted and we can safely drop the chunk
+ * and refs that we are holding.
*/
- chunk = (struct sctp_chunk *) skb;
- inqueue = &chunk->rcvr->inqueue;
+ if (rcvr->dead) {
+ sctp_chunk_free(chunk);
+ goto done;
+ }
+
+ if (unlikely(rcvr->sk != sk)) {
+ /* In this case, the association moved from one socket to
+ * another. We are currently sitting on the backlog of the
+ * old socket, so we need to move.
+ * However, since we are here in process context we
+ * need to make sure that the user doesn't own
+ * the new socket when we process the packet.
+ * If the new socket is user-owned, queue the chunk to the
+ * backlog of the new socket without dropping any refs.
+ * Otherwise, we can safely push the chunk on the inqueue.
+ */
+
+ sk = rcvr->sk;
+ sctp_bh_lock_sock(sk);
+
+ if (sock_owned_by_user(sk)) {
+ sk_add_backlog(sk, skb);
+ backlogged = 1;
+ } else
+ sctp_inq_push(inqueue, chunk);
+
+ sctp_bh_unlock_sock(sk);
+
+ /* If the chunk was backlogged again, don't drop refs */
+ if (backlogged)
+ return 0;
+ } else {
+ sctp_inq_push(inqueue, chunk);
+ }
+
+done:
+ /* Release the refs we took in sctp_add_backlog */
+ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+ sctp_association_put(sctp_assoc(rcvr));
+ else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+ sctp_endpoint_put(sctp_ep(rcvr));
+ else
+ BUG();
- sctp_inq_push(inqueue, chunk);
return 0;
}
+static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+ struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+ struct sctp_ep_common *rcvr = chunk->rcvr;
+
+ /* Hold the assoc/ep while the skb hangs on the backlog queue.
+ * This way, we know the structures we need will not disappear
+ * from under us.
+ */
+ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+ sctp_association_hold(sctp_assoc(rcvr));
+ else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+ sctp_endpoint_hold(sctp_ep(rcvr));
+ else
+ BUG();
+
+ sk_add_backlog(sk, skb);
+}
+
/* Handle icmp frag needed error. */
void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
struct sctp_transport *t, __u32 pmtu)
{
- if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
- printk(KERN_WARNING "%s: Reported pmtu %d too low, "
- "using default minimum of %d\n", __FUNCTION__, pmtu,
- SCTP_DEFAULT_MINSEGMENT);
- pmtu = SCTP_DEFAULT_MINSEGMENT;
- }
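+ /* Bail out early if user context owns the socket, if there is
+ * no transport, or if the reported PMTU is unchanged.
+ */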
+ if (sock_owned_by_user(sk) || !t || (t->pathmtu == pmtu))
+ return;
- if (!sock_owned_by_user(sk) && t && (t->pmtu != pmtu)) {
- t->pmtu = pmtu;
+ if (t->param_flags & SPP_PMTUD_ENABLE) {
+ if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
+ printk(KERN_WARNING "%s: Reported pmtu %d too low, "
+ "using default minimum of %d\n",
+ __FUNCTION__, pmtu,
+ SCTP_DEFAULT_MINSEGMENT);
+ /* Use default minimum segment size and disable
+ * pmtu discovery on this transport.
+ */
+ t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
+ t->param_flags = (t->param_flags & ~SPP_PMTUD) |
+ SPP_PMTUD_DISABLE;
+ } else {
+ t->pathmtu = pmtu;
+ }
+
+ /* Update association pmtu. */
sctp_assoc_sync_pmtu(asoc);
- sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
}
+
+ /* Retransmit with the new pmtu setting.
+ * Normally, if PMTU discovery is disabled, an ICMP Fragmentation
+ * Needed will never be sent, but if a message was sent before
+ * PMTU discovery was disabled that was larger than the PMTU, it
+ * would not be fragmented, so it must be re-transmitted fragmented.
+ */
+ sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
}
/*
*
*/
void sctp_icmp_proto_unreachable(struct sock *sk,
- struct sctp_endpoint *ep,
struct sctp_association *asoc,
struct sctp_transport *t)
{
sctp_do_sm(SCTP_EVENT_T_OTHER,
SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
- asoc->state, asoc->ep, asoc, NULL,
+ asoc->state, asoc->ep, asoc, t,
GFP_ATOMIC);
}
/* Common lookup code for icmp/icmpv6 error handler. */
struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
struct sctphdr *sctphdr,
- struct sctp_endpoint **epp,
struct sctp_association **app,
struct sctp_transport **tpp)
{
union sctp_addr daddr;
struct sctp_af *af;
struct sock *sk = NULL;
- struct sctp_endpoint *ep = NULL;
- struct sctp_association *asoc = NULL;
+ struct sctp_association *asoc;
struct sctp_transport *transport = NULL;
- *app = NULL; *epp = NULL; *tpp = NULL;
+ *app = NULL; *tpp = NULL;
af = sctp_get_af_specific(family);
if (unlikely(!af)) {
* packet.
*/
asoc = __sctp_lookup_association(&saddr, &daddr, &transport);
- if (!asoc) {
- /* If there is no matching association, see if it matches any
- * endpoint. This may happen for an ICMP error generated in
- * response to an INIT_ACK.
- */
- ep = __sctp_rcv_lookup_endpoint(&daddr);
- if (!ep) {
- return NULL;
- }
- }
+ if (!asoc)
+ return NULL;
- if (asoc) {
- sk = asoc->base.sk;
+ sk = asoc->base.sk;
- if (ntohl(sctphdr->vtag) != asoc->c.peer_vtag) {
- ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
- goto out;
- }
- } else
- sk = ep->base.sk;
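+ /* Only act on the error if the verification tag in the offending
+ * packet matches the peer's tag; otherwise off-path hosts could
+ * forge ICMP errors against the association.
+ */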
+ if (ntohl(sctphdr->vtag) != asoc->c.peer_vtag) {
+ ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
+ goto out;
+ }
sctp_bh_lock_sock(sk);
if (sock_owned_by_user(sk))
NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
- *epp = ep;
*app = asoc;
*tpp = transport;
return sk;
out:
- sock_put(sk);
if (asoc)
sctp_association_put(asoc);
- if (ep)
- sctp_endpoint_put(ep);
return NULL;
}
/* Common cleanup code for icmp/icmpv6 error handler. */
-void sctp_err_finish(struct sock *sk, struct sctp_endpoint *ep,
- struct sctp_association *asoc)
+void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
{
sctp_bh_unlock_sock(sk);
- sock_put(sk);
if (asoc)
sctp_association_put(asoc);
- if (ep)
- sctp_endpoint_put(ep);
}
/*
int type = skb->h.icmph->type;
int code = skb->h.icmph->code;
struct sock *sk;
- struct sctp_endpoint *ep;
- struct sctp_association *asoc;
+ struct sctp_association *asoc = NULL;
struct sctp_transport *transport;
struct inet_sock *inet;
char *saveip, *savesctp;
savesctp = skb->h.raw;
skb->nh.iph = iph;
skb->h.raw = (char *)sh;
- sk = sctp_err_lookup(AF_INET, skb, sh, &ep, &asoc, &transport);
+ sk = sctp_err_lookup(AF_INET, skb, sh, &asoc, &transport);
/* Put back, the original pointers. */
skb->nh.raw = saveip;
skb->h.raw = savesctp;
}
else {
if (ICMP_PROT_UNREACH == code) {
- sctp_icmp_proto_unreachable(sk, ep, asoc,
+ sctp_icmp_proto_unreachable(sk, asoc,
transport);
goto out_unlock;
}
}
out_unlock:
- sctp_err_finish(sk, ep, asoc);
+ sctp_err_finish(sk, asoc);
}
/*
sctp_errhdr_t *err;
ch = (sctp_chunkhdr_t *) skb->data;
- ch_end = ((__u8 *) ch) + WORD_ROUND(ntohs(ch->length));
/* Scan through all the chunks in the packet. */
- while (ch_end > (__u8 *)ch && ch_end < skb->tail) {
+ do {
+ /* Break out if chunk length is less than minimal. */
+ if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
+ break;
+
+ ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+ if (ch_end > skb->tail)
+ break;
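+ /* Note: ch->length excludes trailing padding; WORD_ROUND() keeps
+ * the walk on the 4-byte chunk alignment the wire format requires.
+ */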
/* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
* receiver MUST silently discard the OOTB packet and take no
}
ch = (sctp_chunkhdr_t *) ch_end;
- ch_end = ((__u8 *) ch) + WORD_ROUND(ntohs(ch->length));
- }
+ } while (ch_end < skb->tail);
return 0;
hit:
sctp_endpoint_hold(ep);
- sock_hold(epb->sk);
read_unlock(&head->lock);
return ep;
}
hit:
*pt = transport;
sctp_association_hold(asoc);
- sock_hold(epb->sk);
read_unlock(&head->lock);
return asoc;
}
struct sctp_transport *transport;
if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) {
- sock_put(asoc->base.sk);
sctp_association_put(asoc);
return 1;
}