X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=net%2Fsctp%2Fsm_statefuns.c;h=4eaa1c1929f39e68d96220d4a6f995900d2a3d7f;hb=90ad8654ffeb336af8c878fdf7bc72e2ac72467a;hp=addb95d790cdc3fcf9b3d2744ea128e711b0ad24;hpb=a91482bdcc2e0f6035702e46f1b99043a0893346;p=linux-2.6.git diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index addb95d79..4eaa1c192 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -148,8 +148,8 @@ sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep, sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); - SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS); - SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); + SCTP_INC_STATS(SctpShutdowns); + SCTP_DEC_STATS(SctpCurrEstab); sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); @@ -245,7 +245,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep, if (packet) { sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet)); - SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); + SCTP_INC_STATS(SctpOutCtrlChunks); return SCTP_DISPOSITION_CONSUME; } else { return SCTP_DISPOSITION_NOMEM; @@ -404,7 +404,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); - SCTP_INC_STATS(SCTP_MIB_ABORTEDS); + SCTP_INC_STATS(SctpAborteds); sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); return SCTP_DISPOSITION_DELETE_TCB; } @@ -415,7 +415,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, &err_chunk)) { - SCTP_INC_STATS(SCTP_MIB_ABORTEDS); + SCTP_INC_STATS(SctpAborteds); /* This chunk contains fatal error. It is to be discarded. * Send an ABORT, with causes if there is any. @@ -432,7 +432,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, if (packet) { sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet)); - SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); + SCTP_INC_STATS(SctpOutCtrlChunks); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, @@ -472,6 +472,8 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); + sctp_add_cmd_sf(commands, SCTP_CMD_COUNTER_RESET, + SCTP_COUNTER(SCTP_COUNTER_INIT_ERROR)); sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, @@ -585,8 +587,8 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep, sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_ESTABLISHED)); - SCTP_INC_STATS(SCTP_MIB_CURRESTAB); - SCTP_INC_STATS(SCTP_MIB_PASSIVEESTABS); + SCTP_INC_STATS(SctpCurrEstab); + SCTP_INC_STATS(SctpPassiveEstabs); sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); if (new_asoc->autoclose) @@ -672,15 +674,6 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep, if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(ep, asoc, type, arg, commands); - /* Reset init error count upon receipt of COOKIE-ACK, - * to avoid problems with the managemement of this - * counter in stale cookie situations when a transition back - * from the COOKIE-ECHOED state to the COOKIE-WAIT - * state is performed. 
- */ - sctp_add_cmd_sf(commands, SCTP_CMD_COUNTER_RESET, - SCTP_COUNTER(SCTP_COUNTER_INIT_ERROR)); - /* RFC 2960 5.1 Normal Establishment of an Association * * E) Upon reception of the COOKIE ACK, endpoint "A" will move @@ -691,8 +684,8 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_ESTABLISHED)); - SCTP_INC_STATS(SCTP_MIB_CURRESTAB); - SCTP_INC_STATS(SCTP_MIB_ACTIVEESTABS); + SCTP_INC_STATS(SctpCurrEstab); + SCTP_INC_STATS(SctpActiveEstabs); sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); if (asoc->autoclose) sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, @@ -764,8 +757,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep, /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(SCTP_ERROR_NO_ERROR)); - SCTP_INC_STATS(SCTP_MIB_ABORTEDS); - SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); + SCTP_INC_STATS(SctpAborteds); + SCTP_DEC_STATS(SctpCurrEstab); return SCTP_DISPOSITION_DELETE_TCB; } @@ -967,7 +960,7 @@ static int sctp_sf_send_restart_abort(union sctp_addr *ssa, goto out; sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt)); - SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); + SCTP_INC_STATS(SctpOutCtrlChunks); /* Discard the rest of the inbound packet. */ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); @@ -1170,7 +1163,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init( if (packet) { sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet)); - SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); + SCTP_INC_STATS(SctpOutCtrlChunks); retval = SCTP_DISPOSITION_CONSUME; } else { retval = SCTP_DISPOSITION_NOMEM; @@ -1508,7 +1501,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep, sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_ESTABLISHED)); - SCTP_INC_STATS(SCTP_MIB_CURRESTAB); + SCTP_INC_STATS(SctpCurrEstab); sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); repl = sctp_make_cookie_ack(new_asoc, chunk); @@ -1592,7 +1585,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_ESTABLISHED)); - SCTP_INC_STATS(SCTP_MIB_CURRESTAB); + SCTP_INC_STATS(SctpCurrEstab); sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); @@ -1879,6 +1872,8 @@ sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep, time_t stale; sctp_cookie_preserve_param_t bht; sctp_errhdr_t *err; + struct list_head *pos; + struct sctp_transport *t; struct sctp_chunk *reply; struct sctp_bind_addr *bp; int attempts; @@ -1925,27 +1920,20 @@ sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep, /* Clear peer's init_tag cached in assoc as we are sending a new INIT */ sctp_add_cmd_sf(commands, SCTP_CMD_CLEAR_INIT_TAG, SCTP_NULL()); - /* Stop pending T3-rtx and heartbeat timers */ - sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL()); - sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL()); - - /* Delete non-primary peer ip addresses since we are transitioning - * back to the COOKIE-WAIT state - */ - sctp_add_cmd_sf(commands, SCTP_CMD_DEL_NON_PRIMARY, SCTP_NULL()); - - /* If we've sent any data bundled with COOKIE-ECHO we will need to - * resend - */ - 
sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, - SCTP_TRANSPORT(asoc->peer.primary_path)); - /* Cast away the const modifier, as we want to just * rerun it through as a sideffect. */ sctp_add_cmd_sf(commands, SCTP_CMD_COUNTER_INC, SCTP_COUNTER(SCTP_COUNTER_INIT_ERROR)); + /* If we've sent any data bundled with COOKIE-ECHO we need to + * resend. + */ + list_for_each(pos, &asoc->peer.transport_addr_list) { + t = list_entry(pos, struct sctp_transport, transports); + sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(t)); + } + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, @@ -2013,8 +2001,8 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep, /* ASSOC_FAILED will DELETE_TCB. */ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error)); - SCTP_INC_STATS(SCTP_MIB_ABORTEDS); - SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); + SCTP_INC_STATS(SctpAborteds); + SCTP_DEC_STATS(SctpCurrEstab); return SCTP_DISPOSITION_ABORT; } @@ -2039,7 +2027,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep, sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); - SCTP_INC_STATS(SCTP_MIB_ABORTEDS); + SCTP_INC_STATS(SctpAborteds); sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); @@ -2333,7 +2321,12 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; - int error; + sctp_datahdr_t *data_hdr; + struct sctp_chunk *err; + size_t datalen; + sctp_verb_t deliver; + int tmp; + __u32 tsn; if (!sctp_vtag_verify(chunk, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, @@ -2341,22 +2334,158 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep, return sctp_sf_pdiscard(ep, asoc, type, arg, commands); } - error = sctp_eat_data(asoc, chunk, commands ); - switch (error) { - case SCTP_IERROR_NO_ERROR: - break; - case SCTP_IERROR_HIGH_TSN: - case SCTP_IERROR_BAD_STREAM: + data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; + skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); + + tsn = ntohl(data_hdr->tsn); + SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn); + + /* ASSERT: Now skb->data is really the user data. */ + + /* Process ECN based congestion. + * + * Since the chunk structure is reused for all chunks within + * a packet, we use ecn_ce_done to track if we've already + * done CE processing for this packet. + * + * We need to do ECN processing even if we plan to discard the + * chunk later. + */ + + if (!chunk->ecn_ce_done) { + struct sctp_af *af; + chunk->ecn_ce_done = 1; + + af = sctp_get_af_specific( + ipver2af(chunk->skb->nh.iph->version)); + + if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) { + /* Do real work as sideffect. */ + sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE, + SCTP_U32(tsn)); + } + } + + tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn); + if (tmp < 0) { + /* The TSN is too high--silently discard the chunk and + * count on it getting retransmitted later. + */ goto discard_noforce; - case SCTP_IERROR_DUP_TSN: - case SCTP_IERROR_IGNORE_TSN: + } else if (tmp > 0) { + /* This is a duplicate. Record it. */ + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn)); goto discard_force; - case SCTP_IERROR_NO_DATA: - goto consume; - default: - BUG(); } + /* This is a new TSN. */ + + /* Discard if there is no room in the receive window. 
+ * Actually, allow a little bit of overflow (up to a MTU). + */ + datalen = ntohs(chunk->chunk_hdr->length); + datalen -= sizeof(sctp_data_chunk_t); + + deliver = SCTP_CMD_CHUNK_ULP; + + /* Think about partial delivery. */ + if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) { + + /* Even if we don't accept this chunk there is + * memory pressure. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL()); + } + + /* Spill over rwnd a little bit. Note: While allowed, this spill over + * seems a bit troublesome in that frag_point varies based on + * PMTU. In cases, such as loopback, this might be a rather + * large spill over. + */ + if (!asoc->rwnd || asoc->rwnd_over || + (datalen > asoc->rwnd + asoc->frag_point)) { + + /* If this is the next TSN, consider reneging to make + * room. Note: Playing nice with a confused sender. A + * malicious sender can still eat up all our buffer + * space and in the future we may want to detect and + * do more drastic reneging. + */ + if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) && + (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) { + SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn); + deliver = SCTP_CMD_RENEGE; + } else { + SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, " + "rwnd: %d\n", tsn, datalen, + asoc->rwnd); + goto discard_force; + } + } + + /* + * Section 3.3.10.9 No User Data (9) + * + * Cause of error + * --------------- + * No User Data: This error cause is returned to the originator of a + * DATA chunk if a received DATA chunk has no user data. + */ + if (unlikely(0 == datalen)) { + err = sctp_make_abort_no_data(asoc, chunk, tsn); + if (err) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(err)); + } + /* We are going to ABORT, so we might as well stop + * processing the rest of the chunks in the packet. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); + sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, + SCTP_U32(SCTP_ERROR_NO_DATA)); + SCTP_INC_STATS(SctpAborteds); + SCTP_DEC_STATS(SctpCurrEstab); + return SCTP_DISPOSITION_CONSUME; + } + + /* If definately accepting the DATA chunk, record its TSN, otherwise + * wait for renege processing. + */ + if (SCTP_CMD_CHUNK_ULP == deliver) + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); + + /* Note: Some chunks may get overcounted (if we drop) or overcounted + * if we renege and the chunk arrives again. + */ + if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) + SCTP_INC_STATS(SctpInUnorderChunks); + else + SCTP_INC_STATS(SctpInOrderChunks); + + /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number + * + * If an endpoint receive a DATA chunk with an invalid stream + * identifier, it shall acknowledge the reception of the DATA chunk + * following the normal procedure, immediately send an ERROR chunk + * with cause set to "Invalid Stream Identifier" (See Section 3.3.10) + * and discard the DATA chunk. + */ + if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) { + err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM, + &data_hdr->stream, + sizeof(data_hdr->stream)); + if (err) + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(err)); + goto discard_noforce; + } + + /* Send the data up to the user. Note: Schedule the + * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK + * chunk needs the updated rwnd. 
+ */ + sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk)); + if (asoc->autoclose) { sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); @@ -2422,9 +2551,6 @@ discard_noforce: SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); } return SCTP_DISPOSITION_DISCARD; -consume: - return SCTP_DISPOSITION_CONSUME; - } /* @@ -2450,7 +2576,11 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; - int error; + sctp_datahdr_t *data_hdr; + struct sctp_chunk *err; + size_t datalen; + int tmp; + __u32 tsn; if (!sctp_vtag_verify(chunk, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, @@ -2458,22 +2588,110 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep, return sctp_sf_pdiscard(ep, asoc, type, arg, commands); } - error = sctp_eat_data(asoc, chunk, commands ); - switch (error) { - case SCTP_IERROR_NO_ERROR: - case SCTP_IERROR_HIGH_TSN: - case SCTP_IERROR_DUP_TSN: - case SCTP_IERROR_IGNORE_TSN: - case SCTP_IERROR_BAD_STREAM: - break; - case SCTP_IERROR_NO_DATA: - goto consume; - default: - BUG(); + data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *) chunk->skb->data; + skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); + + tsn = ntohl(data_hdr->tsn); + + SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn); + + /* ASSERT: Now skb->data is really the user data. */ + + /* Process ECN based congestion. + * + * Since the chunk structure is reused for all chunks within + * a packet, we use ecn_ce_done to track if we've already + * done CE processing for this packet. + * + * We need to do ECN processing even if we plan to discard the + * chunk later. + */ + if (!chunk->ecn_ce_done) { + struct sctp_af *af; + chunk->ecn_ce_done = 1; + + af = sctp_get_af_specific( + ipver2af(chunk->skb->nh.iph->version)); + + if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) { + /* Do real work as sideffect. */ + sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE, + SCTP_U32(tsn)); + } } - /* Go a head and force a SACK, since we are shutting down. */ + tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn); + if (tmp < 0) { + /* The TSN is too high--silently discard the chunk and + * count on it getting retransmitted later. + */ + goto gen_shutdown; + } else if (tmp > 0) { + /* This is a duplicate. Record it. */ + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn)); + goto gen_shutdown; + } + + /* This is a new TSN. */ + + datalen = ntohs(chunk->chunk_hdr->length); + datalen -= sizeof(sctp_data_chunk_t); + + /* + * Section 3.3.10.9 No User Data (9) + * + * Cause of error + * --------------- + * No User Data: This error cause is returned to the originator of a + * DATA chunk if a received DATA chunk has no user data. + */ + if (unlikely(0 == datalen)) { + err = sctp_make_abort_no_data(asoc, chunk, tsn); + if (err) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(err)); + } + /* We are going to ABORT, so we might as well stop + * processing the rest of the chunks in the packet. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); + sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, + SCTP_U32(SCTP_ERROR_NO_DATA)); + SCTP_INC_STATS(SctpAborteds); + SCTP_DEC_STATS(SctpCurrEstab); + return SCTP_DISPOSITION_CONSUME; + } + + /* We are accepting this DATA chunk. */ + + /* Record the fact that we have received this TSN. 
*/ + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); + + if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) + SCTP_INC_STATS(SctpInUnorderChunks); + else + SCTP_INC_STATS(SctpInOrderChunks); + + /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number + * + * If an endpoint receive a DATA chunk with an invalid stream + * identifier, it shall acknowledge the reception of the DATA chunk + * following the normal procedure, immediately send an ERROR chunk + * with cause set to "Invalid Stream Identifier" (See Section 3.3.10) + * and discard the DATA chunk. + */ + if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) { + err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM, + &data_hdr->stream, + sizeof(data_hdr->stream)); + if (err) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(err)); + } + } + /* Go a head and force a SACK, since we are shutting down. */ +gen_shutdown: /* Implementor's Guide. * * While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately @@ -2489,8 +2707,6 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep, sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); } - -consume: return SCTP_DISPOSITION_CONSUME; } @@ -2616,7 +2832,7 @@ sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet)); - SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); + SCTP_INC_STATS(SctpOutCtrlChunks); return SCTP_DISPOSITION_CONSUME; } @@ -2713,8 +2929,8 @@ sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep, sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); - SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS); - SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); + SCTP_INC_STATS(SctpShutdowns); + SCTP_DEC_STATS(SctpCurrEstab); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); /* ...and remove all record of the association. 
*/ @@ -2755,7 +2971,7 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep, __u8 *ch_end; int ootb_shut_ack = 0; - SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES); + SCTP_INC_STATS(SctpOutOfBlues); ch = (sctp_chunkhdr_t *) chunk->chunk_hdr; do { @@ -2824,7 +3040,7 @@ sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep, sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet)); - SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); + SCTP_INC_STATS(SctpOutCtrlChunks); return SCTP_DISPOSITION_CONSUME; } @@ -2960,8 +3176,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(SCTP_ERROR_ASCONF_ACK)); - SCTP_INC_STATS(SCTP_MIB_ABORTEDS); - SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); + SCTP_INC_STATS(SctpAborteds); + SCTP_DEC_STATS(SctpCurrEstab); return SCTP_DISPOSITION_ABORT; } @@ -2986,8 +3202,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(SCTP_ERROR_ASCONF_ACK)); - SCTP_INC_STATS(SCTP_MIB_ABORTEDS); - SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); + SCTP_INC_STATS(SctpAborteds); + SCTP_DEC_STATS(SctpCurrEstab); return SCTP_DISPOSITION_ABORT; } @@ -3581,8 +3797,8 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort( sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(SCTP_ERROR_USER_ABORT)); - SCTP_INC_STATS(SCTP_MIB_ABORTEDS); - SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); + SCTP_INC_STATS(SctpAborteds); + SCTP_DEC_STATS(SctpCurrEstab); return retval; } @@ -3639,7 +3855,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown( sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); - SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS); + SCTP_INC_STATS(SctpShutdowns); sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); @@ -3712,7 +3928,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort( sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, SCTP_STATE(SCTP_STATE_CLOSED)); - SCTP_INC_STATS(SCTP_MIB_ABORTEDS); + SCTP_INC_STATS(SctpAborteds); /* Even if we can't send the ABORT due to low memory delete the * TCB. This is a departure from our typical NOMEM handling. @@ -4072,8 +4288,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep, /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(SCTP_ERROR_NO_ERROR)); - SCTP_INC_STATS(SCTP_MIB_ABORTEDS); - SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); + SCTP_INC_STATS(SctpAborteds); + SCTP_DEC_STATS(SctpCurrEstab); return SCTP_DISPOSITION_DELETE_TCB; } @@ -4242,8 +4458,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep, /* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. 
*/ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(SCTP_ERROR_NO_ERROR)); - SCTP_INC_STATS(SCTP_MIB_ABORTEDS); - SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); + SCTP_INC_STATS(SctpAborteds); + SCTP_DEC_STATS(SctpCurrEstab); return SCTP_DISPOSITION_DELETE_TCB; } @@ -4316,8 +4532,8 @@ sctp_disposition_t sctp_sf_t4_timer_expire( SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(SCTP_ERROR_NO_ERROR)); - SCTP_INC_STATS(SCTP_MIB_ABORTEDS); - SCTP_INC_STATS(SCTP_MIB_CURRESTAB); + SCTP_INC_STATS(SctpAborteds); + SCTP_INC_STATS(SctpCurrEstab); return SCTP_DISPOSITION_ABORT; } @@ -4493,7 +4709,7 @@ struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk) num_blocks = ntohs(sack->num_gap_ack_blocks); num_dup_tsns = ntohs(sack->num_dup_tsns); len = sizeof(struct sctp_sackhdr); - len += (num_blocks + num_dup_tsns) * sizeof(__u32); + len = (num_blocks + num_dup_tsns) * sizeof(__u32); if (len > chunk->skb->len) return NULL; @@ -4627,176 +4843,8 @@ void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep, sctp_packet_append_chunk(packet, err_chunk); sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(packet)); - SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); + SCTP_INC_STATS(SctpOutCtrlChunks); } else sctp_chunk_free (err_chunk); } } - - -/* Process a data chunk */ -int sctp_eat_data(const struct sctp_association *asoc, - struct sctp_chunk *chunk, - sctp_cmd_seq_t *commands) -{ - sctp_datahdr_t *data_hdr; - struct sctp_chunk *err; - size_t datalen; - sctp_verb_t deliver; - int tmp; - __u32 tsn; - - data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; - skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); - - tsn = ntohl(data_hdr->tsn); - SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn); - - /* ASSERT: Now skb->data is really the user data. */ - - /* Process ECN based congestion. - * - * Since the chunk structure is reused for all chunks within - * a packet, we use ecn_ce_done to track if we've already - * done CE processing for this packet. - * - * We need to do ECN processing even if we plan to discard the - * chunk later. - */ - - if (!chunk->ecn_ce_done) { - struct sctp_af *af; - chunk->ecn_ce_done = 1; - - af = sctp_get_af_specific( - ipver2af(chunk->skb->nh.iph->version)); - - if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) { - /* Do real work as sideffect. */ - sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE, - SCTP_U32(tsn)); - } - } - - tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn); - if (tmp < 0) { - /* The TSN is too high--silently discard the chunk and - * count on it getting retransmitted later. - */ - return SCTP_IERROR_HIGH_TSN; - } else if (tmp > 0) { - /* This is a duplicate. Record it. */ - sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn)); - return SCTP_IERROR_DUP_TSN; - } - - /* This is a new TSN. */ - - /* Discard if there is no room in the receive window. - * Actually, allow a little bit of overflow (up to a MTU). - */ - datalen = ntohs(chunk->chunk_hdr->length); - datalen -= sizeof(sctp_data_chunk_t); - - deliver = SCTP_CMD_CHUNK_ULP; - - /* Think about partial delivery. */ - if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) { - - /* Even if we don't accept this chunk there is - * memory pressure. - */ - sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL()); - } - - /* Spill over rwnd a little bit. Note: While allowed, this spill over - * seems a bit troublesome in that frag_point varies based on - * PMTU. In cases, such as loopback, this might be a rather - * large spill over. 
- */ - if (!asoc->rwnd || asoc->rwnd_over || - (datalen > asoc->rwnd + asoc->frag_point)) { - - /* If this is the next TSN, consider reneging to make - * room. Note: Playing nice with a confused sender. A - * malicious sender can still eat up all our buffer - * space and in the future we may want to detect and - * do more drastic reneging. - */ - if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) && - (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) { - SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn); - deliver = SCTP_CMD_RENEGE; - } else { - SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, " - "rwnd: %d\n", tsn, datalen, - asoc->rwnd); - return SCTP_IERROR_IGNORE_TSN; - } - } - - /* - * Section 3.3.10.9 No User Data (9) - * - * Cause of error - * --------------- - * No User Data: This error cause is returned to the originator of a - * DATA chunk if a received DATA chunk has no user data. - */ - if (unlikely(0 == datalen)) { - err = sctp_make_abort_no_data(asoc, chunk, tsn); - if (err) { - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, - SCTP_CHUNK(err)); - } - /* We are going to ABORT, so we might as well stop - * processing the rest of the chunks in the packet. - */ - sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); - sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, - SCTP_U32(SCTP_ERROR_NO_DATA)); - SCTP_INC_STATS(SCTP_MIB_ABORTEDS); - SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); - return SCTP_IERROR_NO_DATA; - } - - /* If definately accepting the DATA chunk, record its TSN, otherwise - * wait for renege processing. - */ - if (SCTP_CMD_CHUNK_ULP == deliver) - sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); - - /* Note: Some chunks may get overcounted (if we drop) or overcounted - * if we renege and the chunk arrives again. - */ - if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) - SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS); - else - SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS); - - /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number - * - * If an endpoint receive a DATA chunk with an invalid stream - * identifier, it shall acknowledge the reception of the DATA chunk - * following the normal procedure, immediately send an ERROR chunk - * with cause set to "Invalid Stream Identifier" (See Section 3.3.10) - * and discard the DATA chunk. - */ - if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) { - err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM, - &data_hdr->stream, - sizeof(data_hdr->stream)); - if (err) - sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, - SCTP_CHUNK(err)); - return SCTP_IERROR_BAD_STREAM; - } - - /* Send the data up to the user. Note: Schedule the - * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK - * chunk needs the updated rwnd. - */ - sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk)); - - return SCTP_IERROR_NO_ERROR; -}