/* 1st Level Abstractions. */
-/* Allocate and initialize a new association */
-struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
- const struct sock *sk,
- sctp_scope_t scope, int gfp)
-{
- struct sctp_association *asoc;
-
- asoc = t_new(struct sctp_association, gfp);
- if (!asoc)
- goto fail;
-
- if (!sctp_association_init(asoc, ep, sk, scope, gfp))
- goto fail_init;
-
- asoc->base.malloced = 1;
- SCTP_DBG_OBJCNT_INC(assoc);
-
- return asoc;
-
-fail_init:
- kfree(asoc);
-fail:
- return NULL;
-}
-
/* Initialize a new association from provided memory. */
-struct sctp_association *sctp_association_init(struct sctp_association *asoc,
+static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
const struct sctp_endpoint *ep,
const struct sock *sk,
sctp_scope_t scope,
int gfp)
{
- struct sctp_opt *sp;
+ struct sctp_sock *sp;
int i;
/* Retrieve the SCTP per socket area. */
/* Initialize the bind addr area. */
sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
- asoc->base.addr_lock = RW_LOCK_UNLOCKED;
+ rwlock_init(&asoc->base.addr_lock);
asoc->state = SCTP_STATE_CLOSED;
asoc->c.peer_vtag = 0;
asoc->c.my_ttag = 0;
asoc->c.peer_ttag = 0;
+ asoc->c.my_port = ep->base.bind_addr.port;
asoc->c.initial_tsn = sctp_generate_tsn(ep);
asoc->need_ecne = 0;
- asoc->assoc_id = (sctp_assoc_t)-1L;
+ asoc->assoc_id = 0;
/* Assume that peer would support both address types unless we are
* told otherwise.
return NULL;
}
+/* Allocate and initialize a new association.
+ *
+ * @ep:    parent endpoint; sctp_association_init() copies its bound
+ *         port into the association's bind-address area
+ * @sk:    owning socket
+ * @scope: SCTP address scope for the association's addresses
+ * @gfp:   allocation flags (passed through to t_new() and init)
+ *
+ * Returns the new association, or NULL if either the allocation or
+ * sctp_association_init() fails; on the init-failure path the
+ * partially constructed object is kfree()d here before returning.
+ */
+struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
+ const struct sock *sk,
+ sctp_scope_t scope, int gfp)
+{
+ struct sctp_association *asoc;
+
+ asoc = t_new(struct sctp_association, gfp);
+ if (!asoc)
+ goto fail;
+
+ if (!sctp_association_init(asoc, ep, sk, scope, gfp))
+ goto fail_init;
+
+ /* Record that this association is heap-allocated.
+ * NOTE(review): presumably the teardown path kfree()s only when
+ * base.malloced is set — confirm in sctp_association_free/destroy.
+ */
+ asoc->base.malloced = 1;
+ SCTP_DBG_OBJCNT_INC(assoc);
+
+ return asoc;
+
+fail_init:
+ kfree(asoc);
+fail:
+ return NULL;
+}
+
/* Free this association if possible. There may still be users, so
* the actual deallocation may be delayed.
*/
sctp_endpoint_put(asoc->ep);
sock_put(asoc->base.sk);
- if ((long)asoc->assoc_id != -1L) {
+ if (asoc->assoc_id != 0) {
spin_lock_bh(&sctp_assocs_id_lock);
- idr_remove(&sctp_assocs_id, (long)asoc->assoc_id);
+ idr_remove(&sctp_assocs_id, asoc->assoc_id);
spin_unlock_bh(&sctp_assocs_id_lock);
}
int gfp)
{
struct sctp_transport *peer;
- struct sctp_opt *sp;
+ struct sctp_sock *sp;
unsigned short port;
sp = sctp_sk(asoc->base.sk);
/* 7.2.1 Slow-Start
*
- * o The initial cwnd before data transmission or after a
- * sufficiently long idle period MUST be <= 2*MTU.
+ * o The initial cwnd before DATA transmission or after a sufficiently
+ * long idle period MUST be set to
+ * min(4*MTU, max(2*MTU, 4380 bytes))
*
* o The initial value of ssthresh MAY be arbitrarily high
* (for example, implementations MAY use the size of the
* receiver advertised window).
*/
- peer->cwnd = asoc->pmtu * 2;
+ peer->cwnd = min(4*asoc->pmtu, max_t(__u32, 2*asoc->pmtu, 4380));
/* At this point, we may not have the receiver's advertised window,
* so initialize ssthresh to the default value and it will be set
peer->partial_bytes_acked = 0;
peer->flight_size = 0;
- peer->error_threshold = peer->max_retrans;
/* By default, enable heartbeat for peer address. */
peer->hb_allowed = 1;
peer->hb_interval = msecs_to_jiffies(sp->paddrparam.spp_hbinterval);
/* Set the path max_retrans. */
- peer->max_retrans = asoc->max_retrans;
+ peer->max_retrans = sp->paddrparam.spp_pathmaxrxt;
/* Set the transport's RTO.initial value */
peer->rto = asoc->rto_initial;
return retval;
}
-/* Allocate 'num' TSNs by incrementing the association's TSN by num. */
-__u32 sctp_association_get_tsn_block(struct sctp_association *asoc, int num)
-{
- __u32 retval = asoc->next_tsn;
-
- asoc->next_tsn += num;
- asoc->unack_data += num;
-
- return retval;
-}
-
-
/* Compare two addresses to see if they match. Wildcard addresses
* only match themselves.
*/
return chunk;
}
-/* Use this function for the packet prepend callback when no ECNE
- * packet is desired (e.g. some packets don't like to be bundled).
- */
-struct sctp_chunk *sctp_get_no_prepend(struct sctp_association *asoc)
-{
- return NULL;
-}
-
/*
* Find which transport this TSN was sent on.
*/
struct sctp_chunk *chunk;
struct sock *sk;
struct sctp_inq *inqueue;
- int state, subtype;
+ int state;
+ sctp_subtype_t subtype;
int error = 0;
/* The association should be held so we should be safe. */
sctp_association_hold(asoc);
while (NULL != (chunk = sctp_inq_pop(inqueue))) {
state = asoc->state;
- subtype = chunk->chunk_hdr->type;
+ subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
/* Remember where the last DATA chunk came from so we
* know where to send the SACK.
chunk->transport->last_time_heard = jiffies;
/* Run through the state machine. */
- error = sctp_do_sm(SCTP_EVENT_T_CHUNK, SCTP_ST_CHUNK(subtype),
+ error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype,
state, ep, asoc, chunk, GFP_ATOMIC);
/* Check to see if the association is freed in response to
/* This routine moves an association from its old sk to a new sk. */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
- struct sctp_opt *newsp = sctp_sk(newsk);
+ struct sctp_sock *newsp = sctp_sk(newsk);
struct sock *oldsk = assoc->base.sk;
/* Delete the association from the old endpoint's list of
}
if (pmtu) {
- struct sctp_opt *sp = sctp_sk(asoc->base.sk);
+ struct sctp_sock *sp = sctp_sk(asoc->base.sk);
asoc->pmtu = pmtu;
asoc->frag_point = sctp_frag_point(sp, pmtu);
}