#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
-extern kmem_cache_t *sctp_chunk_cachep;
+extern struct kmem_cache *sctp_chunk_cachep;
SCTP_STATIC
struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
static int sctp_process_param(struct sctp_association *asoc,
union sctp_params param,
const union sctp_addr *peer_addr,
- int gfp);
+ gfp_t gfp);
/* What was the inbound interface for this chunk? */
int sctp_chunk_iif(const struct sctp_chunk *chunk)
* provided chunk, as most cause codes will be embedded inside an
* abort chunk.
*/
-void sctp_init_cause(struct sctp_chunk *chunk, __u16 cause_code,
+void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
const void *payload, size_t paylen)
{
sctp_errhdr_t err;
padlen = len % 4;
err.length = htons(len);
len += padlen;
- sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
- chunk->subh.err_hdr = sctp_addto_chunk(chunk, paylen, payload);
+ chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
+ sctp_addto_chunk(chunk, paylen, payload);
}
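The reordering here matters because sctp_addto_chunk() returns a pointer to the data it just appended: the old code left chunk->subh.err_hdr pointing at the payload rather than at the error header. A typical caller reserves room for both and lets sctp_init_cause() append them; a sketch modeled on the sctp_make_op_error() path further down (names follow the surrounding code):

	retval = sctp_make_op_error_space(asoc, chunk,
					  sizeof(sctp_errhdr_t) + paylen);
	if (retval)
		sctp_init_cause(retval, cause_code, payload, paylen);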
/* 3.3.2 Initiation (INIT) (1)
*/
struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
const struct sctp_bind_addr *bp,
- int gfp, int vparam_len)
+ gfp_t gfp, int vparam_len)
{
sctp_inithdr_t init;
union sctp_params addrs;
int num_types, addrs_len = 0;
struct sctp_sock *sp;
sctp_supported_addrs_param_t sat;
- __u16 types[2];
- sctp_adaption_ind_param_t aiparam;
+ __be16 types[2];
+ sctp_adaptation_ind_param_t aiparam;
/* RFC 2960 3.3.2 Initiation (INIT) (1)
*
sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param);
if (sctp_prsctp_enable)
sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);
- aiparam.param_hdr.type = SCTP_PARAM_ADAPTION_LAYER_IND;
+ aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND;
aiparam.param_hdr.length = htons(sizeof(aiparam));
- aiparam.adaption_ind = htonl(sp->adaption_ind);
+ aiparam.adaptation_ind = htonl(sp->adaptation_ind);
sctp_addto_chunk(retval, sizeof(aiparam), &aiparam);
nodata:
- if (addrs.v)
- kfree(addrs.v);
+ kfree(addrs.v);
return retval;
}
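Dropping the NULL test before kfree() (here and in the matching init_ack error path below) is safe because kfree() is defined to ignore NULL pointers, so

	if (addrs.v)
		kfree(addrs.v);

reduces to a bare kfree(addrs.v) with one less branch.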
struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
- int gfp, int unkparam_len)
+ gfp_t gfp, int unkparam_len)
{
sctp_inithdr_t initack;
struct sctp_chunk *retval;
sctp_cookie_param_t *cookie;
int cookie_len;
size_t chunksize;
- sctp_adaption_ind_param_t aiparam;
+ sctp_adaptation_ind_param_t aiparam;
retval = NULL;
if (asoc->peer.prsctp_capable)
sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);
- aiparam.param_hdr.type = SCTP_PARAM_ADAPTION_LAYER_IND;
+ aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND;
aiparam.param_hdr.length = htons(sizeof(aiparam));
- aiparam.adaption_ind = htonl(sctp_sk(asoc->base.sk)->adaption_ind);
+ aiparam.adaptation_ind = htonl(sctp_sk(asoc->base.sk)->adaptation_ind);
sctp_addto_chunk(retval, sizeof(aiparam), &aiparam);
/* We need to remove the const qualifier at this point. */
nomem_chunk:
kfree(cookie);
nomem_cookie:
- if (addrs.v)
- kfree(addrs.v);
+ kfree(addrs.v);
return retval;
}
dp.ppid = sinfo->sinfo_ppid;
/* Set the flags for an unordered send. */
- if (sinfo->sinfo_flags & MSG_UNORDERED) {
+ if (sinfo->sinfo_flags & SCTP_UNORDERED) {
flags |= SCTP_DATA_UNORDERED;
dp.ssn = 0;
} else
const struct sctp_chunk *chunk, __u32 tsn)
{
struct sctp_chunk *retval;
- __u32 payload;
+ __be32 payload;
retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t)
+ sizeof(tsn));
/* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */
struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc,
- const struct sctp_chunk *chunk,
- const struct msghdr *msg)
+ const struct msghdr *msg,
+ size_t paylen)
{
struct sctp_chunk *retval;
- void *payload = NULL, *payoff;
- size_t paylen = 0;
- struct iovec *iov = NULL;
- int iovlen = 0;
-
- if (msg) {
- iov = msg->msg_iov;
- iovlen = msg->msg_iovlen;
- paylen = get_user_iov_size(iov, iovlen);
- }
+ void *payload = NULL;
+ int err;
- retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen);
+ retval = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t) + paylen);
if (!retval)
goto err_chunk;
if (paylen) {
/* Put the msg_iov together into payload. */
- payload = kmalloc(paylen, GFP_ATOMIC);
+ payload = kmalloc(paylen, GFP_KERNEL);
if (!payload)
goto err_payload;
- payoff = payload;
- for (; iovlen > 0; --iovlen) {
- if (copy_from_user(payoff, iov->iov_base,iov->iov_len))
- goto err_copy;
- payoff += iov->iov_len;
- iov++;
- }
+ err = memcpy_fromiovec(payload, msg->msg_iov, paylen);
+ if (err < 0)
+ goto err_copy;
}
sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen);
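memcpy_fromiovec() replaces the hand-rolled loop and tightens the error handling: it copies exactly paylen bytes from the user iovec array, advancing the iovecs as it goes, and reports faulting user accesses. The contract relied on here, per the pre-3.19 iovec helpers:

	/* Copy len bytes from a userspace iovec array into kdata;
	 * returns 0 on success, -EFAULT on a faulting user access.
	 */
	int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);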
/* Create an Operation Error chunk. */
struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
- __u16 cause_code, const void *payload,
+ __be16 cause_code, const void *payload,
size_t paylen)
{
struct sctp_chunk *retval;
{
struct sctp_chunk *retval;
- retval = kmem_cache_alloc(sctp_chunk_cachep, SLAB_ATOMIC);
+ retval = kmem_cache_alloc(sctp_chunk_cachep, GFP_ATOMIC);
if (!retval)
goto nodata;
SCTP_DEBUG_PRINTK("chunkifying skb %p w/o an sk\n", skb);
}
+ INIT_LIST_HEAD(&retval->list);
retval->skb = skb;
retval->asoc = (struct sctp_association *)asoc;
retval->resent = 0;
/* Possibly, free the chunk. */
void sctp_chunk_free(struct sctp_chunk *chunk)
{
- /* Make sure that we are not on any list. */
- skb_unlink((struct sk_buff *) chunk);
+ BUG_ON(!list_empty(&chunk->list));
list_del_init(&chunk->transmitted_list);
/* Release our reference on the message tracker. */
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
ssn = 0;
} else {
- sid = htons(chunk->subh.data_hdr->stream);
+ sid = ntohs(chunk->subh.data_hdr->stream);
if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid);
else
ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid);
- ssn = htons(ssn);
}
- chunk->subh.data_hdr->ssn = ssn;
+ chunk->subh.data_hdr->ssn = htons(ssn);
chunk->has_ssn = 1;
}
/* Create a CLOSED association to use with an incoming packet. */
struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep,
- struct sctp_chunk *chunk, int gfp)
+ struct sctp_chunk *chunk,
+ gfp_t gfp)
{
struct sctp_association *asoc;
struct sk_buff *skb;
unsigned int keylen;
char *key;
- headersize = sizeof(sctp_paramhdr_t) + SCTP_SECRET_SIZE;
+ /* Header size is static data prior to the actual cookie, including
+ * any padding.
+ */
+ headersize = sizeof(sctp_paramhdr_t) +
+ (sizeof(struct sctp_signed_cookie) -
+ sizeof(struct sctp_cookie));
bodysize = sizeof(struct sctp_cookie)
+ ntohs(init_chunk->chunk_hdr->length) + addrs_len;
- (bodysize % SCTP_COOKIE_MULTIPLE);
*cookie_len = headersize + bodysize;
- retval = (sctp_cookie_param_t *)kmalloc(*cookie_len, GFP_ATOMIC);
-
- if (!retval) {
- *cookie_len = 0;
- goto nodata;
- }
-
/* Clear this memory since we are sending this data structure
* out on the network.
*/
- memset(retval, 0x00, *cookie_len);
+ retval = kzalloc(*cookie_len, GFP_ATOMIC);
+ if (!retval)
+ goto nodata;
+
cookie = (struct sctp_signed_cookie *) retval->body;
/* Set up the parameter header. */
/* Remember PR-SCTP capability. */
cookie->c.prsctp_capable = asoc->peer.prsctp_capable;
- /* Save adaption indication in the cookie. */
- cookie->c.adaption_ind = asoc->peer.adaption_ind;
+ /* Save adaptation indication in the cookie. */
+ cookie->c.adaptation_ind = asoc->peer.adaptation_ind;
/* Set an expiration time for the cookie. */
do_gettimeofday(&cookie->c.expiration);
ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);
if (sctp_sk(ep->base.sk)->hmac) {
+ struct hash_desc desc;
+
/* Sign the message. */
sg.page = virt_to_page(&cookie->c);
sg.offset = (unsigned long)(&cookie->c) % PAGE_SIZE;
sg.length = bodysize;
keylen = SCTP_SECRET_SIZE;
key = (char *)ep->secret_key[ep->current_key];
+ desc.tfm = sctp_sk(ep->base.sk)->hmac;
+ desc.flags = 0;
- sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen,
- &sg, 1, cookie->signature);
+ if (crypto_hash_setkey(desc.tfm, key, keylen) ||
+ crypto_hash_digest(&desc, &sg, bodysize, cookie->signature))
+ goto free_cookie;
}
-nodata:
return retval;
+
+free_cookie:
+ kfree(retval);
+nodata:
+ *cookie_len = 0;
+ return NULL;
}
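Cookie signing now goes through the hash_desc-based crypto API: the endpoint's socket carries a pre-allocated HMAC transform (sctp_sk(...)->hmac), and each signing keys it and digests the cookie body in one call. A self-contained sketch of the same flow, assuming an "hmac(sha1)" transform and the pre-4.6 crypto_hash interface (body, bodylen, key, keylen, and signature are stand-ins):

	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	int err = 0;

	tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	sg_init_one(&sg, body, bodylen);
	desc.tfm = tfm;
	desc.flags = 0;

	/* The patch treats any signing failure like an allocation failure. */
	if (crypto_hash_setkey(desc.tfm, key, keylen) ||
	    crypto_hash_digest(&desc, &sg, bodylen, signature))
		err = -ENOMEM;

	crypto_free_hash(tfm);
	return err;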
/* Unpack the cookie from COOKIE ECHO chunk, recreating the association. */
struct sctp_association *sctp_unpack_cookie(
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
- struct sctp_chunk *chunk, int gfp,
+ struct sctp_chunk *chunk, gfp_t gfp,
int *error, struct sctp_chunk **errp)
{
struct sctp_association *retval = NULL;
struct sctp_signed_cookie *cookie;
struct sctp_cookie *bear_cookie;
int headersize, bodysize, fixed_size;
- __u8 digest[SCTP_SIGNATURE_SIZE];
+ __u8 *digest = ep->digest;
struct scatterlist sg;
unsigned int keylen, len;
char *key;
sctp_scope_t scope;
struct sk_buff *skb = chunk->skb;
+ struct timeval tv;
+ struct hash_desc desc;
- headersize = sizeof(sctp_chunkhdr_t) + SCTP_SECRET_SIZE;
+ /* Header size is static data prior to the actual cookie, including
+ * any padding.
+ */
+ headersize = sizeof(sctp_chunkhdr_t) +
+ (sizeof(struct sctp_signed_cookie) -
+ sizeof(struct sctp_cookie));
bodysize = ntohs(chunk->chunk_hdr->length) - headersize;
fixed_size = headersize + sizeof(struct sctp_cookie);
sg.offset = (unsigned long)(bear_cookie) % PAGE_SIZE;
sg.length = bodysize;
key = (char *)ep->secret_key[ep->current_key];
+ desc.tfm = sctp_sk(ep->base.sk)->hmac;
+ desc.flags = 0;
- memset(digest, 0x00, sizeof(digest));
- sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen, &sg,
- 1, digest);
+ memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
+ if (crypto_hash_setkey(desc.tfm, key, keylen) ||
+ crypto_hash_digest(&desc, &sg, bodysize, digest)) {
+ *error = -SCTP_IERROR_NOMEM;
+ goto fail;
+ }
if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
/* Try the previous key. */
key = (char *)ep->secret_key[ep->last_key];
- memset(digest, 0x00, sizeof(digest));
- sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen,
- &sg, 1, digest);
+ memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
+ if (crypto_hash_setkey(desc.tfm, key, keylen) ||
+ crypto_hash_digest(&desc, &sg, bodysize, digest)) {
+ *error = -SCTP_IERROR_NOMEM;
+ goto fail;
+ }
if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
/* Yikes! Still bad signature! */
goto fail;
}
- if (ntohs(chunk->sctp_hdr->source) != bear_cookie->peer_addr.v4.sin_port ||
+ if (chunk->sctp_hdr->source != bear_cookie->peer_addr.v4.sin_port ||
ntohs(chunk->sctp_hdr->dest) != bear_cookie->my_port) {
*error = -SCTP_IERROR_BAD_PORTS;
goto fail;
/* Check to see if the cookie is stale. If there is already
* an association, there is no need to check the cookie's expiration
* for the init collision case of a lost COOKIE ACK.
+ * If skb has been timestamped, then use the stamp, otherwise
+ * use current time. This introduces a small possibility that a
+ * cookie may be considered expired, but this would only slow down
+ * the new association establishment instead of every packet.
*/
- if (!asoc && tv_lt(bear_cookie->expiration, skb->stamp)) {
+ if (sock_flag(ep->base.sk, SOCK_TIMESTAMP))
+ skb_get_timestamp(skb, &tv);
+ else
+ do_gettimeofday(&tv);
+
+ if (!asoc && tv_lt(bear_cookie->expiration, tv)) {
__u16 len;
/*
* Section 3.3.10.3 Stale Cookie Error (3)
len = ntohs(chunk->chunk_hdr->length);
*errp = sctp_make_op_error_space(asoc, chunk, len);
if (*errp) {
- suseconds_t usecs = (skb->stamp.tv_sec -
+ suseconds_t usecs = (tv.tv_sec -
bear_cookie->expiration.tv_sec) * 1000000L +
- skb->stamp.tv_usec -
- bear_cookie->expiration.tv_usec;
+ tv.tv_usec - bear_cookie->expiration.tv_usec;
+ __be32 n = htonl(usecs);
- usecs = htonl(usecs);
sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE,
- &usecs, sizeof(usecs));
+ &n, sizeof(n));
*error = -SCTP_IERROR_STALE_COOKIE;
} else
*error = -SCTP_IERROR_NOMEM;
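The cause built in this branch is RFC 2960's Stale Cookie Error (cause code 3), whose body is a single 32-bit Measure of Staleness in microseconds; hence the host-order suseconds_t arithmetic is converted exactly once into a properly typed __be32 before sctp_init_cause() copies it out. As a layout sketch (hypothetical struct name, fields per the RFC):

	/* RFC 2960 3.3.10.3 Stale Cookie Error, cause body: */
	struct sctp_stale_cookie_body {
		__be32 measure_of_staleness;	/* microseconds, network order */
	};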
/* Also, add the destination address. */
if (list_empty(&retval->base.bind_addr.address_list)) {
- sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest,
+ sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, 1,
GFP_ATOMIC);
}
retval->addip_serial = retval->c.initial_tsn;
retval->adv_peer_ack_point = retval->ctsn_ack_point;
retval->peer.prsctp_capable = retval->c.prsctp_capable;
- retval->peer.adaption_ind = retval->c.adaption_ind;
+ retval->peer.adaptation_ind = retval->c.adaptation_ind;
/* The INIT stuff will be done by the side effects. */
return retval;
********************************************************************/
struct __sctp_missing {
- __u32 num_missing;
- __u16 type;
+ __be32 num_missing;
+ __be16 type;
} __attribute__((packed));
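The __u16/__u32 to __be16/__be32 switches running through this patch (cause codes, stream ids, ASCONF correlation ids, and this on-wire struct) are sparse endianness annotations: the fields hold network-byte-order values, and the annotated types let sparse flag any use that forgets a conversion. A minimal illustration of what the checker catches:

	__be16 wire;
	__u16 host = 5;

	wire = host;		/* sparse: incorrect type in assignment */
	wire = htons(host);	/* OK: explicit host-to-network conversion */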
/*
if (*errp) {
report.num_missing = htonl(1);
report.type = paramtype;
- sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM,
+ sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM,
&report, sizeof(report));
}
case SCTP_PARAM_HEARTBEAT_INFO:
case SCTP_PARAM_UNRECOGNIZED_PARAMETERS:
case SCTP_PARAM_ECN_CAPABLE:
- case SCTP_PARAM_ADAPTION_LAYER_IND:
+ case SCTP_PARAM_ADAPTATION_LAYER_IND:
break;
case SCTP_PARAM_HOST_NAME_ADDRESS:
/* Verify stream values are non-zero. */
if ((0 == peer_init->init_hdr.num_outbound_streams) ||
- (0 == peer_init->init_hdr.num_inbound_streams)) {
+ (0 == peer_init->init_hdr.num_inbound_streams) ||
+ (0 == peer_init->init_hdr.init_tag) ||
+ (SCTP_DEFAULT_MINWINDOW > ntohl(peer_init->init_hdr.a_rwnd))) {
sctp_process_inv_mandatory(asoc, chunk, errp);
return 0;
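The added conditions track RFC 2960 section 3.3.2's rules for a well-formed INIT: the Initiate Tag must not be 0, neither stream count may be 0, and the advertised receiver window (a_rwnd) must be at least 1500 bytes. SCTP_DEFAULT_MINWINDOW encodes that floor; sketched here, with the kernel's actual definition living in include/net/sctp/constants.h:

	/* RFC 2960 3.3.2: a_rwnd MUST NOT be smaller than 1500. */
	#define SCTP_DEFAULT_MINWINDOW	1500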
*/
int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
const union sctp_addr *peer_addr,
- sctp_init_chunk_t *peer_init, int gfp)
+ sctp_init_chunk_t *peer_init, gfp_t gfp)
{
union sctp_params param;
struct sctp_transport *transport;
* added as the primary transport. The source address seems to
* be a better choice than any of the embedded addresses.
*/
- if (peer_addr)
- if(!sctp_assoc_add_peer(asoc, peer_addr, gfp))
+ if (peer_addr) {
+ if (!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
goto nomem;
+ }
/* Process the initialization parameters. */
goto clean_up;
}
+ /* Walk list of transports, removing transports in the UNKNOWN state. */
+ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
+ transport = list_entry(pos, struct sctp_transport, transports);
+ if (transport->state == SCTP_UNKNOWN) {
+ sctp_assoc_rm_peer(asoc, transport);
+ }
+ }
+
/* The fixed INIT headers are always in network byte
* order.
*/
/* Copy cookie in case we need to resend COOKIE-ECHO. */
cookie = asoc->peer.cookie;
if (cookie) {
- asoc->peer.cookie = kmalloc(asoc->peer.cookie_len, gfp);
+ asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp);
if (!asoc->peer.cookie)
goto clean_up;
- memcpy(asoc->peer.cookie, cookie, asoc->peer.cookie_len);
}
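kmemdup() folds the allocate-and-copy pair into one call; its effect is exactly the removed two-step sequence:

	/* What kmemdup(src, len, gfp) does internally, in essence: */
	void *p = kmalloc(len, gfp);

	if (p)
		memcpy(p, src, len);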
/* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily
* stream sequence number shall be set to 0.
*/
- /* Allocate storage for the negotiated streams if it is not a temporary * association.
+ /* Allocate storage for the negotiated streams if it is not a temporary
+ * association.
*/
if (!asoc->temp) {
int assoc_id;
list_del_init(pos);
sctp_transport_free(transport);
}
+
+ asoc->peer.transport_count = 0;
+
nomem:
return 0;
}
static int sctp_process_param(struct sctp_association *asoc,
union sctp_params param,
const union sctp_addr *peer_addr,
- int gfp)
+ gfp_t gfp)
{
union sctp_addr addr;
int i;
/* Fall through. */
case SCTP_PARAM_IPV4_ADDRESS:
af = sctp_get_af_specific(param_type2af(param.p->type));
- af->from_addr_param(&addr, param.addr, asoc->peer.port, 0);
+ af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0);
scope = sctp_scope(peer_addr);
if (sctp_in_scope(&addr, scope))
- if (!sctp_assoc_add_peer(asoc, &addr, gfp))
+ if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
return 0;
break;
asoc->peer.ecn_capable = 1;
break;
- case SCTP_PARAM_ADAPTION_LAYER_IND:
- asoc->peer.adaption_ind = param.aind->adaption_ind;
+ case SCTP_PARAM_ADAPTATION_LAYER_IND:
+ asoc->peer.adaptation_ind = param.aind->adaptation_ind;
break;
case SCTP_PARAM_FWD_TSN_SUPPORT:
union sctp_addr *laddr,
struct sockaddr *addrs,
int addrcnt,
- __u16 flags)
+ __be16 flags)
{
sctp_addip_param_t param;
struct sctp_chunk *retval;
}
/* Add response parameters to an ASCONF_ACK chunk. */
-static void sctp_add_asconf_response(struct sctp_chunk *chunk, __u32 crr_id,
- __u16 err_code, sctp_addip_param_t *asconf_param)
+static void sctp_add_asconf_response(struct sctp_chunk *chunk, __be32 crr_id,
+ __be16 err_code, sctp_addip_param_t *asconf_param)
{
sctp_addip_param_t ack_param;
sctp_errhdr_t err_param;
int asconf_param_len = 0;
int err_param_len = 0;
- __u16 response_type;
+ __be16 response_type;
if (SCTP_ERROR_NO_ERROR == err_code) {
response_type = SCTP_PARAM_SUCCESS_REPORT;
}
/* Process a asconf parameter. */
-static __u16 sctp_process_asconf_param(struct sctp_association *asoc,
+static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
struct sctp_chunk *asconf,
sctp_addip_param_t *asconf_param)
{
union sctp_addr addr;
struct list_head *pos;
union sctp_addr_param *addr_param;
addr_param = (union sctp_addr_param *)
((void *)asconf_param + sizeof(sctp_addip_param_t));
if (unlikely(!af))
return SCTP_ERROR_INV_PARAM;
- af->from_addr_param(&addr, addr_param, asoc->peer.port, 0);
+ af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0);
switch (asconf_param->param_hdr.type) {
case SCTP_PARAM_ADD_IP:
/* ADDIP 4.3 D9) If an endpoint receives an ADD IP address
* Due to Resource Shortage'.
*/
- peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC);
+ peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED);
if (!peer)
return SCTP_ERROR_RSRC_LOW;
sctp_addip_param_t *asconf_param;
struct sctp_chunk *asconf_ack;
- __u16 err_code;
+ __be16 err_code;
int length = 0;
int chunk_len = asconf->skb->len;
__u32 serial;
union sctp_addr_param *addr_param;
struct list_head *pos;
struct sctp_transport *transport;
+ struct sctp_sockaddr_entry *saddr;
int retval = 0;
addr_param = (union sctp_addr_param *)
/* We have checked the packet before, so we do not check again. */
af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type));
- af->from_addr_param(&addr, addr_param, bp->port, 0);
+ af->from_addr_param(&addr, addr_param, htons(bp->port), 0);
switch (asconf_param->param_hdr.type) {
case SCTP_PARAM_ADD_IP:
sctp_local_bh_disable();
sctp_write_lock(&asoc->base.addr_lock);
- retval = sctp_add_bind_addr(bp, &addr, GFP_ATOMIC);
+ list_for_each(pos, &bp->address_list) {
+ saddr = list_entry(pos, struct sctp_sockaddr_entry, list);
+ if (sctp_cmp_addr_exact(&saddr->a, &addr))
+ saddr->use_as_src = 1;
+ }
sctp_write_unlock(&asoc->base.addr_lock);
sctp_local_bh_enable();
break;
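The ADD_IP success path no longer re-adds the address to the bind list: the address was already inserted, flagged as not yet usable as a source, when the ASCONF went out, so confirmation only needs to flip use_as_src on the existing entry. A sketch of the entry being walked, from the fields this code touches:

	struct sctp_sockaddr_entry {
		struct list_head list;
		union sctp_addr	 a;
		__u8		 use_as_src;	/* 0 until the ASCONF-ACK confirms */
	};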
list_for_each(pos, &asoc->peer.transport_addr_list) {
transport = list_entry(pos, struct sctp_transport,
transports);
+ dst_release(transport->dst);
sctp_transport_route(transport, NULL,
sctp_sk(asoc->base.sk));
}
* All TLVs after the failed response are considered unsuccessful unless a
* specific success indication is present for the parameter.
*/
-static __u16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
+static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
sctp_addip_param_t *asconf_param,
int no_err)
{
sctp_errhdr_t *err_param;
int length;
int asconf_ack_len = asconf_ack->skb->len;
- __u16 err_code;
+ __be16 err_code;
if (no_err)
err_code = SCTP_ERROR_NO_ERROR;
int all_param_pass = 0;
int no_err = 1;
int retval = 0;
- __u16 err_code = SCTP_ERROR_NO_ERROR;
+ __be16 err_code = SCTP_ERROR_NO_ERROR;
/* Skip the chunkhdr and addiphdr from the last asconf sent and store
* a pointer to the address parameter.
asoc->addip_last_asconf = NULL;
/* Send the next asconf chunk from the addip chunk queue. */
- asconf = (struct sctp_chunk *)__skb_dequeue(&asoc->addip_chunks);
- if (asconf) {
+ if (!list_empty(&asoc->addip_chunk_list)) {
+ struct list_head *entry = asoc->addip_chunk_list.next;
+ asconf = list_entry(entry, struct sctp_chunk, list);
+
+ list_del_init(entry);
+
/* Hold the chunk until an ASCONF_ACK is received. */
sctp_chunk_hold(asconf);
if (sctp_primitive_ASCONF(asoc, asconf))
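This final hunk completes the move of queued ASCONFs from an sk_buff queue onto a list_head threaded through chunk->list: the same field the new INIT_LIST_HEAD() initializes at chunk allocation, and the one the BUG_ON(!list_empty(&chunk->list)) in sctp_chunk_free() asserts has been unlinked before a chunk dies. The dequeue idiom in isolation:

	/* Pop the first chunk, if any, off a list_head-based queue. */
	if (!list_empty(&asoc->addip_chunk_list)) {
		struct list_head *entry = asoc->addip_chunk_list.next;

		asconf = list_entry(entry, struct sctp_chunk, list);
		list_del_init(entry);
	}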