* be incorporated into the next SCTP release.
*/
+#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
/* Use GFP_ATOMIC since BHs are disabled. */
addr->v4.sin_port = ntohs(addr->v4.sin_port);
- ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC);
+ ret = sctp_add_bind_addr(bp, addr, GFP_ATOMIC);
addr->v4.sin_port = htons(addr->v4.sin_port);
sctp_write_unlock(&ep->base.addr_lock);
sctp_local_bh_enable();
struct sctp_chunk *chunk;
struct sctp_sockaddr_entry *laddr;
union sctp_addr *addr;
- union sctp_addr saveaddr;
void *addr_buf;
struct sctp_af *af;
struct list_head *pos;
}
retval = sctp_send_asconf(asoc, chunk);
- if (retval)
- goto out;
- /* Add the new addresses to the bind address list with
- * use_as_src set to 0.
+ /* FIXME: After sending the add address ASCONF chunk, we
+ * cannot append the address to the association's binding
+ * address list, because the new address may be used as the
+ * source of a message sent to the peer before the ASCONF
+ * chunk is received by the peer. So we should wait until
+ * ASCONF_ACK is received.
*/
- sctp_local_bh_disable();
- sctp_write_lock(&asoc->base.addr_lock);
- addr_buf = addrs;
- for (i = 0; i < addrcnt; i++) {
- addr = (union sctp_addr *)addr_buf;
- af = sctp_get_af_specific(addr->v4.sin_family);
- memcpy(&saveaddr, addr, af->sockaddr_len);
- saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);
- retval = sctp_add_bind_addr(bp, &saveaddr, 0,
- GFP_ATOMIC);
- addr_buf += af->sockaddr_len;
- }
- sctp_write_unlock(&asoc->base.addr_lock);
- sctp_local_bh_enable();
}
out:
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sctp_association *asoc;
- struct sctp_transport *transport;
struct sctp_bind_addr *bp;
struct sctp_chunk *chunk;
union sctp_addr *laddr;
- union sctp_addr saveaddr;
void *addr_buf;
struct sctp_af *af;
- struct list_head *pos, *pos1;
- struct sctp_sockaddr_entry *saddr;
+ struct list_head *pos;
int i;
int retval = 0;
goto out;
}
- /* Reset use_as_src flag for the addresses in the bind address
- * list that are to be deleted.
- */
- sctp_local_bh_disable();
- sctp_write_lock(&asoc->base.addr_lock);
- addr_buf = addrs;
- for (i = 0; i < addrcnt; i++) {
- laddr = (union sctp_addr *)addr_buf;
- af = sctp_get_af_specific(laddr->v4.sin_family);
- memcpy(&saveaddr, laddr, af->sockaddr_len);
- saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);
- list_for_each(pos1, &bp->address_list) {
- saddr = list_entry(pos1,
- struct sctp_sockaddr_entry,
- list);
- if (sctp_cmp_addr_exact(&saddr->a, &saveaddr))
- saddr->use_as_src = 0;
- }
- addr_buf += af->sockaddr_len;
- }
- sctp_write_unlock(&asoc->base.addr_lock);
- sctp_local_bh_enable();
+ retval = sctp_send_asconf(asoc, chunk);
- /* Update the route and saddr entries for all the transports
- * as some of the addresses in the bind address list are
- * about to be deleted and cannot be used as source addresses.
+ /* FIXME: After sending the delete address ASCONF chunk, we
+ * cannot remove the addresses from the association's bind
+ * address list, because there may be some packets sent to
+ * the deleted addresses, so we should wait until the
+ * ASCONF_ACK is received.
*/
- list_for_each(pos1, &asoc->peer.transport_addr_list) {
- transport = list_entry(pos1, struct sctp_transport,
- transports);
- dst_release(transport->dst);
- sctp_transport_route(transport, NULL,
- sctp_sk(asoc->base.sk));
- }
-
- retval = sctp_send_asconf(asoc, chunk);
}
out:
return retval;
/* Caller must hold hashbucket lock for this tb with local BH disabled */
static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
{
- if (pp && hlist_empty(&pp->owner)) {
+ if (hlist_empty(&pp->owner)) {
if (pp->next)
pp->next->pprev = pp->pprev;
*(pp->pprev) = pp->next;