* be incorporated into the next SCTP release.
*/
-#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
struct sctp_association *, sctp_socket_type_t);
static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
-extern kmem_cache_t *sctp_bucket_cachep;
+extern struct kmem_cache *sctp_bucket_cachep;
/* Get the sndbuf space available at the time on the association. */
static inline int sctp_wspace(struct sctp_association *asoc)
struct sctp_transport *transport;
union sctp_addr *laddr = (union sctp_addr *)addr;
- laddr->v4.sin_port = ntohs(laddr->v4.sin_port);
addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
- (union sctp_addr *)addr,
+ laddr,
&transport);
- laddr->v4.sin_port = htons(laddr->v4.sin_port);
if (!addr_asoc)
return NULL;
sctp_write_lock(&ep->base.addr_lock);
/* Use GFP_ATOMIC since BHs are disabled. */
- addr->v4.sin_port = ntohs(addr->v4.sin_port);
- ret = sctp_add_bind_addr(bp, addr, GFP_ATOMIC);
- addr->v4.sin_port = htons(addr->v4.sin_port);
+ ret = sctp_add_bind_addr(bp, addr, 1, GFP_ATOMIC);
sctp_write_unlock(&ep->base.addr_lock);
sctp_local_bh_enable();
struct sctp_chunk *chunk;
struct sctp_sockaddr_entry *laddr;
union sctp_addr *addr;
+ union sctp_addr saveaddr;
void *addr_buf;
struct sctp_af *af;
struct list_head *pos;
}
retval = sctp_send_asconf(asoc, chunk);
+ if (retval)
+ goto out;
- /* FIXME: After sending the add address ASCONF chunk, we
- * cannot append the address to the association's binding
- * address list, because the new address may be used as the
- * source of a message sent to the peer before the ASCONF
- * chunk is received by the peer. So we should wait until
- * ASCONF_ACK is received.
+ /* Add the new addresses to the bind address list with
+ * use_as_src set to 0.
*/
+ sctp_local_bh_disable();
+ sctp_write_lock(&asoc->base.addr_lock);
+ addr_buf = addrs;
+ for (i = 0; i < addrcnt; i++) {
+ addr = (union sctp_addr *)addr_buf;
+ af = sctp_get_af_specific(addr->v4.sin_family);
+ memcpy(&saveaddr, addr, af->sockaddr_len);
+ retval = sctp_add_bind_addr(bp, &saveaddr, 0,
+ GFP_ATOMIC);
+ addr_buf += af->sockaddr_len;
+ }
+ sctp_write_unlock(&asoc->base.addr_lock);
+ sctp_local_bh_enable();
}
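/*
 * Illustrative userspace sketch, not part of this patch: adding a second
 * local address at run time is what drives the ASCONF ADD-IP path above,
 * which now records the address with use_as_src = 0 until the peer's
 * ASCONF-ACK arrives.  Assumes lksctp-tools' sctp_bindx(); fd and
 * local_port are placeholders for an already-bound one-to-many socket:
 *
 *	struct sockaddr_in sin;
 *
 *	memset(&sin, 0, sizeof(sin));
 *	sin.sin_family = AF_INET;
 *	sin.sin_port = htons(local_port);
 *	inet_pton(AF_INET, "192.0.2.10", &sin.sin_addr);
 *	sctp_bindx(fd, (struct sockaddr *)&sin, 1, SCTP_BINDX_ADD_ADDR);
 */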
out:
int cnt;
struct sctp_bind_addr *bp = &ep->base.bind_addr;
int retval = 0;
- union sctp_addr saveaddr;
void *addr_buf;
- struct sockaddr *sa_addr;
+ union sctp_addr *sa_addr;
struct sctp_af *af;
SCTP_DEBUG_PRINTK("sctp_bindx_rem (sk: %p, addrs: %p, addrcnt: %d)\n",
goto err_bindx_rem;
}
- /* The list may contain either IPv4 or IPv6 address;
- * determine the address length to copy the address to
- * saveaddr.
- */
- sa_addr = (struct sockaddr *)addr_buf;
- af = sctp_get_af_specific(sa_addr->sa_family);
+ sa_addr = (union sctp_addr *)addr_buf;
+ af = sctp_get_af_specific(sa_addr->sa.sa_family);
if (!af) {
retval = -EINVAL;
goto err_bindx_rem;
}
- memcpy(&saveaddr, sa_addr, af->sockaddr_len);
- saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port);
- if (saveaddr.v4.sin_port != bp->port) {
+ if (sa_addr->v4.sin_port != htons(bp->port)) {
retval = -EINVAL;
goto err_bindx_rem;
}
sctp_local_bh_disable();
sctp_write_lock(&ep->base.addr_lock);
- retval = sctp_del_bind_addr(bp, &saveaddr);
+ retval = sctp_del_bind_addr(bp, sa_addr);
sctp_write_unlock(&ep->base.addr_lock);
sctp_local_bh_enable();
struct sctp_sock *sp;
struct sctp_endpoint *ep;
struct sctp_association *asoc;
+ struct sctp_transport *transport;
struct sctp_bind_addr *bp;
struct sctp_chunk *chunk;
union sctp_addr *laddr;
void *addr_buf;
struct sctp_af *af;
- struct list_head *pos;
+ struct list_head *pos, *pos1;
+ struct sctp_sockaddr_entry *saddr;
int i;
int retval = 0;
goto out;
}
- retval = sctp_send_asconf(asoc, chunk);
+ /* Reset use_as_src flag for the addresses in the bind address
+ * list that are to be deleted.
+ */
+ sctp_local_bh_disable();
+ sctp_write_lock(&asoc->base.addr_lock);
+ addr_buf = addrs;
+ for (i = 0; i < addrcnt; i++) {
+ laddr = (union sctp_addr *)addr_buf;
+ af = sctp_get_af_specific(laddr->v4.sin_family);
+ list_for_each(pos1, &bp->address_list) {
+ saddr = list_entry(pos1,
+ struct sctp_sockaddr_entry,
+ list);
+ if (sctp_cmp_addr_exact(&saddr->a, laddr))
+ saddr->use_as_src = 0;
+ }
+ addr_buf += af->sockaddr_len;
+ }
+ sctp_write_unlock(&asoc->base.addr_lock);
+ sctp_local_bh_enable();
- /* FIXME: After sending the delete address ASCONF chunk, we
- * cannot remove the addresses from the association's bind
- * address list, because there maybe some packet send to
- * the delete addresses, so we should wait until ASCONF_ACK
- * packet is received.
+ /* Update the route and saddr entries for all the transports
+ * as some of the addresses in the bind address list are
+ * about to be deleted and cannot be used as source addresses.
*/
+ list_for_each(pos1, &asoc->peer.transport_addr_list) {
+ transport = list_entry(pos1, struct sctp_transport,
+ transports);
+ dst_release(transport->dst);
+ sctp_transport_route(transport, NULL,
+ sctp_sk(asoc->base.sk));
+ }
+
+ retval = sctp_send_asconf(asoc, chunk);
}
out:
return retval;
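/*
 * Companion userspace sketch to the ADD example above, not part of this
 * patch: removing a bound address drives the ASCONF DEL-IP path, which now
 * clears use_as_src and re-routes the transports before the chunk is sent.
 * With the same sin/fd placeholders:
 *
 *	sctp_bindx(fd, (struct sockaddr *)&sin, 1, SCTP_BINDX_REM_ADDR);
 */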
* addrs is a pointer to an array of one or more socket addresses. Each
* address is contained in its appropriate structure (i.e. struct
* sockaddr_in or struct sockaddr_in6) the family of the address type
- * must be used to distengish the address length (note that this
+ * must be used to distinguish the address length (note that this
* representation is termed a "packed array" of addresses). The caller
* specifies the number of addresses in the array with addrcnt.
*
int err = 0;
int addrcnt = 0;
int walk_size = 0;
- struct sockaddr *sa_addr;
+ union sctp_addr *sa_addr;
void *addr_buf;
sp = sctp_sk(sk);
/* Walk through the addrs buffer and count the number of addresses. */
addr_buf = kaddrs;
while (walk_size < addrs_size) {
- sa_addr = (struct sockaddr *)addr_buf;
- af = sctp_get_af_specific(sa_addr->sa_family);
+ sa_addr = (union sctp_addr *)addr_buf;
+ af = sctp_get_af_specific(sa_addr->sa.sa_family);
/* If the address family is not supported or if this address
* causes the address buffer to overflow return EINVAL.
goto out_free;
}
- err = sctp_verify_addr(sk, (union sctp_addr *)sa_addr,
- af->sockaddr_len);
+ err = sctp_verify_addr(sk, sa_addr, af->sockaddr_len);
if (err)
goto out_free;
memcpy(&to, sa_addr, af->sockaddr_len);
- to.v4.sin_port = ntohs(to.v4.sin_port);
/* Check if there already is a matching association on the
* endpoint (other than the one created here).
*/
- asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport);
+ asoc2 = sctp_endpoint_lookup_assoc(ep, sa_addr, &transport);
if (asoc2 && asoc2 != asoc) {
if (asoc2->state >= SCTP_STATE_ESTABLISHED)
err = -EISCONN;
* make sure that there is no peeled-off association matching
* the peer address even on another socket.
*/
- if (sctp_endpoint_is_peeled_off(ep, &to)) {
+ if (sctp_endpoint_is_peeled_off(ep, sa_addr)) {
err = -EADDRNOTAVAIL;
goto out_free;
}
}
}
- scope = sctp_scope(&to);
+ scope = sctp_scope(sa_addr);
asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
if (!asoc) {
err = -ENOMEM;
}
/* Prime the peer's transport structures. */
- transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
+ transport = sctp_assoc_add_peer(asoc, sa_addr, GFP_KERNEL,
SCTP_UNKNOWN);
if (!transport) {
err = -ENOMEM;
inet_sk(sk)->dport = htons(asoc->peer.port);
af = sctp_get_af_specific(to.sa.sa_family);
af->to_sk_daddr(&to, sk);
+ sk->sk_err = 0;
timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK);
err = sctp_wait_for_connect(asoc, &timeo);
ep = sctp_sk(sk)->ep;
- /* Walk all associations on a socket, not on an endpoint. */
+ /* Walk all associations on an endpoint. */
list_for_each_safe(pos, temp, &ep->asocs) {
asoc = list_entry(pos, struct sctp_association, asocs);
if (sctp_state(asoc, CLOSED)) {
sctp_unhash_established(asoc);
sctp_association_free(asoc);
+ continue;
+ }
+ }
- } else if (sock_flag(sk, SOCK_LINGER) &&
- !sk->sk_lingertime) {
- struct sctp_chunk *chunk;
+ if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
+ struct sctp_chunk *chunk;
- chunk = sctp_make_abort_user(asoc, NULL, 0);
- if (chunk)
- sctp_primitive_ABORT(asoc, chunk);
- } else
- sctp_primitive_SHUTDOWN(asoc, NULL);
+ chunk = sctp_make_abort_user(asoc, NULL, 0);
+ if (chunk)
+ sctp_primitive_ABORT(asoc, chunk);
} else
sctp_primitive_SHUTDOWN(asoc, NULL);
}
if (msg_namelen > sizeof(to))
msg_namelen = sizeof(to);
memcpy(&to, msg->msg_name, msg_namelen);
- SCTP_DEBUG_PRINTK("Just memcpy'd. msg_name is "
- "0x%x:%u.\n",
- to.v4.sin_addr.s_addr, to.v4.sin_port);
-
- to.v4.sin_port = ntohs(to.v4.sin_port);
msg_name = msg->msg_name;
}
* SPP_SACKDELAY_ENABLE, setting both will have undefined
* results.
*/
-int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
- struct sctp_transport *trans,
- struct sctp_association *asoc,
- struct sctp_sock *sp,
- int hb_change,
- int pmtud_change,
- int sackdelay_change)
+static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
+ struct sctp_transport *trans,
+ struct sctp_association *asoc,
+ struct sctp_sock *sp,
+ int hb_change,
+ int pmtud_change,
+ int sackdelay_change)
{
int error;
return err;
}
-static int sctp_setsockopt_adaption_layer(struct sock *sk, char __user *optval,
+static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
int optlen)
{
- struct sctp_setadaption adaption;
+ struct sctp_setadaptation adaptation;
- if (optlen != sizeof(struct sctp_setadaption))
+ if (optlen != sizeof(struct sctp_setadaptation))
return -EINVAL;
- if (copy_from_user(&adaption, optval, optlen))
+ if (copy_from_user(&adaptation, optval, optlen))
return -EFAULT;
- sctp_sk(sk)->adaption_ind = adaption.ssb_adaption_ind;
+ sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;
+
+ return 0;
+}
+
+/*
+ * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
+ *
+ * The context field in the sctp_sndrcvinfo structure is normally only
+ * used when a failed message is retrieved holding the value that was
+ * sent down on the actual send call. This option allows the setting of
+ * a default context on an association basis that will be received on
+ * reading messages from the peer. This is especially helpful in the
+ * one-2-many model for an application to keep some reference to an
+ * internal state machine that is processing messages on the
+ * association. Note that the setting of this value only affects
+ * received messages from the peer and does not affect the value that is
+ * saved with outbound messages.
+ */
+static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
+ int optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_sock *sp;
+ struct sctp_association *asoc;
+
+ if (optlen != sizeof(struct sctp_assoc_value))
+ return -EINVAL;
+	if (copy_from_user(&params, optval, optlen))
+ return -EFAULT;
+
+ sp = sctp_sk(sk);
+
+ if (params.assoc_id != 0) {
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc)
+ return -EINVAL;
+ asoc->default_rcv_context = params.assoc_value;
+ } else {
+ sp->default_rcv_context = params.assoc_value;
+ }
return 0;
}
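/*
 * Illustrative userspace sketch, not part of this patch: the new
 * SCTP_CONTEXT option is exercised with struct sctp_assoc_value; the value
 * set here is reported back in sinfo_context of received messages.  fd and
 * assoc_id are placeholders:
 *
 *	struct sctp_assoc_value av = { .assoc_id = assoc_id,
 *				       .assoc_value = 0x1234 };
 *	socklen_t len = sizeof(av);
 *
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_CONTEXT, &av, sizeof(av));
 *	getsockopt(fd, IPPROTO_SCTP, SCTP_CONTEXT, &av, &len);
 */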
case SCTP_MAXSEG:
retval = sctp_setsockopt_maxseg(sk, optval, optlen);
break;
- case SCTP_ADAPTION_LAYER:
- retval = sctp_setsockopt_adaption_layer(sk, optval, optlen);
+ case SCTP_ADAPTATION_LAYER:
+ retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen);
+ break;
+ case SCTP_CONTEXT:
+ retval = sctp_setsockopt_context(sk, optval, optlen);
break;
default:
goto out;
}
- timeo = sock_rcvtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK);
+ timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
error = sctp_wait_for_accept(sk, timeo);
if (error)
sp->default_context = 0;
sp->default_timetolive = 0;
+ sp->default_rcv_context = 0;
+
/* Initialize default setup parameters. These parameters
* can be modified with the SCTP_INITMSG socket option or
* overridden by the SCTP_INIT CMSG.
sp->initmsg.sinit_num_ostreams = sctp_max_outstreams;
sp->initmsg.sinit_max_instreams = sctp_max_instreams;
sp->initmsg.sinit_max_attempts = sctp_max_retrans_init;
- sp->initmsg.sinit_max_init_timeo = jiffies_to_msecs(sctp_rto_max);
+ sp->initmsg.sinit_max_init_timeo = sctp_rto_max;
/* Initialize default RTO related parameters. These parameters can
 * be modified with the SCTP_RTOINFO socket option.
*/
- sp->rtoinfo.srto_initial = jiffies_to_msecs(sctp_rto_initial);
- sp->rtoinfo.srto_max = jiffies_to_msecs(sctp_rto_max);
- sp->rtoinfo.srto_min = jiffies_to_msecs(sctp_rto_min);
+ sp->rtoinfo.srto_initial = sctp_rto_initial;
+ sp->rtoinfo.srto_max = sctp_rto_max;
+ sp->rtoinfo.srto_min = sctp_rto_min;
/* Initialize default association related parameters. These parameters
* can be modified with the SCTP_ASSOCINFO socket option.
sp->assocparams.sasoc_number_peer_destinations = 0;
sp->assocparams.sasoc_peer_rwnd = 0;
sp->assocparams.sasoc_local_rwnd = 0;
- sp->assocparams.sasoc_cookie_life =
- jiffies_to_msecs(sctp_valid_cookie_life);
+ sp->assocparams.sasoc_cookie_life = sctp_valid_cookie_life;
/* Initialize default event subscriptions. By default, all the
* options are off.
/* Default Peer Address Parameters. These defaults can
* be modified via SCTP_PEER_ADDR_PARAMS
*/
- sp->hbinterval = jiffies_to_msecs(sctp_hb_interval);
+ sp->hbinterval = sctp_hb_interval;
sp->pathmaxrxt = sctp_max_retrans_path;
sp->pathmtu = 0; // allow default discovery
- sp->sackdelay = jiffies_to_msecs(sctp_sack_timeout);
+ sp->sackdelay = sctp_sack_timeout;
sp->param_flags = SPP_HB_ENABLE |
SPP_PMTUD_ENABLE |
SPP_SACKDELAY_ENABLE;
*/
sp->disable_fragments = 0;
- /* Turn on/off any Nagle-like algorithm. */
- sp->nodelay = 1;
+ /* Enable Nagle algorithm by default. */
+ sp->nodelay = 0;
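/*
 * Note for applications (illustrative, not part of this patch): with the
 * Nagle-like algorithm now on by default, latency-sensitive callers are
 * expected to disable it explicitly, e.g.
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on));
 */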
/* Enable by default. */
sp->v4mapped = 1;
/* User specified fragmentation limit. */
sp->user_frag = 0;
- sp->adaption_ind = 0;
+ sp->adaptation_ind = 0;
sp->pf = sctp_get_pf_specific(sk->sk_family);
status.sstat_outstrms = asoc->c.sinit_num_ostreams;
status.sstat_fragmentation_point = asoc->frag_point;
status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
- memcpy(&status.sstat_primary.spinfo_address,
- &(transport->ipaddr), sizeof(union sctp_addr));
+ memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
+ transport->af_specific->sockaddr_len);
/* Map ipv4 address into v4-mapped-on-v6 address. */
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
(union sctp_addr *)&status.sstat_primary.spinfo_address);
{
struct sock *sk = asoc->base.sk;
struct socket *sock;
+ struct inet_sock *inetsk;
int err = 0;
/* An association cannot be branched off from an already peeled-off
* asoc to the newsk.
*/
sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH);
+
+ /* Make peeled-off sockets more like 1-1 accepted sockets.
+ * Set the daddr and initialize id to something more random
+ */
+ inetsk = inet_sk(sock->sk);
+ inetsk->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
+ inetsk->id = asoc->next_tsn ^ jiffies;
+
*sockp = sock;
return err;
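/*
 * Illustrative userspace sketch, not part of this patch: this branch-off is
 * reached via lksctp-tools' sctp_peeloff(), and with the daddr/id setup
 * above the returned descriptor now behaves much like a one-to-one accepted
 * socket:
 *
 *	int peeled_fd = sctp_peeloff(fd, assoc_id);
 */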
memcpy(&temp, &from->ipaddr, sizeof(temp));
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
- temp.v4.sin_port = htons(temp.v4.sin_port);
if (copy_to_user(to, &temp, addrlen))
return -EFAULT;
to += addrlen ;
addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
if(space_left < addrlen)
return -ENOMEM;
- temp.v4.sin_port = htons(temp.v4.sin_port);
if (copy_to_user(to, &temp, addrlen))
return -EFAULT;
to += addrlen;
sctp_assoc_t id;
struct sctp_bind_addr *bp;
struct sctp_association *asoc;
- struct list_head *pos;
+ struct list_head *pos, *temp;
struct sctp_sockaddr_entry *addr;
rwlock_t *addr_lock;
- unsigned long flags;
int cnt = 0;
if (len != sizeof(sctp_assoc_t))
addr = list_entry(bp->address_list.next,
struct sctp_sockaddr_entry, list);
if (sctp_is_any(&addr->a)) {
- sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags);
- list_for_each(pos, &sctp_local_addr_list) {
+ list_for_each_safe(pos, temp, &sctp_local_addr_list) {
addr = list_entry(pos,
struct sctp_sockaddr_entry,
list);
if ((PF_INET == sk->sk_family) &&
- (AF_INET6 == addr->a.sa.sa_family))
+ (AF_INET6 == addr->a.sa.sa_family))
continue;
cnt++;
}
- sctp_spin_unlock_irqrestore(&sctp_local_addr_lock,
- flags);
} else {
cnt = 1;
}
static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_addrs,
void __user *to)
{
- struct list_head *pos;
+ struct list_head *pos, *next;
struct sctp_sockaddr_entry *addr;
- unsigned long flags;
union sctp_addr temp;
int cnt = 0;
int addrlen;
- sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags);
- list_for_each(pos, &sctp_local_addr_list) {
+ list_for_each_safe(pos, next, &sctp_local_addr_list) {
addr = list_entry(pos, struct sctp_sockaddr_entry, list);
if ((PF_INET == sk->sk_family) &&
(AF_INET6 == addr->a.sa.sa_family))
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
&temp);
addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
- temp.v4.sin_port = htons(port);
- if (copy_to_user(to, &temp, addrlen)) {
- sctp_spin_unlock_irqrestore(&sctp_local_addr_lock,
- flags);
+ if (copy_to_user(to, &temp, addrlen))
return -EFAULT;
- }
+
to += addrlen;
cnt ++;
if (cnt >= max_addrs) break;
}
- sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags);
return cnt;
}
static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port,
void __user **to, size_t space_left)
{
- struct list_head *pos;
+ struct list_head *pos, *next;
struct sctp_sockaddr_entry *addr;
- unsigned long flags;
union sctp_addr temp;
int cnt = 0;
int addrlen;
- sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags);
- list_for_each(pos, &sctp_local_addr_list) {
+ list_for_each_safe(pos, next, &sctp_local_addr_list) {
addr = list_entry(pos, struct sctp_sockaddr_entry, list);
if ((PF_INET == sk->sk_family) &&
(AF_INET6 == addr->a.sa.sa_family))
addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
if(space_left<addrlen)
return -ENOMEM;
- temp.v4.sin_port = htons(port);
- if (copy_to_user(*to, &temp, addrlen)) {
- sctp_spin_unlock_irqrestore(&sctp_local_addr_lock,
- flags);
+ if (copy_to_user(*to, &temp, addrlen))
return -EFAULT;
- }
+
*to += addrlen;
cnt ++;
space_left -= addrlen;
}
- sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags);
return cnt;
}
memcpy(&temp, &addr->a, sizeof(temp));
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
- temp.v4.sin_port = htons(temp.v4.sin_port);
if (copy_to_user(to, &temp, addrlen)) {
err = -EFAULT;
goto unlock;
addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
if(space_left < addrlen)
return -ENOMEM; /*fixme: right error?*/
- temp.v4.sin_port = htons(temp.v4.sin_port);
if (copy_to_user(to, &temp, addrlen)) {
err = -EFAULT;
goto unlock;
if (!asoc->peer.primary_path)
return -ENOTCONN;
- asoc->peer.primary_path->ipaddr.v4.sin_port =
- htons(asoc->peer.primary_path->ipaddr.v4.sin_port);
memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
- sizeof(union sctp_addr));
- asoc->peer.primary_path->ipaddr.v4.sin_port =
- ntohs(asoc->peer.primary_path->ipaddr.v4.sin_port);
+ asoc->peer.primary_path->af_specific->sockaddr_len);
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp,
(union sctp_addr *)&prim.ssp_addr);
}
/*
- * 7.1.11 Set Adaption Layer Indicator (SCTP_ADAPTION_LAYER)
+ * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER)
*
- * Requests that the local endpoint set the specified Adaption Layer
+ * Requests that the local endpoint set the specified Adaptation Layer
* Indication parameter for all future INIT and INIT-ACK exchanges.
*/
-static int sctp_getsockopt_adaption_layer(struct sock *sk, int len,
+static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
- struct sctp_setadaption adaption;
+ struct sctp_setadaptation adaptation;
- if (len != sizeof(struct sctp_setadaption))
+ if (len != sizeof(struct sctp_setadaptation))
return -EINVAL;
- adaption.ssb_adaption_ind = sctp_sk(sk)->adaption_ind;
- if (copy_to_user(optval, &adaption, len))
+ adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
+ if (copy_to_user(optval, &adaptation, len))
return -EFAULT;
return 0;
return 0;
}
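/*
 * Illustrative userspace sketch, not part of this patch: the renamed option
 * is read back with the post-rename sctp_setadaptation structure; the
 * indication is advertised in future INIT/INIT-ACK exchanges:
 *
 *	struct sctp_setadaptation sa;
 *	socklen_t len = sizeof(sa);
 *
 *	getsockopt(fd, IPPROTO_SCTP, SCTP_ADAPTATION_LAYER, &sa, &len);
 */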
+/*
+ * 7.1.29. Set or Get the default context (SCTP_CONTEXT)
+ * (chapter and verse is quoted at sctp_setsockopt_context())
+ */
+static int sctp_getsockopt_context(struct sock *sk, int len,
+ char __user *optval, int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_sock *sp;
+ struct sctp_association *asoc;
+
+ if (len != sizeof(struct sctp_assoc_value))
+ return -EINVAL;
+
+	if (copy_from_user(&params, optval, len))
+ return -EFAULT;
+
+ sp = sctp_sk(sk);
+
+ if (params.assoc_id != 0) {
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (!asoc)
+ return -EINVAL;
+ params.assoc_value = asoc->default_rcv_context;
+ } else {
+ params.assoc_value = sp->default_rcv_context;
+ }
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+	if (copy_to_user(optval, &params, len))
+ return -EFAULT;
+
+ return 0;
+}
+
/*
 * 7.1.17 Set the maximum fragmentation size (SCTP_MAXSEG)
*
retval = sctp_getsockopt_peer_addr_info(sk, len, optval,
optlen);
break;
- case SCTP_ADAPTION_LAYER:
- retval = sctp_getsockopt_adaption_layer(sk, len, optval,
+ case SCTP_ADAPTATION_LAYER:
+ retval = sctp_getsockopt_adaptation_layer(sk, len, optval,
optlen);
break;
+ case SCTP_CONTEXT:
+ retval = sctp_getsockopt_context(sk, len, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
unsigned short snum;
int ret;
- /* NOTE: Remember to put this back to net order. */
- addr->v4.sin_port = ntohs(addr->v4.sin_port);
- snum = addr->v4.sin_port;
+ snum = ntohs(addr->v4.sin_port);
SCTP_DEBUG_PRINTK("sctp_get_port() begins, snum=%d\n", snum);
sctp_local_bh_disable();
fail:
sctp_local_bh_enable();
- addr->v4.sin_port = htons(addr->v4.sin_port);
return ret;
}
int sctp_inet_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
- struct crypto_tfm *tfm=NULL;
+ struct crypto_hash *tfm = NULL;
int err = -EINVAL;
if (unlikely(backlog < 0))
/* Allocate HMAC for generating cookie. */
if (sctp_hmac_alg) {
- tfm = sctp_crypto_alloc_tfm(sctp_hmac_alg, 0);
+ tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
if (!tfm) {
err = -ENOSYS;
goto out;
sctp_release_sock(sk);
return err;
cleanup:
- sctp_crypto_free_tfm(tfm);
+ crypto_free_hash(tfm);
goto out;
}
/* Are there any exceptional events? */
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
mask |= POLLERR;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ mask |= POLLRDHUP;
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
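/*
 * Illustrative userspace sketch, not part of this patch: with the hunk
 * above, a peer-initiated SHUTDOWN is now visible as POLLRDHUP (requires
 * _GNU_SOURCE) before the association fully closes.  fd and the
 * handle_peer_shutdown() callback are placeholders:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDHUP };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLRDHUP))
 *		handle_peer_shutdown(fd);
 */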
{
struct sctp_bind_bucket *pp;
- pp = kmem_cache_alloc(sctp_bucket_cachep, SLAB_ATOMIC);
+ pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
SCTP_DBG_OBJCNT_INC(bind_bucket);
if (pp) {
pp->port = snum;
/* Caller must hold hashbucket lock for this tb with local BH disabled */
static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
{
- if (hlist_empty(&pp->owner)) {
+ if (pp && hlist_empty(&pp->owner)) {
if (pp->next)
pp->next->pprev = pp->pprev;
*(pp->pprev) = pp->next;
{
union sctp_addr autoaddr;
struct sctp_af *af;
- unsigned short port;
+ __be16 port;
/* Initialize a local sockaddr structure to INADDR_ANY. */
af = sctp_sk(sk)->pf->af;
sctp_association_put(asoc);
}
+/* Do accounting for the receive space on the socket.
+ * Accounting for the association is done in ulpevent.c
+ * We set this as a destructor for the cloned data skbs so that
+ * accounting is done at the correct time.
+ */
+void sctp_sock_rfree(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+ atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
+}
+
+
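/*
 * For reference (a sketch, not part of this file): the charge-side helper
 * used by the migration code below is expected to look roughly like this in
 * the SCTP headers, pairing with sctp_sock_rfree() so the same
 * event->rmem_len is added here and subtracted in the destructor:
 *
 *	static inline void sctp_skb_set_owner_r(struct sk_buff *skb,
 *						struct sock *sk)
 *	{
 *		struct sctp_ulpevent *event = sctp_skb2event(skb);
 *
 *		skb->sk = sk;
 *		skb->destructor = sctp_sock_rfree;
 *		atomic_add(event->rmem_len, &sk->sk_rmem_alloc);
 *	}
 */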
/* Helper function to wait for space in the sndbuf. */
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len)
*/
sctp_release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
+ BUG_ON(sk != asoc->base.sk);
sctp_lock_sock(sk);
*timeo_p = current_timeo;
/* Copy the bind_addr list from the original endpoint to the new
* endpoint so that we can handle restarts properly
*/
+ if (PF_INET6 == assoc->base.sk->sk_family)
+ flags = SCTP_ADDR6_ALLOWED;
if (assoc->peer.ipv4_address)
flags |= SCTP_ADDR4_PEERSUPP;
if (assoc->peer.ipv6_address)
sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
- sock_rfree(skb);
+ sctp_sock_rfree(skb);
__skb_unlink(skb, &oldsk->sk_receive_queue);
__skb_queue_tail(&newsk->sk_receive_queue, skb);
- skb_set_owner_r(skb, newsk);
+ sctp_skb_set_owner_r(skb, newsk);
}
}
sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
- sock_rfree(skb);
+ sctp_sock_rfree(skb);
__skb_unlink(skb, &oldsp->pd_lobby);
__skb_queue_tail(queue, skb);
- skb_set_owner_r(skb, newsk);
+ sctp_skb_set_owner_r(skb, newsk);
}
}
*/
newsp->type = type;
- spin_lock_bh(&oldsk->sk_lock.slock);
- /* Migrate the backlog from oldsk to newsk. */
- sctp_backlog_migrate(assoc, oldsk, newsk);
- /* Migrate the association to the new socket. */
+ /* Mark the new socket "in-use" by the user so that any packets
+ * that may arrive on the association after we've moved it are
+ * queued to the backlog. This prevents a potential race between
+ * backlog processing on the old socket and new-packet processing
+ * on the new socket.
+ */
+ sctp_lock_sock(newsk);
sctp_assoc_migrate(assoc, newsk);
- spin_unlock_bh(&oldsk->sk_lock.slock);
/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.
newsk->sk_shutdown |= RCV_SHUTDOWN;
newsk->sk_state = SCTP_SS_ESTABLISHED;
+ sctp_release_sock(newsk);
}
/* This proto struct describes the ULP interface for SCTP. */