*
* Version: $Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $
*
- * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Florian La Roche, <flla@stud.uni-sb.de>
* Alan Cox, <A.Cox@swansea.ac.uk>
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/capability.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
-#include <linux/major.h>
#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
+#include <net/request_sock.h>
#include <net/sock.h>
+#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <linux/filter.h>
+#include <linux/vs_socket.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_context.h>
#ifdef CONFIG_INET
#include <net/tcp.h>
static void sock_warn_obsolete_bsdism(const char *name)
{
static int warned;
- static char warncomm[16];
+ static char warncomm[TASK_COMM_LEN];
if (strcmp(warncomm, current->comm) && warned < 5) {
strcpy(warncomm, current->comm);
printk(KERN_WARNING "process `%s' is using obsolete "
       "%s SO_BSDCOMPAT\n", warncomm, name);
warned++;
}
}
+static void sock_disable_timestamp(struct sock *sk)
+{
+ if (sock_flag(sk, SOCK_TIMESTAMP)) {
+ sock_reset_flag(sk, SOCK_TIMESTAMP);
+ net_disable_timestamp();
+ }
+}
+
+
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ int err = 0;
+ int skb_len;
+
+#if defined(CONFIG_VNET) || defined(CONFIG_VNET_MODULE)
+ /* Silently drop if VNET is active (if INET bind() has been
+ * overridden) and the context is not entitled to read the
+ * packet.
+ */
+ if (vnet_active &&
+ (int) sk->sk_xid > 0 && sk->sk_xid != skb->xid) {
+ err = -EPERM;
+ goto out;
+ }
+#endif
+
+ /* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
+ * the number of warnings when compiling with -W --ANK
+ */
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* It would be a deadlock if sock_queue_rcv_skb were used
+ * with the socket lock held! We assume that callers of this
+ * function do not hold the socket lock.
+ */
+ err = sk_filter(sk, skb, 1);
+ if (err)
+ goto out;
+
+ skb->dev = NULL;
+ skb_set_owner_r(skb, sk);
+
+ /* Cache the SKB length before we tack it onto the receive
+ * queue. Once it is added it no longer belongs to us and
+ * may be freed by other threads of control pulling packets
+ * from the queue.
+ */
+ skb_len = skb->len;
+
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk, skb_len);
+out:
+ return err;
+}
+EXPORT_SYMBOL(sock_queue_rcv_skb);
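/*
 * Editor's illustration (not part of this patch): a protocol receive
 * handler would typically hand a packet to the socket with
 * sock_queue_rcv_skb() and drop it on failure; my_proto_rcv() is a
 * hypothetical name.
 */
#if 0
static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* must be called without the socket lock held (see above) */
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		kfree_skb(skb);	/* rcvbuf full, filter drop, or xid mismatch */
		return NET_RX_DROP;
	}
	return NET_RX_SUCCESS;
}
#endif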
+
+int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
+{
+ int rc = NET_RX_SUCCESS;
+
+ if (sk_filter(sk, skb, 0))
+ goto discard_and_relse;
+
+ skb->dev = NULL;
+
+ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk))
+ rc = sk->sk_backlog_rcv(sk, skb);
+ else
+ sk_add_backlog(sk, skb);
+ bh_unlock_sock(sk);
+out:
+ sock_put(sk);
+ return rc;
+discard_and_relse:
+ kfree_skb(skb);
+ goto out;
+}
+EXPORT_SYMBOL(sk_receive_skb);
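/*
 * Editor's note (not part of this patch): sk_receive_skb() consumes a
 * socket reference (note the unconditional sock_put()), so callers hand
 * it a held socket, e.g. one returned by a hash lookup;
 * my_lookup_sock() is hypothetical.
 */
#if 0
	sk = my_lookup_sock(skb);	/* returns a sock with a reference held */
	if (sk != NULL)
		return sk_receive_skb(sk, skb);
	kfree_skb(skb);
	return NET_RX_DROP;
#endif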
+
+struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
+{
+ struct dst_entry *dst = sk->sk_dst_cache;
+
+ if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+ sk->sk_dst_cache = NULL;
+ dst_release(dst);
+ return NULL;
+ }
+
+ return dst;
+}
+EXPORT_SYMBOL(__sk_dst_check);
+
+struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
+{
+ struct dst_entry *dst = sk_dst_get(sk);
+
+ if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+ sk_dst_reset(sk);
+ dst_release(dst);
+ return NULL;
+ }
+
+ return dst;
+}
+EXPORT_SYMBOL(sk_dst_check);
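/*
 * Editor's sketch (not part of this patch): a transmit path revalidates
 * its cached route with sk_dst_check() and falls back to a fresh lookup
 * when the entry has gone stale; my_route_output() stands in for the
 * protocol's real route lookup.
 */
#if 0
static struct dst_entry *my_tx_route(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_check(sk, cookie);

	if (dst == NULL) {
		/* cached entry was obsolete; look the route up again */
		dst = my_route_output(sk);
		if (dst != NULL)
			sk_dst_set(sk, dst_clone(dst));
	}
	return dst;	/* reference held, caller must dst_release() */
}
#endif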
+
/*
* This is meant for all protocols to use and covers goings on
* at the socket level. Everything here is generic.
*/
#ifdef SO_DONTLINGER /* Compatibility item... */
- switch (optname) {
- case SO_DONTLINGER:
- sock_reset_flag(sk, SOCK_LINGER);
- return 0;
+ if (optname == SO_DONTLINGER) {
+ lock_sock(sk);
+ sock_reset_flag(sk, SOCK_LINGER);
+ release_sock(sk);
+ return 0;
}
-#endif
-
+#endif
+
if(optlen<sizeof(int))
return(-EINVAL);
{
ret = -EACCES;
}
+ else if (valbool)
+ sock_set_flag(sk, SOCK_DBG);
else
- sk->sk_debug = valbool;
+ sock_reset_flag(sk, SOCK_DBG);
break;
case SO_REUSEADDR:
sk->sk_reuse = valbool;
ret = -ENOPROTOOPT;
break;
case SO_DONTROUTE:
- sk->sk_localroute = valbool;
+ if (valbool)
+ sock_set_flag(sk, SOCK_LOCALROUTE);
+ else
+ sock_reset_flag(sk, SOCK_LOCALROUTE);
break;
case SO_BROADCAST:
sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
if (val > sysctl_wmem_max)
val = sysctl_wmem_max;
-
+set_sndbuf:
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
if ((val * 2) < SOCK_MIN_SNDBUF)
sk->sk_sndbuf = SOCK_MIN_SNDBUF;
sk->sk_write_space(sk);
break;
+ case SO_SNDBUFFORCE:
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ goto set_sndbuf;
+
case SO_RCVBUF:
/* Don't error on this; BSD doesn't, and if you think
about it this is right. Otherwise apps have to
if (val > sysctl_rmem_max)
val = sysctl_rmem_max;
-
+set_rcvbuf:
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
- /* FIXME: is this lower bound the right one? */
+ /*
+ * We double it on the way in to account for
+ * "struct sk_buff" etc. overhead. Applications
+ * assume that the SO_RCVBUF setting they make will
+ * allow that much actual data to be received on that
+ * socket.
+ *
+ * Applications are unaware that "struct sk_buff" and
+ * other overheads allocate from the receive buffer
+ * during socket buffer allocation.
+ *
+ * And after considering the possible alternatives,
+ * returning the value we actually used in getsockopt
+ * is the most desirable behavior.
+ */
if ((val * 2) < SOCK_MIN_RCVBUF)
sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
else
sk->sk_rcvbuf = val * 2;
break;
+ case SO_RCVBUFFORCE:
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ goto set_rcvbuf;
+
case SO_KEEPALIVE:
#ifdef CONFIG_INET
if (sk->sk_protocol == IPPROTO_TCP)
sock_reset_flag(sk, SOCK_LINGER);
else {
#if (BITS_PER_LONG == 32)
- if (ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
+ if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
else
#endif
- sk->sk_lingertime = ling.l_linger * HZ;
+ sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
sock_set_flag(sk, SOCK_LINGER);
}
break;
break;
case SO_PASSCRED:
- sock->passcred = valbool;
+ if (valbool)
+ set_bit(SOCK_PASSCRED, &sock->flags);
+ else
+ clear_bit(SOCK_PASSCRED, &sock->flags);
break;
+#if defined(CONFIG_VNET) || defined(CONFIG_VNET_MODULE)
+ case SO_SETXID:
+ if (current->xid) {
+ ret = -EPERM;
+ break;
+ }
+ if (val < 0 || val > MAX_S_CONTEXT) {
+ ret = -EINVAL;
+ break;
+ }
+ sk->sk_xid = val;
+ break;
+#endif
+
case SO_TIMESTAMP:
- sk->sk_rcvtstamp = valbool;
- if (valbool)
+ if (valbool) {
+ sock_set_flag(sk, SOCK_RCVTSTAMP);
sock_enable_timestamp(sk);
+ } else
+ sock_reset_flag(sk, SOCK_RCVTSTAMP);
break;
case SO_RCVLOWAT:
if (!valbool) {
sk->sk_bound_dev_if = 0;
} else {
- if (optlen > IFNAMSIZ)
- optlen = IFNAMSIZ;
+ if (optlen > IFNAMSIZ - 1)
+ optlen = IFNAMSIZ - 1;
+ memset(devname, 0, sizeof(devname));
if (copy_from_user(devname, optval, optlen)) {
ret = -EFAULT;
break;
struct timeval tm;
} v;
- unsigned int lv=sizeof(int),len;
+ unsigned int lv = sizeof(int);
+ int len;
if(get_user(len,optlen))
return -EFAULT;
switch(optname)
{
case SO_DEBUG:
- v.val = sk->sk_debug;
+ v.val = sock_flag(sk, SOCK_DBG);
break;
case SO_DONTROUTE:
- v.val = sk->sk_localroute;
+ v.val = sock_flag(sk, SOCK_LOCALROUTE);
break;
case SO_BROADCAST:
break;
case SO_TIMESTAMP:
- v.val = sk->sk_rcvtstamp;
+ v.val = sock_flag(sk, SOCK_RCVTSTAMP);
break;
case SO_RCVTIMEO:
v.tm.tv_usec = 0;
} else {
v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
- v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000) / HZ;
+ v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
}
break;
v.tm.tv_usec = 0;
} else {
v.tm.tv_sec = sk->sk_sndtimeo / HZ;
- v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000) / HZ;
+ v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
}
break;
break;
case SO_PASSCRED:
- v.val = sock->passcred;
+ v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
break;
case SO_PEERCRED:
break;
case SO_PEERSEC:
- return security_socket_getpeersec(sock, optval, optlen, len);
+ return security_socket_getpeersec_stream(sock, optval, optlen, len);
default:
return(-ENOPROTOOPT);
return 0;
}
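/*
 * Editor's illustration (not part of this patch): the SO_RCVBUF doubling
 * described above is directly observable from userspace; getsockopt()
 * returns twice the requested value (subject to sysctl_rmem_max and
 * SOCK_MIN_RCVBUF clamping). `fd' is an assumed open socket.
 */
#if 0
	int val = 65536, out;
	socklen_t len = sizeof(out);

	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
	/* out == 2 * val unless clamped */
#endif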
-static kmem_cache_t *sk_cachep;
-
/**
* sk_alloc - All socket objects are allocated here
- * @family - protocol family
- * @priority - for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
- * @zero_it - zeroes the allocated sock
- * @slab - alternate slab
- *
- * All socket objects are allocated here. If @zero_it is non-zero
- * it should have the size of the are to be zeroed, because the
- * private slabcaches have different sizes of the generic struct sock.
- * 1 has been kept as a way to say sizeof(struct sock).
+ * @family: protocol family
+ * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ * @prot: struct proto associated with this new sock instance
+ * @zero_it: if we should zero the newly allocated sock
*/
-struct sock *sk_alloc(int family, int priority, int zero_it, kmem_cache_t *slab)
+struct sock *sk_alloc(int family, gfp_t priority,
+ struct proto *prot, int zero_it)
{
struct sock *sk = NULL;
+ kmem_cache_t *slab = prot->slab;
+
+ if (slab != NULL)
+ sk = kmem_cache_alloc(slab, priority);
+ else
+ sk = kmalloc(prot->obj_size, priority);
- if (!slab)
- slab = sk_cachep;
- sk = kmem_cache_alloc(slab, priority);
if (sk) {
if (zero_it) {
- memset(sk, 0,
- zero_it == 1 ? sizeof(struct sock) : zero_it);
+ memset(sk, 0, prot->obj_size);
sk->sk_family = family;
+ /*
+ * See comment in struct sock definition to understand
+ * why we need sk_prot_creator -acme
+ */
+ sk->sk_prot = sk->sk_prot_creator = prot;
sock_lock_init(sk);
}
- sk->sk_slab = slab;
+ sock_vx_init(sk);
+ sock_nx_init(sk);
- if (security_sk_alloc(sk, family, priority)) {
- kmem_cache_free(slab, sk);
- sk = NULL;
- }
+ if (security_sk_alloc(sk, family, priority))
+ goto out_free;
+
+ if (!try_module_get(prot->owner))
+ goto out_free;
}
return sk;
+
+out_free:
+ if (slab != NULL)
+ kmem_cache_free(slab, sk);
+ else
+ kfree(sk);
+ return NULL;
}
void sk_free(struct sock *sk)
{
struct sk_filter *filter;
- struct module *owner = sk->sk_owner;
+ struct module *owner = sk->sk_prot_creator->owner;
if (sk->sk_destruct)
sk->sk_destruct(sk);
__FUNCTION__, atomic_read(&sk->sk_omem_alloc));
security_sk_free(sk);
- kmem_cache_free(sk->sk_slab, sk);
+ vx_sock_dec(sk);
+ clr_vx_info(&sk->sk_vx_info);
+ sk->sk_xid = -1;
+ clr_nx_info(&sk->sk_nx_info);
+ sk->sk_nid = -1;
+ if (sk->sk_prot_creator->slab != NULL)
+ kmem_cache_free(sk->sk_prot_creator->slab, sk);
+ else
+ kfree(sk);
module_put(owner);
}
-void __init sk_init(void)
+struct sock *sk_clone(struct sock *sk, const gfp_t priority)
{
- sk_cachep = kmem_cache_create("sock", sizeof(struct sock), 0,
- SLAB_HWCACHE_ALIGN, 0, 0);
- if (!sk_cachep)
- printk(KERN_CRIT "sk_init: Cannot create sock SLAB cache!");
+ struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0);
+
+ if (newsk != NULL) {
+ struct sk_filter *filter;
+
+ memcpy(newsk, sk, sk->sk_prot->obj_size);
+
+ /* SANITY */
+ sock_vx_init(newsk);
+ sock_nx_init(newsk);
+ sk_node_init(&newsk->sk_node);
+ sock_lock_init(newsk);
+ bh_lock_sock(newsk);
+
+ atomic_set(&newsk->sk_rmem_alloc, 0);
+ atomic_set(&newsk->sk_wmem_alloc, 0);
+ atomic_set(&newsk->sk_omem_alloc, 0);
+ skb_queue_head_init(&newsk->sk_receive_queue);
+ skb_queue_head_init(&newsk->sk_write_queue);
+
+ rwlock_init(&newsk->sk_dst_lock);
+ rwlock_init(&newsk->sk_callback_lock);
+
+ newsk->sk_dst_cache = NULL;
+ newsk->sk_wmem_queued = 0;
+ newsk->sk_forward_alloc = 0;
+ newsk->sk_send_head = NULL;
+ newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
+ newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
+
+ sock_reset_flag(newsk, SOCK_DONE);
+ skb_queue_head_init(&newsk->sk_error_queue);
+
+ filter = newsk->sk_filter;
+ if (filter != NULL)
+ sk_filter_charge(newsk, filter);
+
+ if (sk->sk_create_child)
+ sk->sk_create_child(sk, newsk);
+
+ if (unlikely(xfrm_sk_clone_policy(newsk))) {
+ /* It is still a raw copy of the parent, so invalidate
+ * the destructor and do a plain sk_free() */
+ newsk->sk_destruct = NULL;
+ sk_free(newsk);
+ newsk = NULL;
+ goto out;
+ }
+
+ newsk->sk_err = 0;
+ newsk->sk_priority = 0;
+ atomic_set(&newsk->sk_refcnt, 2);
+
+ set_vx_info(&newsk->sk_vx_info, sk->sk_vx_info);
+ newsk->sk_xid = sk->sk_xid;
+ vx_sock_inc(newsk);
+ set_nx_info(&newsk->sk_nx_info, sk->sk_nx_info);
+ newsk->sk_nid = sk->sk_nid;
+
+ /*
+ * Increment the counter in the same struct proto as the master
+ * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
+ * is the same as sk->sk_prot->socks, as this field was copied
+ * with memcpy).
+ *
+ * This _changes_ the previous behaviour, where
+ * tcp_create_openreq_child always incremented the
+ * equivalent of tcp_prot->socks (inet_sock_nr), so this has
+ * to be taken into account in all callers. -acme
+ */
+ sk_refcnt_debug_inc(newsk);
+ newsk->sk_socket = NULL;
+ newsk->sk_sleep = NULL;
+
+ if (newsk->sk_prot->sockets_allocated)
+ atomic_inc(newsk->sk_prot->sockets_allocated);
+ }
+out:
+ return newsk;
+}
+
+EXPORT_SYMBOL_GPL(sk_clone);
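/*
 * Editor's sketch (not part of this patch): a protocol creating a child
 * socket for an accepted connection clones the listener roughly like
 * this; my_accept_child() is hypothetical.
 */
#if 0
static struct sock *my_accept_child(struct sock *listener)
{
	struct sock *child = sk_clone(listener, GFP_ATOMIC);

	if (child != NULL) {
		/* sk_clone() returns the child bh-locked with
		 * sk_refcnt == 2; protocol state was memcpy()ed from
		 * the parent and must be re-initialized wherever
		 * sharing would be unsafe. */
		child->sk_state = TCP_SYN_RECV;
	}
	return child;
}
#endif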
+void __init sk_init(void)
+{
if (num_physpages <= 4096) {
sysctl_wmem_max = 32767;
sysctl_rmem_max = 32767;
/* In case it might be waiting for more memory. */
atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
- if (!sk->sk_use_write_queue)
+ if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
sk->sk_write_space(sk);
sock_put(sk);
}
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}
+
+int sock_i_uid(struct sock *sk)
+{
+ int uid;
+
+ read_lock(&sk->sk_callback_lock);
+ uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
+ read_unlock(&sk->sk_callback_lock);
+ return uid;
+}
+
+unsigned long sock_i_ino(struct sock *sk)
+{
+ unsigned long ino;
+
+ read_lock(&sk->sk_callback_lock);
+ ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
+ read_unlock(&sk->sk_callback_lock);
+ return ino;
+}
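/*
 * Editor's sketch (not part of this patch): /proc handlers use these
 * helpers to report a socket's owning uid and inode safely, even while
 * the socket is being detached from its struct socket; my_seq_show()
 * is hypothetical.
 */
#if 0
static int my_seq_show(struct seq_file *seq, struct sock *sk)
{
	seq_printf(seq, "uid=%d ino=%lu\n", sock_i_uid(sk), sock_i_ino(sk));
	return 0;
}
#endif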
+
/*
* Allocate a skb from the socket's send buffer.
*/
-struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority)
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
+ gfp_t priority)
{
if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
struct sk_buff * skb = alloc_skb(size, priority);
/*
* Allocate a skb from the socket's receive buffer.
*/
-struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority)
+struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
+ gfp_t priority)
{
if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
struct sk_buff *skb = alloc_skb(size, priority);
/*
* Allocate a memory block from the socket's option memory buffer.
*/
-void *sock_kmalloc(struct sock *sk, int size, int priority)
+void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
if ((unsigned)size <= sysctl_optmem_max &&
atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
* Generic send/receive buffer handlers
*/
-struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
- unsigned long data_len, int noblock, int *errcode)
+static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
+ unsigned long header_len,
+ unsigned long data_len,
+ int noblock, int *errcode)
{
struct sk_buff *skb;
- unsigned int gfp_mask;
+ gfp_t gfp_mask;
long timeo;
int err;
return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
-void __lock_sock(struct sock *sk)
+static void __lock_sock(struct sock *sk)
{
DEFINE_WAIT(wait);
finish_wait(&sk->sk_lock.wq, &wait);
}
-void __release_sock(struct sock *sk)
+static void __release_sock(struct sock *sk)
{
struct sk_buff *skb = sk->sk_backlog.head;
skb->next = NULL;
sk->sk_backlog_rcv(sk, skb);
+
+ /*
+ * We are in process context here with softirqs
+ * disabled, use cond_resched_softirq() to preempt.
+ * This is safe to do because we've taken the backlog
+ * queue private:
+ */
+ cond_resched_softirq();
+
skb = next;
} while (skb != NULL);
} while((skb = sk->sk_backlog.head) != NULL);
}
+/**
+ * sk_wait_data - wait for data to arrive at sk_receive_queue
+ * @sk: sock to wait on
+ * @timeo: for how long
+ *
+ * Now socket state, including sk->sk_err, is changed only under the
+ * socket lock, hence we may omit checks after joining the wait queue.
+ * We check the receive queue before schedule() only as an optimization;
+ * it is very likely that release_sock() added new data.
+ */
+int sk_wait_data(struct sock *sk, long *timeo)
+{
+ int rc;
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ finish_wait(sk->sk_sleep, &wait);
+ return rc;
+}
+
+EXPORT_SYMBOL(sk_wait_data);
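/*
 * Editor's sketch (not part of this patch): a blocking recvmsg
 * implementation loops on sk_wait_data() under the socket lock until
 * data arrives, the timeout expires, or a signal is pending; `sk' and
 * `flags' are as in a protocol's recvmsg method.
 */
#if 0
	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	lock_sock(sk);
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (!timeo || signal_pending(current))
			break;	/* caller returns -EAGAIN / -ERESTARTSYS */
		sk_wait_data(sk, &timeo);	/* drops and retakes the lock */
	}
	release_sock(sk);
#endif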
+
/*
* Set of default routines for initialising struct proto_ops when
* the protocol does not support a particular function. In certain
* function, some default processing is provided.
*/
-int sock_no_release(struct socket *sock)
-{
- return 0;
-}
-
int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
return -EOPNOTSUPP;
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
ssize_t res;
- struct msghdr msg;
- struct iovec iov;
- mm_segment_t old_fs;
- char *kaddr;
-
- kaddr = kmap(page);
-
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_iov = &iov;
- msg.msg_iovlen = 1;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = flags;
-
- /* This cast is ok because of the "set_fs(KERNEL_DS)" */
- iov.iov_base = (void __user *) (kaddr + offset);
+ struct msghdr msg = {.msg_flags = flags};
+ struct kvec iov;
+ char *kaddr = kmap(page);
+ iov.iov_base = kaddr + offset;
iov.iov_len = size;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- res = sock_sendmsg(sock, &msg, size);
- set_fs(old_fs);
-
+ res = kernel_sendmsg(sock, &msg, &iov, 1, size);
kunmap(page);
return res;
}
* Default Socket Callbacks
*/
-void sock_def_wakeup(struct sock *sk)
+static void sock_def_wakeup(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
read_unlock(&sk->sk_callback_lock);
}
-void sock_def_error_report(struct sock *sk)
+static void sock_def_error_report(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
read_unlock(&sk->sk_callback_lock);
}
-void sock_def_readable(struct sock *sk, int len)
+static void sock_def_readable(struct sock *sk, int len)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
read_unlock(&sk->sk_callback_lock);
}
-void sock_def_write_space(struct sock *sk)
+static void sock_def_write_space(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);
read_unlock(&sk->sk_callback_lock);
}
-void sock_def_destruct(struct sock *sk)
+static void sock_def_destruct(struct sock *sk)
{
- if (sk->sk_protinfo)
- kfree(sk->sk_protinfo);
+ kfree(sk->sk_protinfo);
}
void sk_send_sigurg(struct sock *sk)
sk_wake_async(sk, 3, POLL_PRI);
}
+void sk_reset_timer(struct sock *sk, struct timer_list* timer,
+ unsigned long expires)
+{
+ if (!mod_timer(timer, expires))
+ sock_hold(sk);
+}
+
+EXPORT_SYMBOL(sk_reset_timer);
+
+void sk_stop_timer(struct sock *sk, struct timer_list* timer)
+{
+ if (timer_pending(timer) && del_timer(timer))
+ __sock_put(sk);
+}
+
+EXPORT_SYMBOL(sk_stop_timer);
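/*
 * Editor's sketch (not part of this patch): sk_reset_timer() takes a
 * socket reference when it arms the timer, so the timer handler must
 * drop it with sock_put(); my_timer_handler() is hypothetical and would
 * be armed with e.g. sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ).
 */
#if 0
static void my_timer_handler(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	/* ... protocol timeout processing ... */

	sock_put(sk);	/* pairs with the sock_hold() in sk_reset_timer() */
}
#endif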
+
void sock_init_data(struct socket *sock, struct sock *sk)
{
skb_queue_head_init(&sk->sk_receive_queue);
skb_queue_head_init(&sk->sk_write_queue);
skb_queue_head_init(&sk->sk_error_queue);
+ sk->sk_send_head = NULL;
+
init_timer(&sk->sk_timer);
sk->sk_allocation = GFP_KERNEL;
sk->sk_rcvbuf = sysctl_rmem_default;
sk->sk_sndbuf = sysctl_wmem_default;
sk->sk_state = TCP_CLOSE;
- sk->sk_zapped = 1;
sk->sk_socket = sock;
+ sock_set_flag(sk, SOCK_ZAPPED);
+
if(sock)
{
sk->sk_type = sock->type;
} else
sk->sk_sleep = NULL;
- sk->sk_dst_lock = RW_LOCK_UNLOCKED;
- sk->sk_callback_lock = RW_LOCK_UNLOCKED;
+ rwlock_init(&sk->sk_dst_lock);
+ rwlock_init(&sk->sk_callback_lock);
sk->sk_state_change = sock_def_wakeup;
sk->sk_data_ready = sock_def_readable;
sk->sk_error_report = sock_def_error_report;
sk->sk_destruct = sock_def_destruct;
+ sk->sk_sndmsg_page = NULL;
+ sk->sk_sndmsg_off = 0;
+
sk->sk_peercred.pid = 0;
sk->sk_peercred.uid = -1;
sk->sk_peercred.gid = -1;
+ sk->sk_write_pending = 0;
sk->sk_rcvlowat = 1;
sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
- sk->sk_owner = NULL;
sk->sk_stamp.tv_sec = -1L;
sk->sk_stamp.tv_usec = -1L;
+ set_vx_info(&sk->sk_vx_info, current->vx_info);
+ sk->sk_xid = vx_current_xid();
+ vx_sock_inc(sk);
+ set_nx_info(&sk->sk_nx_info, current->nx_info);
+ sk->sk_nid = nx_current_nid();
atomic_set(&sk->sk_refcnt, 1);
}
}
EXPORT_SYMBOL(release_sock);
-/* When > 0 there are consumers of rx skb time stamps */
-atomic_t netstamp_needed = ATOMIC_INIT(0);
-
-int sock_get_timestamp(struct sock *sk, struct timeval *userstamp)
+int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
if (!sock_flag(sk, SOCK_TIMESTAMP))
sock_enable_timestamp(sk);
{
if (!sock_flag(sk, SOCK_TIMESTAMP)) {
sock_set_flag(sk, SOCK_TIMESTAMP);
- atomic_inc(&netstamp_needed);
+ net_enable_timestamp();
}
}
EXPORT_SYMBOL(sock_enable_timestamp);
-void sock_disable_timestamp(struct sock *sk)
-{
- if (sock_flag(sk, SOCK_TIMESTAMP)) {
- sock_reset_flag(sk, SOCK_TIMESTAMP);
- atomic_dec(&netstamp_needed);
+/*
+ * Get a socket option on a socket.
+ *
+ * FIX: POSIX 1003.1g is very ambiguous here. It states that
+ * asynchronous errors should be reported by getsockopt. We assume
+ * this means only if you specify SO_ERROR (otherwise what's the point of it).
+ */
+int sock_common_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+
+ return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
+}
+
+EXPORT_SYMBOL(sock_common_getsockopt);
+
+#ifdef CONFIG_COMPAT
+int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+
+ if (sk->sk_prot->compat_getsockopt != NULL)
+ return sk->sk_prot->compat_getsockopt(sk, level, optname,
+ optval, optlen);
+ return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
+}
+EXPORT_SYMBOL(compat_sock_common_getsockopt);
+#endif
+
+int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t size, int flags)
+{
+ struct sock *sk = sock->sk;
+ int addr_len = 0;
+ int err;
+
+ err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
+ flags & ~MSG_DONTWAIT, &addr_len);
+ if (err >= 0)
+ msg->msg_namelen = addr_len;
+ return err;
+}
+
+EXPORT_SYMBOL(sock_common_recvmsg);
+
+/*
+ * Set socket options on an inet socket.
+ */
+int sock_common_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int optlen)
+{
+ struct sock *sk = sock->sk;
+
+ return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
+}
+
+EXPORT_SYMBOL(sock_common_setsockopt);
+
+#ifdef CONFIG_COMPAT
+int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int optlen)
+{
+ struct sock *sk = sock->sk;
+
+ if (sk->sk_prot->compat_setsockopt != NULL)
+ return sk->sk_prot->compat_setsockopt(sk, level, optname,
+ optval, optlen);
+ return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
+}
+EXPORT_SYMBOL(compat_sock_common_setsockopt);
+#endif
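/*
 * Editor's illustration (not part of this patch): address families can
 * wire these common helpers straight into their struct proto_ops;
 * my_proto_ops is hypothetical and abbreviated.
 */
#if 0
static struct proto_ops my_proto_ops = {
	.family		= PF_INET,
	.owner		= THIS_MODULE,
	.setsockopt	= sock_common_setsockopt,
	.getsockopt	= sock_common_getsockopt,
	.recvmsg	= sock_common_recvmsg,
	/* ... remaining ops ... */
};
#endif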
+
+void sk_common_release(struct sock *sk)
+{
+ if (sk->sk_prot->destroy)
+ sk->sk_prot->destroy(sk);
+
+ /*
+ * Observation: when sk_common_release is called, processes have
+ * no access to the socket, but the network stack still does.
+ * Step one, detach it from networking:
+ *
+ * A. Remove from hash tables.
+ */
+
+ sk->sk_prot->unhash(sk);
+
+ /*
+ * At this point the socket cannot receive new packets, but it is
+ * possible that some packets are in flight, because some CPU runs the
+ * receiver and did a hash table lookup before we unhashed the socket.
+ * They will reach the receive queue and will be purged by the socket
+ * destructor.
+ *
+ * Also we still have packets pending on the receive queue and probably
+ * our own packets waiting in device queues. sock_destroy will drain the
+ * receive queue, but transmitted packets will delay socket destruction
+ * until the last reference is released.
+ */
+
+ sock_orphan(sk);
+
+ xfrm_sk_free_policy(sk);
+
+ sk_refcnt_debug_release(sk);
+ sock_put(sk);
+}
+
+EXPORT_SYMBOL(sk_common_release);
+
+static DEFINE_RWLOCK(proto_list_lock);
+static LIST_HEAD(proto_list);
+
+int proto_register(struct proto *prot, int alloc_slab)
+{
+ char *request_sock_slab_name = NULL;
+ char *timewait_sock_slab_name;
+ int rc = -ENOBUFS;
+
+ if (alloc_slab) {
+ prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL, NULL);
+
+ if (prot->slab == NULL) {
+ printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
+ prot->name);
+ goto out;
+ }
+
+ if (prot->rsk_prot != NULL) {
+ static const char mask[] = "request_sock_%s";
+
+ request_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+ if (request_sock_slab_name == NULL)
+ goto out_free_sock_slab;
+
+ sprintf(request_sock_slab_name, mask, prot->name);
+ prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name,
+ prot->rsk_prot->obj_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL, NULL);
+
+ if (prot->rsk_prot->slab == NULL) {
+ printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
+ prot->name);
+ goto out_free_request_sock_slab_name;
+ }
+ }
+
+ if (prot->twsk_prot != NULL) {
+ static const char mask[] = "tw_sock_%s";
+
+ timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+
+ if (timewait_sock_slab_name == NULL)
+ goto out_free_request_sock_slab;
+
+ sprintf(timewait_sock_slab_name, mask, prot->name);
+ prot->twsk_prot->twsk_slab =
+ kmem_cache_create(timewait_sock_slab_name,
+ prot->twsk_prot->twsk_obj_size,
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (prot->twsk_prot->twsk_slab == NULL)
+ goto out_free_timewait_sock_slab_name;
+ }
+ }
+
+ write_lock(&proto_list_lock);
+ list_add(&prot->node, &proto_list);
+ write_unlock(&proto_list_lock);
+ rc = 0;
+out:
+ return rc;
+out_free_timewait_sock_slab_name:
+ kfree(timewait_sock_slab_name);
+out_free_request_sock_slab:
+ if (prot->rsk_prot && prot->rsk_prot->slab) {
+ kmem_cache_destroy(prot->rsk_prot->slab);
+ prot->rsk_prot->slab = NULL;
+ }
+out_free_request_sock_slab_name:
+ kfree(request_sock_slab_name);
+out_free_sock_slab:
+ kmem_cache_destroy(prot->slab);
+ prot->slab = NULL;
+ goto out;
+}
+
+EXPORT_SYMBOL(proto_register);
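/*
 * Editor's sketch (not part of this patch): a protocol registers its
 * struct proto once at init time; with alloc_slab == 1 a kmem cache
 * sized by .obj_size is created for its sockets. my_prot and
 * struct my_sock are hypothetical.
 */
#if 0
struct my_sock {
	struct sock sk;		/* struct sock must be the first member */
	/* protocol-private state follows */
};

static struct proto my_prot = {
	.name		= "MYPROTO",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct my_sock),
};

static int __init my_proto_init(void)
{
	return proto_register(&my_prot, 1);
}
#endif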
+
+void proto_unregister(struct proto *prot)
+{
+ write_lock(&proto_list_lock);
+ list_del(&prot->node);
+ write_unlock(&proto_list_lock);
+
+ if (prot->slab != NULL) {
+ kmem_cache_destroy(prot->slab);
+ prot->slab = NULL;
+ }
+
+ if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
+ const char *name = kmem_cache_name(prot->rsk_prot->slab);
+
+ kmem_cache_destroy(prot->rsk_prot->slab);
+ kfree(name);
+ prot->rsk_prot->slab = NULL;
+ }
+
+ if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
+ const char *name = kmem_cache_name(prot->twsk_prot->twsk_slab);
+
+ kmem_cache_destroy(prot->twsk_prot->twsk_slab);
+ kfree(name);
+ prot->twsk_prot->twsk_slab = NULL;
}
}
-EXPORT_SYMBOL(sock_disable_timestamp);
-EXPORT_SYMBOL(__lock_sock);
-EXPORT_SYMBOL(__release_sock);
+EXPORT_SYMBOL(proto_unregister);
+
+#ifdef CONFIG_PROC_FS
+static inline struct proto *__proto_head(void)
+{
+ return list_entry(proto_list.next, struct proto, node);
+}
+
+static inline struct proto *proto_head(void)
+{
+ return list_empty(&proto_list) ? NULL : __proto_head();
+}
+
+static inline struct proto *proto_next(struct proto *proto)
+{
+ return proto->node.next == &proto_list ? NULL :
+ list_entry(proto->node.next, struct proto, node);
+}
+
+static inline struct proto *proto_get_idx(loff_t pos)
+{
+ struct proto *proto;
+ loff_t i = 0;
+
+ list_for_each_entry(proto, &proto_list, node)
+ if (i++ == pos)
+ goto out;
+
+ proto = NULL;
+out:
+ return proto;
+}
+
+static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ read_lock(&proto_list_lock);
+ return *pos ? proto_get_idx(*pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+ return v == SEQ_START_TOKEN ? proto_head() : proto_next(v);
+}
+
+static void proto_seq_stop(struct seq_file *seq, void *v)
+{
+ read_unlock(&proto_list_lock);
+}
+
+static char proto_method_implemented(const void *method)
+{
+ return method == NULL ? 'n' : 'y';
+}
+
+static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
+{
+ seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
+ "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
+ proto->name,
+ proto->obj_size,
+ proto->sockets_allocated != NULL ? atomic_read(proto->sockets_allocated) : -1,
+ proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
+ proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
+ proto->max_header,
+ proto->slab == NULL ? "no" : "yes",
+ module_name(proto->owner),
+ proto_method_implemented(proto->close),
+ proto_method_implemented(proto->connect),
+ proto_method_implemented(proto->disconnect),
+ proto_method_implemented(proto->accept),
+ proto_method_implemented(proto->ioctl),
+ proto_method_implemented(proto->init),
+ proto_method_implemented(proto->destroy),
+ proto_method_implemented(proto->shutdown),
+ proto_method_implemented(proto->setsockopt),
+ proto_method_implemented(proto->getsockopt),
+ proto_method_implemented(proto->sendmsg),
+ proto_method_implemented(proto->recvmsg),
+ proto_method_implemented(proto->sendpage),
+ proto_method_implemented(proto->bind),
+ proto_method_implemented(proto->backlog_rcv),
+ proto_method_implemented(proto->hash),
+ proto_method_implemented(proto->unhash),
+ proto_method_implemented(proto->get_port),
+ proto_method_implemented(proto->enter_memory_pressure));
+}
+
+static int proto_seq_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN)
+ seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
+ "protocol",
+ "size",
+ "sockets",
+ "memory",
+ "press",
+ "maxhdr",
+ "slab",
+ "module",
+ "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
+ else
+ proto_seq_printf(seq, v);
+ return 0;
+}
+
+static struct seq_operations proto_seq_ops = {
+ .start = proto_seq_start,
+ .next = proto_seq_next,
+ .stop = proto_seq_stop,
+ .show = proto_seq_show,
+};
+
+static int proto_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &proto_seq_ops);
+}
+
+static struct file_operations proto_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = proto_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init proto_init(void)
+{
+ /* register /proc/net/protocols */
+ return proc_net_fops_create("protocols", S_IRUGO, &proto_seq_fops) == NULL ? -ENOBUFS : 0;
+}
+
+subsys_initcall(proto_init);
+
+#endif /* PROC_FS */
+
EXPORT_SYMBOL(sk_alloc);
EXPORT_SYMBOL(sk_free);
EXPORT_SYMBOL(sk_send_sigurg);
-EXPORT_SYMBOL(sock_alloc_send_pskb);
EXPORT_SYMBOL(sock_alloc_send_skb);
-EXPORT_SYMBOL(sock_getsockopt);
EXPORT_SYMBOL(sock_init_data);
EXPORT_SYMBOL(sock_kfree_s);
EXPORT_SYMBOL(sock_kmalloc);
EXPORT_SYMBOL(sock_no_mmap);
EXPORT_SYMBOL(sock_no_poll);
EXPORT_SYMBOL(sock_no_recvmsg);
-EXPORT_SYMBOL(sock_no_release);
EXPORT_SYMBOL(sock_no_sendmsg);
EXPORT_SYMBOL(sock_no_sendpage);
EXPORT_SYMBOL(sock_no_setsockopt);
EXPORT_SYMBOL(sock_no_shutdown);
EXPORT_SYMBOL(sock_no_socketpair);
EXPORT_SYMBOL(sock_rfree);
-EXPORT_SYMBOL(sock_rmalloc);
EXPORT_SYMBOL(sock_setsockopt);
EXPORT_SYMBOL(sock_wfree);
EXPORT_SYMBOL(sock_wmalloc);
-#ifdef CONFIG_SYSCTL
+EXPORT_SYMBOL(sock_i_uid);
+EXPORT_SYMBOL(sock_i_ino);
EXPORT_SYMBOL(sysctl_optmem_max);
+#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_rmem_max);
EXPORT_SYMBOL(sysctl_wmem_max);
#endif