/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 * Version:	$Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					work.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/capability.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>

#include <linux/filter.h>
#include <linux/vs_socket.h>
#include <linux/vs_limit.h>
#include <linux/vs_context.h>
/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms. This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
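/*
 * Worked example (a sketch; sizeof(struct sk_buff) varies by platform and
 * kernel config, so the 240-byte figure below is only an assumed value):
 *
 *	_SK_MEM_OVERHEAD = 240 + 256          = 496 bytes per queued packet
 *	SK_WMEM_MAX      = 496 * 256 = 126976 ~ 124 KB of send-buffer headroom
 */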
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max = SK_WMEM_MAX;
__u32 sysctl_rmem_max = SK_RMEM_MAX;
__u32 sysctl_wmem_default = SK_WMEM_MAX;
__u32 sysctl_rmem_default = SK_RMEM_MAX;
/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max = sizeof(unsigned long)*(2*UIO_MAXIOV + 512);
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
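/*
 * Illustrative userspace sketch (not kernel code) of the path that feeds
 * sock_set_timeout(): SO_RCVTIMEO carries a struct timeval, which the
 * kernel converts to jiffies above. The 5 second figure is arbitrary.
 */
#if 0
#include <sys/socket.h>
#include <sys/time.h>

static int set_recv_timeout(int fd)
{
	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };

	/* Reaches sock_set_timeout() via sock_setsockopt(). */
	return setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
}
#endif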
static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}
static void sock_disable_timestamp(struct sock *sk)
{
	if (sock_flag(sk, SOCK_TIMESTAMP)) {
		sock_reset_flag(sk, SOCK_TIMESTAMP);
		net_disable_timestamp();
	}
}
/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct sk_filter *filter;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

#ifdef SO_DONTLINGER		/* Compatibility item... */
	if (optname == SO_DONTLINGER) {
		lock_sock(sk);
		sock_reset_flag(sk, SOCK_LINGER);
		release_sock(sk);
		return 0;
	}
#endif

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else if (valbool)
			sock_set_flag(sk, SOCK_DBG);
		else
			sock_reset_flag(sk, SOCK_DBG);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_DONTROUTE:
		if (valbool)
			sock_set_flag(sk, SOCK_LOCALROUTE);
		else
			sock_reset_flag(sk, SOCK_LOCALROUTE);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/* FIXME: is this lower bound the right one? */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

#if defined(CONFIG_VNET) || defined(CONFIG_VNET_MODULE)
	case SO_SETXID:
		if (val < 0 || val > MAX_S_CONTEXT) {
			ret = -EINVAL;
			break;
		}
		sk->sk_xid = val;
		break;
#endif

	case SO_TIMESTAMP:
		if (valbool) {
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk);
		} else
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

#ifdef CONFIG_NETDEVICES
	case SO_BINDTODEVICE:
	{
		char devname[IFNAMSIZ];

		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			break;
		}

		/* Bind this socket to a particular device like "eth0",
		 * as specified in the passed interface name. If the
		 * name is "" or the option length is zero the socket
		 * is not bound.
		 */

		if (!valbool) {
			sk->sk_bound_dev_if = 0;
		} else {
			if (optlen > IFNAMSIZ - 1)
				optlen = IFNAMSIZ - 1;
			memset(devname, 0, sizeof(devname));
			if (copy_from_user(devname, optval, optlen)) {
				ret = -EFAULT;
				break;
			}

			/* Remove any cached route for this socket. */
			sk_dst_reset(sk);

			if (devname[0] == '\0') {
				sk->sk_bound_dev_if = 0;
			} else {
				struct net_device *dev = dev_get_by_name(devname);
				if (!dev) {
					ret = -ENODEV;
					break;
				}
				sk->sk_bound_dev_if = dev->ifindex;
				dev_put(dev);
			}
		}
		break;
	}
#endif

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		spin_lock_bh(&sk->sk_lock.slock);
		filter = sk->sk_filter;
		if (filter) {
			sk->sk_filter = NULL;
			spin_unlock_bh(&sk->sk_lock.slock);
			sk_filter_release(sk, filter);
			break;
		}
		spin_unlock_bh(&sk->sk_lock.slock);
		ret = -ENONET;
		break;

	/* We implement the SO_SNDLOWAT etc to
	   not be settable (1003.1g 5.3) */
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
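/*
 * Illustrative userspace sketch (not kernel code) showing the effect of the
 * val * 2 bookkeeping above: the value read back is roughly double the
 * requested one (capped by sysctl_wmem_max). The 64 KB request is arbitrary.
 */
#if 0
#include <stdio.h>
#include <sys/socket.h>

static void show_sndbuf_doubling(int fd)
{
	int req = 65536, got;
	socklen_t len = sizeof(got);

	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &req, sizeof(req));
	getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &got, &len);
	/* got is about 2 * req: sock_setsockopt() stores val * 2 so the
	 * accounting covers struct sk_buff overhead as well as payload. */
	printf("requested %d, effective %d\n", req, got);
}
#endif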
int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	unsigned int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP);
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
		if (len > sizeof(sk->sk_peercred))
			len = sizeof(sk->sk_peercred);
		if (copy_to_user(optval, &sk->sk_peercred, len))
			return -EFAULT;
		goto lenout;

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec(sock, optval, optlen, len);

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
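/*
 * Illustrative userspace sketch (not kernel code): the classic consumer of
 * the SO_ERROR branch above is the non-blocking connect() pattern, where
 * the pending asynchronous error is fetched (and cleared) once the socket
 * polls writable.
 */
#if 0
#include <sys/socket.h>

static int pending_error(int fd)
{
	int err = 0;
	socklen_t len = sizeof(err);

	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
		return -1;
	return err;	/* 0 means the connect completed successfully */
}
#endif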
/**
 *	sk_alloc - All socket objects are allocated here
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 *	@zero_it: if we should zero the newly allocated sock
 */
struct sock *sk_alloc(int family, gfp_t priority,
		      struct proto *prot, int zero_it)
{
	struct sock *sk = NULL;
	kmem_cache_t *slab = prot->slab;

	if (slab != NULL)
		sk = kmem_cache_alloc(slab, priority);
	else
		sk = kmalloc(prot->obj_size, priority);

	if (sk) {
		if (zero_it) {
			memset(sk, 0, prot->obj_size);
			sk->sk_family = family;
			/*
			 * See comment in struct sock definition to understand
			 * why we need sk_prot_creator -acme
			 */
			sk->sk_prot = sk->sk_prot_creator = prot;
			sock_lock_init(sk);
		}

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free;
	}
	return sk;

out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}
void sk_free(struct sock *sk)
{
	struct sk_filter *filter;
	struct module *owner = sk->sk_prot_creator->owner;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = sk->sk_filter;
	if (filter) {
		sk_filter_release(sk, filter);
		sk->sk_filter = NULL;
	}

	sock_disable_timestamp(sk);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __FUNCTION__, atomic_read(&sk->sk_omem_alloc));

	security_sk_free(sk);

	clr_vx_info(&sk->sk_vx_info);
	clr_nx_info(&sk->sk_nx_info);

	if (sk->sk_prot_creator->slab != NULL)
		kmem_cache_free(sk->sk_prot_creator->slab, sk);
	else
		kfree(sk);
	module_put(owner);
}
struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0);

	if (newsk != NULL) {
		struct sk_filter *filter;

		memcpy(newsk, sk, sk->sk_prot->obj_size);

		/* SANITY */
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);

		atomic_set(&newsk->sk_rmem_alloc, 0);
		atomic_set(&newsk->sk_wmem_alloc, 0);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);

		rwlock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc	= 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = newsk->sk_filter;
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still a raw copy of the parent, so invalidate
			 * the destructor and do a plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		atomic_set(&newsk->sk_refcnt, 2);

		set_vx_info(&newsk->sk_vx_info, sk->sk_vx_info);
		newsk->sk_xid = sk->sk_xid;
		set_nx_info(&newsk->sk_nx_info, sk->sk_nx_info);
		newsk->sk_nid = sk->sk_nid;

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		newsk->sk_socket = NULL;
		newsk->sk_sleep  = NULL;

		if (newsk->sk_prot->sockets_allocated)
			atomic_inc(newsk->sk_prot->sockets_allocated);
	}
out:
	return newsk;
}

EXPORT_SYMBOL_GPL(sk_clone);
void __init sk_init(void)
{
	if (num_physpages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (num_physpages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}
/*
 *	Simple resource managers for sockets.
 */

/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* In case it might be waiting for more memory. */
	atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
		sk->sk_write_space(sk);
	sock_put(sk);
}

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}
int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock(&sk->sk_callback_lock);
	return uid;
}

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock(&sk->sk_callback_lock);
	return ino;
}
/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}
/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think, these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk->sk_sleep, &wait);
	return timeo;
}
/*
 *	Generic send/receive buffer handlers
 */

static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
					    unsigned long header_len,
					    unsigned long data_len,
					    int noblock, int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE : data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
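/*
 * Sketch of a typical datagram-side caller; the function name and the
 * 16-byte headroom are illustrative only. On failure the reason comes
 * back through *err as a negative errno (e.g. -EAGAIN, -EPIPE).
 */
#if 0
static struct sk_buff *example_get_send_skb(struct sock *sk, size_t len,
					    int noblock, int *err)
{
	struct sk_buff *skb = sock_alloc_send_skb(sk, len, noblock, err);

	if (skb)
		skb_reserve(skb, 16);	/* leave room for headers */
	return skb;
}
#endif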
static void __lock_sock(struct sock *sk)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			sk->sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);
}
/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk->sk_sleep, &wait);
	return rc;
}

EXPORT_SYMBOL(sk_wait_data);
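/*
 * Sketch of a typical caller (a blocking recvmsg path); the function name
 * is illustrative and error handling is elided. sk_wait_data() returns
 * nonzero once the receive queue is non-empty, and *timeo is updated with
 * the time remaining.
 */
#if 0
static int example_wait_for_data(struct sock *sk, int noblock)
{
	long timeo = sock_rcvtimeo(sk, noblock);

	lock_sock(sk);
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (!timeo || signal_pending(current))
			break;			/* would block or interrupted */
		sk_wait_data(sk, &timeo);
	}
	release_sock(sk);
	return skb_queue_empty(&sk->sk_receive_queue) ? -EAGAIN : 0;
}
#endif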
/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
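/*
 * Sketch of how a protocol typically wires these defaults into its
 * struct proto_ops; PF_EXAMPLE and the field selection are illustrative,
 * not an exhaustive or real ops table.
 */
#if 0
static const struct proto_ops example_ops = {
	.family		= PF_EXAMPLE,
	.owner		= THIS_MODULE,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.sendpage	= sock_no_sendpage,
};
#endif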
/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_error_report(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	sk_wake_async(sk, 0, POLL_ERR);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_readable(struct sock *sk, int len)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	sk_wake_async(sk, 1, POLL_IN);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible(sk->sk_sleep);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, 2, POLL_OUT);
	}

	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, 3, POLL_PRI);
}
void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}

EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}

EXPORT_SYMBOL(sk_stop_timer);
void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);

	sk->sk_send_head = NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf	  = sysctl_rmem_default;
	sk->sk_sndbuf	  = sysctl_wmem_default;
	sk->sk_state	  = TCP_CLOSE;
	sk->sk_socket	  = sock;

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type  = sock->type;
		sk->sk_sleep = &sock->wait;
		sock->sk     = sk;
	} else
		sk->sk_sleep = NULL;

	rwlock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready   = sock_def_readable;
	sk->sk_write_space  = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct     = sock_def_destruct;

	sk->sk_sndmsg_page = NULL;
	sk->sk_sndmsg_off  = 0;

	sk->sk_peercred.pid  = 0;
	sk->sk_peercred.uid  = -1;
	sk->sk_peercred.gid  = -1;
	sk->sk_write_pending = 0;
	sk->sk_rcvlowat	     = 1;
	sk->sk_rcvtimeo	     = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo	     = MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp.tv_sec  = -1L;
	sk->sk_stamp.tv_usec = -1L;

	set_vx_info(&sk->sk_vx_info, current->vx_info);
	sk->sk_xid = vx_current_xid();
	set_nx_info(&sk->sk_nx_info, current->nx_info);
	sk->sk_nid = nx_current_nid();
	atomic_set(&sk->sk_refcnt, 1);
}
void fastcall lock_sock(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owner)
		__lock_sock(sk);
	sk->sk_lock.owner = (void *)1;
	spin_unlock_bh(&sk->sk_lock.slock);
}

EXPORT_SYMBOL(lock_sock);

void fastcall release_sock(struct sock *sk)
{
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);
	sk->sk_lock.owner = NULL;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);
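/*
 * Sketch of the canonical process-context locking pattern built on the two
 * functions above; the function name is illustrative.
 */
#if 0
static void example_touch_socket(struct sock *sk)
{
	lock_sock(sk);	/* may sleep; softirq input now queues to the backlog */
	/* ... modify socket state safely here ... */
	release_sock(sk);	/* processes the backlog, then wakes lock waiters */
}
#endif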
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk);
	if (sk->sk_stamp.tv_sec == -1)
		return -ENOENT;
	if (sk->sk_stamp.tv_sec == 0)
		do_gettimeofday(&sk->sk_stamp);
	return copy_to_user(userstamp, &sk->sk_stamp, sizeof(struct timeval)) ?
		-EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

void sock_enable_timestamp(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_TIMESTAMP)) {
		sock_set_flag(sk, SOCK_TIMESTAMP);
		net_enable_timestamp();
	}
}
EXPORT_SYMBOL(sock_enable_timestamp);
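/*
 * Illustrative userspace sketch (not kernel code): SIOCGSTAMP is the usual
 * way into sock_get_timestamp() above, returning the arrival time of the
 * last packet received on the socket.
 */
#if 0
#include <sys/ioctl.h>
#include <sys/time.h>
#include <linux/sockios.h>

static int last_packet_time(int fd, struct timeval *tv)
{
	return ioctl(fd, SIOCGSTAMP, tv);
}
#endif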
/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(sock_common_getsockopt);

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}

EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(sock_common_setsockopt);
void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the net still has.
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did a hash table lookup before we unhashed the
	 * socket. They will reach the receive queue and will be purged by
	 * the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);
	sock_put(sk);
}

EXPORT_SYMBOL(sk_common_release);
static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);

int proto_register(struct proto *prot, int alloc_slab)
{
	char *request_sock_slab_name = NULL;
	char *timewait_sock_slab_name;
	int rc = -ENOBUFS;

	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					       SLAB_HWCACHE_ALIGN, NULL, NULL);

		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
			       prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			static const char mask[] = "request_sock_%s";

			request_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
			if (request_sock_slab_name == NULL)
				goto out_free_sock_slab;

			sprintf(request_sock_slab_name, mask, prot->name);
			prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL, NULL);

			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				       prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			static const char mask[] = "tw_sock_%s";

			timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);

			if (timewait_sock_slab_name == NULL)
				goto out_free_request_sock_slab;

			sprintf(timewait_sock_slab_name, mask, prot->name);
			prot->twsk_prot->twsk_slab =
				kmem_cache_create(timewait_sock_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0, SLAB_HWCACHE_ALIGN,
						  NULL, NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	write_lock(&proto_list_lock);
	list_add(&prot->node, &proto_list);
	write_unlock(&proto_list_lock);
	rc = 0;
out:
	return rc;
out_free_timewait_sock_slab_name:
	kfree(timewait_sock_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	kfree(request_sock_slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
	goto out;
}

EXPORT_SYMBOL(proto_register);
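/*
 * Sketch of a minimal caller from a protocol module; example_proto,
 * struct example_sock and the init function are hypothetical.
 */
#if 0
static struct proto example_proto = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct example_sock),
};

static int __init example_proto_init(void)
{
	/* Second argument requests a dedicated slab cache for socks. */
	return proto_register(&example_proto, 1);
}
#endif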
void proto_unregister(struct proto *prot)
{
	write_lock(&proto_list_lock);
	list_del(&prot->node);
	write_unlock(&proto_list_lock);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		const char *name = kmem_cache_name(prot->rsk_prot->slab);

		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		const char *name = kmem_cache_name(prot->twsk_prot->twsk_slab);

		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}

EXPORT_SYMBOL(proto_unregister);
#ifdef CONFIG_PROC_FS
static inline struct proto *__proto_head(void)
{
	return list_entry(proto_list.next, struct proto, node);
}

static inline struct proto *proto_head(void)
{
	return list_empty(&proto_list) ? NULL : __proto_head();
}

static inline struct proto *proto_next(struct proto *proto)
{
	return proto->node.next == &proto_list ? NULL :
		list_entry(proto->node.next, struct proto, node);
}

static inline struct proto *proto_get_idx(loff_t pos)
{
	struct proto *proto;
	loff_t i = 0;

	list_for_each_entry(proto, &proto_list, node)
		if (i++ == pos)
			goto out;

	proto = NULL;
out:
	return proto;
}

static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&proto_list_lock);
	return *pos ? proto_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return v == SEQ_START_TOKEN ? proto_head() : proto_next(v);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&proto_list_lock);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}
static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   proto->sockets_allocated != NULL ? atomic_read(proto->sockets_allocated) : -1,
		   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, v);
	return 0;
}
static struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &proto_seq_ops);
}

static struct file_operations proto_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = proto_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

static int __init proto_init(void)
{
	/* register /proc/net/protocols */
	return proc_net_fops_create("protocols", S_IRUGO, &proto_seq_fops) == NULL ? -ENOBUFS : 0;
}

subsys_initcall(proto_init);

#endif /* CONFIG_PROC_FS */
EXPORT_SYMBOL(sk_alloc);
EXPORT_SYMBOL(sk_free);
EXPORT_SYMBOL(sk_send_sigurg);
EXPORT_SYMBOL(sock_alloc_send_skb);
EXPORT_SYMBOL(sock_init_data);
EXPORT_SYMBOL(sock_kfree_s);
EXPORT_SYMBOL(sock_kmalloc);
EXPORT_SYMBOL(sock_no_accept);
EXPORT_SYMBOL(sock_no_bind);
EXPORT_SYMBOL(sock_no_connect);
EXPORT_SYMBOL(sock_no_getname);
EXPORT_SYMBOL(sock_no_getsockopt);
EXPORT_SYMBOL(sock_no_ioctl);
EXPORT_SYMBOL(sock_no_listen);
EXPORT_SYMBOL(sock_no_mmap);
EXPORT_SYMBOL(sock_no_poll);
EXPORT_SYMBOL(sock_no_recvmsg);
EXPORT_SYMBOL(sock_no_sendmsg);
EXPORT_SYMBOL(sock_no_sendpage);
EXPORT_SYMBOL(sock_no_setsockopt);
EXPORT_SYMBOL(sock_no_shutdown);
EXPORT_SYMBOL(sock_no_socketpair);
EXPORT_SYMBOL(sock_rfree);
EXPORT_SYMBOL(sock_setsockopt);
EXPORT_SYMBOL(sock_wfree);
EXPORT_SYMBOL(sock_wmalloc);
EXPORT_SYMBOL(sock_i_uid);
EXPORT_SYMBOL(sock_i_ino);
EXPORT_SYMBOL(sysctl_optmem_max);
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_rmem_max);
EXPORT_SYMBOL(sysctl_wmem_max);
#endif