 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 * Version:	$Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *	Alan Cox	:	Numerous verify_area() problems
 *	Alan Cox	:	Connecting on a connecting socket
 *				now returns an error for tcp.
 *	Alan Cox	:	sock->protocol is set correctly.
 *				and is not sometimes left as 0.
 *	Alan Cox	:	connect handles icmp errors on a
 *				connect properly. Unfortunately there
 *				is a restart syscall nasty there. I
 *				can't match BSD without hacking the C
 *				library. Ideas urgently sought!
 *	Alan Cox	:	Disallow bind() to addresses that are
 *				not ours - especially broadcast ones!!
 *	Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *	Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *				instead they leave that for the DESTROY timer.
 *	Alan Cox	:	Clean up error flag in accept
 *	Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *				was buggy. Put a remove_sock() in the handler
 *				for memory when we hit 0. Also altered the timer
 *				code. The ACK stuff can wait and needs major
 *	Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *				and fixed timer/inet_bh race.
 *	Alan Cox	:	Added zapped flag for TCP
 *	Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *	Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *	Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *	Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *	Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *	Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *	C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *	Pauline Middelink :	identd support
 *	Alan Cox	:	Fixed connect() taking signals I think.
 *	Alan Cox	:	SO_LINGER supported
 *	Alan Cox	:	Error reporting fixes
 *	Anonymous	:	inet_create tidied up (sk->reuse setting)
 *	Alan Cox	:	inet sockets don't set sk->type!
 *	Alan Cox	:	Split socket option code
 *	Alan Cox	:	Callbacks
 *	Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *	Alex		:	Removed restriction on inet fioctl
 *	Alan Cox	:	Splitting INET from NET core
 *	Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *	Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *	Alan Cox	:	Split IP from generic code
 *	Alan Cox	:	New kfree_skbmem()
 *	Alan Cox	:	Make SO_DEBUG superuser only.
 *	Alan Cox	:	Allow anyone to clear SO_DEBUG
 *	Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *	Alan Cox	:	Allocator for a socket is settable.
 *	Alan Cox	:	SO_ERROR includes soft errors.
 *	Alan Cox	:	Allow NULL arguments on some SO_ opts
 *	Alan Cox	:	Generic socket allocation to make hooks
 *				easier (suggested by Craig Metz).
 *	Michael Pall	:	SO_ERROR returns positive errno again
 *	Steve Whitehouse:	Added default destructor to free
 *				protocol private data.
 *	Steve Whitehouse:	Added various other default routines
 *				common to several socket families.
 *	Chris Evans	:	Call suser() check last on F_SETOWN
 *	Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *	Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *	Andi Kleen	:	Fix write_space callback
 *	Chris Evans	:	Security fixes - signedness again
 *	Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
#include <linux/capability.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <linux/filter.h>
#include <linux/vs_socket.h>
#include <linux/vs_limit.h>
#include <linux/vs_context.h>
/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms. This makes socket queueing behavior and performance
 * not depend upon such differences.
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
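
/*
 * Illustrative arithmetic (not part of the original file): assuming, purely
 * for the sake of example, that sizeof(struct sk_buff) is 256 bytes on some
 * platform, the defaults above work out to:
 *
 *	_SK_MEM_OVERHEAD = 256 + 256             = 512 bytes per queued packet
 *	SK_WMEM_MAX      = 512 * _SK_MEM_PACKETS = 512 * 256 = 131072 bytes
 *
 * A platform with a larger sk_buff gets proportionally larger defaults,
 * which is exactly the point of folding the overhead into the calculation.
 */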
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max = SK_WMEM_MAX;
__u32 sysctl_rmem_max = SK_RMEM_MAX;
__u32 sysctl_wmem_default = SK_WMEM_MAX;
__u32 sysctl_rmem_default = SK_RMEM_MAX;
/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max = sizeof(unsigned long) * (2 * UIO_MAXIOV + 512);
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
	if (optlen < sizeof(tv))
	if (copy_from_user(&tv, optval, sizeof(tv)))
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
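
/*
 * Illustrative conversion (not part of the original file): with HZ == 250, a
 * user-supplied timeout of { .tv_sec = 2, .tv_usec = 500000 } becomes
 *
 *	*timeo_p = 2 * 250 + (500000 + (1000000/250 - 1)) / (1000000/250)
 *	         = 500 + 125 = 625 jiffies
 *
 * i.e. the microsecond part is rounded up to whole jiffies.
 */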
static void sock_warn_obsolete_bsdism(const char *name)
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
static void sock_disable_timestamp(struct sock *sk)
	if (sock_flag(sk, SOCK_TIMESTAMP)) {
		sock_reset_flag(sk, SOCK_TIMESTAMP);
		net_disable_timestamp();
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int optlen)
	struct sock *sk = sock->sk;
	struct sk_filter *filter;
	 *	Options without arguments
#ifdef SO_DONTLINGER		/* Compatibility item... */
	if (optname == SO_DONTLINGER) {
		sock_reset_flag(sk, SOCK_LINGER);
	if (optlen < sizeof(int))
	if (get_user(val, (int __user *)optval))
		if (val && !capable(CAP_NET_ADMIN))
		sock_set_flag(sk, SOCK_DBG);
		sock_reset_flag(sk, SOCK_DBG);
		sk->sk_reuse = valbool;
		sock_set_flag(sk, SOCK_LOCALROUTE);
		sock_reset_flag(sk, SOCK_LOCALROUTE);
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		/* Don't error on this; BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */
		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
			sk->sk_sndbuf = val * 2;
		 *	Wake up sending tasks if we
		sk->sk_write_space(sk);
		if (!capable(CAP_NET_ADMIN)) {
		/* Don't error on this; BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */
		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/* FIXME: is this lower bound the right one? */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
			sk->sk_rcvbuf = val * 2;
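
		/*
		 * Userspace view of the doubling above -- a minimal sketch,
		 * not part of the original file.  The kernel stores twice
		 * the requested value (capped by sysctl_wmem_max or
		 * sysctl_rmem_max) to leave room for bookkeeping overhead,
		 * and getsockopt() reports the doubled value back:
		 *
		 *	int val = 65536;
		 *	socklen_t len = sizeof(val);
		 *
		 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
		 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len);
		 *	// val is now 131072, assuming 65536 <= sysctl_rmem_max
		 */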
		if (!capable(CAP_NET_ADMIN)) {
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		sk->sk_no_check = valbool;
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			sock_reset_flag(sk, SOCK_LINGER);
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
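
		/*
		 * Illustrative use of the linger option from userspace (not
		 * part of the original file): l_onoff enables lingering and
		 * l_linger is in seconds, converted to jiffies above.
		 *
		 *	struct linger ling = { .l_onoff = 1, .l_linger = 10 };
		 *
		 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &ling, sizeof(ling));
		 *	// close(fd) may now block up to ~10s while unsent data drains
		 */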
		sock_warn_obsolete_bsdism("setsockopt");
		set_bit(SOCK_PASSCRED, &sock->flags);
		clear_bit(SOCK_PASSCRED, &sock->flags);
		sock_set_flag(sk, SOCK_RCVTSTAMP);
		sock_enable_timestamp(sk);
		sock_reset_flag(sk, SOCK_RCVTSTAMP);
		sk->sk_rcvlowat = val ? : 1;
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
#ifdef CONFIG_NETDEVICES
	case SO_BINDTODEVICE:
		char devname[IFNAMSIZ];
		if (!capable(CAP_NET_RAW)) {
		/* Bind this socket to a particular device like "eth0",
		 * as specified in the passed interface name. If the
		 * name is "" or the option length is zero the socket
		 * is not bound to a device.
			sk->sk_bound_dev_if = 0;
		if (optlen > IFNAMSIZ - 1)
			optlen = IFNAMSIZ - 1;
		memset(devname, 0, sizeof(devname));
		if (copy_from_user(devname, optval, optlen)) {
		/* Remove any cached route for this socket. */
		if (devname[0] == '\0') {
			sk->sk_bound_dev_if = 0;
			struct net_device *dev = dev_get_by_name(devname);
			sk->sk_bound_dev_if = dev->ifindex;
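
		/*
		 * Illustrative use of SO_BINDTODEVICE (not part of the
		 * original file); the caller needs CAP_NET_RAW, and an
		 * empty name (or zero option length) clears the binding,
		 * as described in the comment above.
		 *
		 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", sizeof("eth0"));
		 *	...
		 *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "", 0);   // unbind
		 */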
	case SO_ATTACH_FILTER:
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
			ret = sk_attach_filter(&fprog, sk);
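
		/*
		 * Minimal illustrative classic-BPF filter (not part of the
		 * original file): a single BPF_RET instruction accepting
		 * every packet (the return value is the number of bytes to
		 * keep).  A real filter would inspect the packet first.
		 *
		 *	struct sock_filter accept_all[] = {
		 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },
		 *	};
		 *	struct sock_fprog prog = {
		 *		.len    = 1,
		 *		.filter = accept_all,
		 *	};
		 *
		 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
		 */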
	case SO_DETACH_FILTER:
		spin_lock_bh(&sk->sk_lock.slock);
		filter = sk->sk_filter;
			sk->sk_filter = NULL;
			spin_unlock_bh(&sk->sk_lock.slock);
			sk_filter_release(sk, filter);
		spin_unlock_bh(&sk->sk_lock.slock);
		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
	struct sock *sk = sock->sk;
	unsigned int lv = sizeof(int);
	if (get_user(len, optlen))
		v.val = sock_flag(sk, SOCK_DBG);
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		v.val = sk->sk_sndbuf;
		v.val = sk->sk_rcvbuf;
		v.val = sk->sk_reuse;
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		v.val = -sock_error(sk);
		v.val = xchg(&sk->sk_err_soft, 0);
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		v.val = sk->sk_no_check;
		v.val = sk->sk_priority;
		v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		sock_warn_obsolete_bsdism("getsockopt");
		v.val = sock_flag(sk, SOCK_RCVTSTAMP);
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		v.val = sk->sk_rcvlowat;
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		if (len > sizeof(sk->sk_peercred))
			len = sizeof(sk->sk_peercred);
		if (copy_to_user(optval, &sk->sk_peercred, len))
		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
		if (copy_to_user(optval, address, len))
		/* Dubious BSD thing... Probably nobody even uses it, but
		 * the UNIX standard wants it for whatever reason... -DaveM
		v.val = sk->sk_state == TCP_LISTEN;
		return security_socket_getpeersec(sock, optval, optlen, len);
		return -ENOPROTOOPT;
	if (copy_to_user(optval, &v, len))
	if (put_user(len, optlen))
 *	sk_alloc - All socket objects are allocated here
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 *	@zero_it: if we should zero the newly allocated sock
struct sock *sk_alloc(int family, gfp_t priority,
		      struct proto *prot, int zero_it)
	struct sock *sk = NULL;
	kmem_cache_t *slab = prot->slab;
		sk = kmem_cache_alloc(slab, priority);
		sk = kmalloc(prot->obj_size, priority);
		memset(sk, 0, prot->obj_size);
		sk->sk_family = family;
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		sk->sk_prot = sk->sk_prot_creator = prot;
	if (security_sk_alloc(sk, family, priority))
	if (!try_module_get(prot->owner))
		kmem_cache_free(slab, sk);
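
/*
 * Sketch of how a protocol's socket-creation path might use sk_alloc()
 * (illustrative only; "my_proto" and PF_FOO are made-up names, not part of
 * the original file):
 *
 *	struct sock *sk = sk_alloc(PF_FOO, GFP_KERNEL, &my_proto, 1);
 *
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *
 * With zero_it != 0 the object is cleared and sk_family/sk_prot are set up
 * here; everything else is left to sock_init_data() and the protocol.
 */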
void sk_free(struct sock *sk)
	struct sk_filter *filter;
	struct module *owner = sk->sk_prot_creator->owner;
	filter = sk->sk_filter;
		sk_filter_release(sk, filter);
		sk->sk_filter = NULL;
	sock_disable_timestamp(sk);
	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __FUNCTION__, atomic_read(&sk->sk_omem_alloc));
	security_sk_free(sk);
	clr_vx_info(&sk->sk_vx_info);
	clr_nx_info(&sk->sk_nx_info);
	if (sk->sk_prot_creator->slab != NULL)
		kmem_cache_free(sk->sk_prot_creator->slab, sk);
struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
	struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0);
		struct sk_filter *filter;
		memcpy(newsk, sk, sk->sk_prot->obj_size);
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		atomic_set(&newsk->sk_rmem_alloc, 0);
		atomic_set(&newsk->sk_wmem_alloc, 0);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
		rwlock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc	= 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);
		filter = newsk->sk_filter;
			sk_filter_charge(newsk, filter);
		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still a raw copy of the parent, so invalidate
			 * the destructor and do a plain sk_free() */
			newsk->sk_destruct = NULL;
		newsk->sk_priority = 0;
		atomic_set(&newsk->sk_refcnt, 2);
		set_vx_info(&newsk->sk_vx_info, sk->sk_vx_info);
		newsk->sk_xid = sk->sk_xid;
		set_nx_info(&newsk->sk_nx_info, sk->sk_nx_info);
		newsk->sk_nid = sk->sk_nid;
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child was always incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		sk_refcnt_debug_inc(newsk);
		newsk->sk_socket = NULL;
		newsk->sk_sleep	 = NULL;
		if (newsk->sk_prot->sockets_allocated)
			atomic_inc(newsk->sk_prot->sockets_allocated);
EXPORT_SYMBOL_GPL(sk_clone);
void __init sk_init(void)
	if (num_physpages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (num_physpages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
 *	Simple resource managers for sockets.
 *	Write buffer destructor automatically called from kfree_skb.
void sock_wfree(struct sk_buff *skb)
	struct sock *sk = skb->sk;
	/* In case it might be waiting for more memory. */
	atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
		sk->sk_write_space(sk);
 *	Read buffer destructor automatically called from kfree_skb.
void sock_rfree(struct sk_buff *skb)
	struct sock *sk = skb->sk;
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
int sock_i_uid(struct sock *sk)
	read_lock(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock(&sk->sk_callback_lock);
unsigned long sock_i_ino(struct sock *sk)
	read_lock(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock(&sk->sk_callback_lock);
 * Allocate a skb from the socket's send buffer.
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
			skb_set_owner_w(skb, sk);
 * Allocate a skb from the socket's receive buffer.
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
			skb_set_owner_r(skb, sk);
 * Allocate a memory block from the socket's option memory buffer.
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
			atomic_sub(size, &sk->sk_omem_alloc);
 * Free an option memory block.
void sock_kfree_s(struct sock *sk, void *mem, int size)
	atomic_sub(size, &sk->sk_omem_alloc);
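
/*
 * Typical (illustrative) pairing of the two helpers above, charging an
 * option buffer of "len" bytes against the socket's optmem allowance;
 * not part of the original file:
 *
 *	void *buf = sock_kmalloc(sk, len, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, buf, len);
 *
 * The caller must remember the size, since sock_kfree_s() needs it to undo
 * the sk_omem_alloc charge.
 */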
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
static long sock_wait_for_wmem(struct sock *sk, long timeo)
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	if (signal_pending(current))
	set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
	if (sk->sk_shutdown & SEND_SHUTDOWN)
	timeo = schedule_timeout(timeo);
	finish_wait(sk->sk_sleep, &wait);
 *	Generic send/receive buffer handlers
static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
					    unsigned long header_len,
					    unsigned long data_len,
					    int noblock, int *errcode)
	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;
	timeo = sock_sndtimeo(sk, noblock);
	err = sock_error(sk);
	if (sk->sk_shutdown & SEND_SHUTDOWN)
	if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		skb = alloc_skb(header_len, gfp_mask);
			/* No pages, we're done... */
			npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
			skb->truesize += data_len;
			skb_shinfo(skb)->nr_frags = npages;
			for (i = 0; i < npages; i++) {
				page = alloc_pages(sk->sk_allocation, 0);
					skb_shinfo(skb)->nr_frags = i;
				frag = &skb_shinfo(skb)->frags[i];
				frag->page_offset = 0;
				frag->size = (data_len >= PAGE_SIZE ?
				data_len -= PAGE_SIZE;
		/* Full success... */
	set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
	if (signal_pending(current))
	timeo = sock_wait_for_wmem(sk, timeo);
	skb_set_owner_w(skb, sk);
	err = sock_intr_errno(timeo);
struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
static void __lock_sock(struct sock *sk)
	prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
				  TASK_UNINTERRUPTIBLE);
	spin_unlock_bh(&sk->sk_lock.slock);
	spin_lock_bh(&sk->sk_lock.slock);
	if (!sock_owned_by_user(sk))
	finish_wait(&sk->sk_lock.wq, &wait);
static void __release_sock(struct sock *sk)
	struct sk_buff *skb = sk->sk_backlog.head;
	sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		struct sk_buff *next = skb->next;
		sk->sk_backlog_rcv(sk, skb);
		 * We are in process context here with softirqs
		 * disabled, use cond_resched_softirq() to preempt.
		 * This is safe to do because we've taken the backlog
		cond_resched_softirq();
		} while (skb != NULL);
	} while ((skb = sk->sk_backlog.head) != NULL);
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk: sock to wait on
 * @timeo: for how long
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining the wait queue.
 * We check the receive queue before schedule() only as an optimization;
 * it is very likely that release_sock() added new data.
int sk_wait_data(struct sock *sk, long *timeo)
	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk->sk_sleep, &wait);
EXPORT_SYMBOL(sk_wait_data);
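
/*
 * Sketch of the usual calling pattern (illustrative, not from the original
 * file): a protocol's recvmsg() blocks until data shows up or the timeout
 * expires, with the socket lock held around the wait.
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 */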
 *	Set of default routines for initialising struct proto_ops when
 *	the protocol does not support a particular function. In certain
 *	cases where it makes no sense for a protocol to have a "do nothing"
 *	function, some default processing is provided.
int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
int sock_no_listen(struct socket *sock, int backlog)
int sock_no_shutdown(struct socket *sock, int how)
int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int optlen)
int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
	/* Mirror missing mmap method error code */
ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
	struct msghdr msg = {.msg_flags = flags};
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
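
/*
 * Illustrative use of the stubs above (not part of the original file): a
 * datagram-only family, here called "foo" purely as an example, can wire
 * every unsupported operation to the matching sock_no_*() helper instead of
 * open-coding the error returns.
 *
 *	static const struct proto_ops foo_dgram_ops = {
 *		.family     = PF_FOO,
 *		.owner      = THIS_MODULE,
 *		.accept     = sock_no_accept,
 *		.listen     = sock_no_listen,
 *		.shutdown   = sock_no_shutdown,
 *		.socketpair = sock_no_socketpair,
 *		.mmap       = sock_no_mmap,
 *		.sendpage   = sock_no_sendpage,
 *		// .bind, .connect, .sendmsg, .recvmsg, ... point at real handlers
 *	};
 */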
 *	Default Socket Callbacks
static void sock_def_wakeup(struct sock *sk)
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
	read_unlock(&sk->sk_callback_lock);
static void sock_def_error_report(struct sock *sk)
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	sk_wake_async(sk, 0, POLL_ERR);
	read_unlock(&sk->sk_callback_lock);
static void sock_def_readable(struct sock *sk, int len)
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	sk_wake_async(sk, 1, POLL_IN);
	read_unlock(&sk->sk_callback_lock);
static void sock_def_write_space(struct sock *sk)
	read_lock(&sk->sk_callback_lock);
	/* Do not wake up a writer until he can make "significant"
	 * progress. --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible(sk->sk_sleep);
		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, 2, POLL_OUT);
	read_unlock(&sk->sk_callback_lock);
static void sock_def_destruct(struct sock *sk)
	kfree(sk->sk_protinfo);
void sk_send_sigurg(struct sock *sk)
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, 3, POLL_PRI);
void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
	if (!mod_timer(timer, expires))
EXPORT_SYMBOL(sk_reset_timer);
void sk_stop_timer(struct sock *sk, struct timer_list *timer)
	if (timer_pending(timer) && del_timer(timer))
EXPORT_SYMBOL(sk_stop_timer);
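
/*
 * Illustrative use of the timer helpers above (not part of the original
 * file): a protocol arming a retransmit timer takes an extra socket
 * reference via sk_reset_timer() and drops it again in sk_stop_timer(),
 * so the sock cannot be freed while the timer is pending.
 * "my_retrans_timer" is a made-up name.
 *
 *	sk_reset_timer(sk, &my_retrans_timer, jiffies + HZ);   // arm, hold ref
 *	...
 *	sk_stop_timer(sk, &my_retrans_timer);                  // cancel, put ref
 */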
void sock_init_data(struct socket *sock, struct sock *sk)
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
	sk->sk_send_head	=	NULL;
	init_timer(&sk->sk_timer);
	sk->sk_allocation	=	GFP_KERNEL;
	sk->sk_rcvbuf		=	sysctl_rmem_default;
	sk->sk_sndbuf		=	sysctl_wmem_default;
	sk->sk_state		=	TCP_CLOSE;
	sk->sk_socket		=	sock;
	sock_set_flag(sk, SOCK_ZAPPED);
		sk->sk_type	=	sock->type;
		sk->sk_sleep	=	&sock->wait;
		sk->sk_sleep	=	NULL;
	rwlock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	sk->sk_state_change	=	sock_def_wakeup;
	sk->sk_data_ready	=	sock_def_readable;
	sk->sk_write_space	=	sock_def_write_space;
	sk->sk_error_report	=	sock_def_error_report;
	sk->sk_destruct		=	sock_def_destruct;
	sk->sk_sndmsg_page	=	NULL;
	sk->sk_sndmsg_off	=	0;
	sk->sk_peercred.pid 	=	0;
	sk->sk_peercred.uid	=	-1;
	sk->sk_peercred.gid	=	-1;
	sk->sk_write_pending	=	0;
	sk->sk_rcvlowat		=	1;
	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
	sk->sk_stamp.tv_sec     = -1L;
	sk->sk_stamp.tv_usec    = -1L;
	set_vx_info(&sk->sk_vx_info, current->vx_info);
	sk->sk_xid = vx_current_xid();
	set_nx_info(&sk->sk_nx_info, current->nx_info);
	sk->sk_nid = nx_current_nid();
	atomic_set(&sk->sk_refcnt, 1);
void fastcall lock_sock(struct sock *sk)
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owner)
	sk->sk_lock.owner = (void *)1;
	spin_unlock_bh(&sk->sk_lock.slock);
EXPORT_SYMBOL(lock_sock);
void fastcall release_sock(struct sock *sk)
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
	sk->sk_lock.owner = NULL;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
EXPORT_SYMBOL(release_sock);
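
/*
 * The canonical calling pattern for the pair above, sketched for reference
 * (not part of the original file): process context takes the socket lock,
 * and anything that arrived on the backlog in the meantime is run by
 * release_sock() before the lock is dropped.
 *
 *	lock_sock(sk);
 *	// ... modify socket state, sleep if necessary ...
 *	release_sock(sk);
 */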
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk);
	if (sk->sk_stamp.tv_sec == -1)
	if (sk->sk_stamp.tv_sec == 0)
		do_gettimeofday(&sk->sk_stamp);
	return copy_to_user(userstamp, &sk->sk_stamp, sizeof(struct timeval)) ?
		-EFAULT : 0;
EXPORT_SYMBOL(sock_get_timestamp);
void sock_enable_timestamp(struct sock *sk)
	if (!sock_flag(sk, SOCK_TIMESTAMP)) {
		sock_set_flag(sk, SOCK_TIMESTAMP);
		net_enable_timestamp();
EXPORT_SYMBOL(sock_enable_timestamp);
 *	Get a socket option on a socket.
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
	struct sock *sk = sock->sk;
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
EXPORT_SYMBOL(sock_common_getsockopt);
int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
	struct sock *sk = sock->sk;
	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
		msg->msg_namelen = addr_len;
EXPORT_SYMBOL(sock_common_recvmsg);
 *	Set socket options on an inet socket.
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int optlen)
	struct sock *sk = sock->sk;
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
EXPORT_SYMBOL(sock_common_setsockopt);
void sk_common_release(struct sock *sk)
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);
	 * Observation: when sk_common_release() is called, processes have
	 * no access to the socket, but the network stack still does.
	 * Step one, detach it from networking:
	 * A. Remove from hash tables.
	sk->sk_prot->unhash(sk);
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight, because another CPU runs
	 * the receive path and did the hash table lookup before we unhashed
	 * the socket. They will reach the receive queue and will be purged
	 * by the socket destructor.
	 * Also we still have packets pending on the receive queue and,
	 * probably, our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	xfrm_sk_free_policy(sk);
	sk_refcnt_debug_release(sk);
EXPORT_SYMBOL(sk_common_release);
static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);
int proto_register(struct proto *prot, int alloc_slab)
	char *request_sock_slab_name = NULL;
	char *timewait_sock_slab_name;
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					       SLAB_HWCACHE_ALIGN, NULL, NULL);
		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
		if (prot->rsk_prot != NULL) {
			static const char mask[] = "request_sock_%s";
			request_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
			if (request_sock_slab_name == NULL)
				goto out_free_sock_slab;
			sprintf(request_sock_slab_name, mask, prot->name);
			prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL, NULL);
			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				goto out_free_request_sock_slab_name;
		if (prot->twsk_prot != NULL) {
			static const char mask[] = "tw_sock_%s";
			timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
			if (timewait_sock_slab_name == NULL)
				goto out_free_request_sock_slab;
			sprintf(timewait_sock_slab_name, mask, prot->name);
			prot->twsk_prot->twsk_slab =
				kmem_cache_create(timewait_sock_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0, SLAB_HWCACHE_ALIGN,
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
	write_lock(&proto_list_lock);
	list_add(&prot->node, &proto_list);
	write_unlock(&proto_list_lock);
out_free_timewait_sock_slab_name:
	kfree(timewait_sock_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
out_free_request_sock_slab_name:
	kfree(request_sock_slab_name);
	kmem_cache_destroy(prot->slab);
EXPORT_SYMBOL(proto_register);
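
/*
 * Illustrative registration of a protocol (not part of the original file);
 * "foo_prot" and "struct foo_sock" are made-up example names.  Passing
 * alloc_slab != 0 asks proto_register() to create the per-protocol kmem
 * cache that sk_alloc() will use.
 *
 *	static struct proto foo_prot = {
 *		.name     = "FOO",
 *		.owner    = THIS_MODULE,
 *		.obj_size = sizeof(struct foo_sock),
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return proto_register(&foo_prot, 1);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		proto_unregister(&foo_prot);
 *	}
 */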
void proto_unregister(struct proto *prot)
	write_lock(&proto_list_lock);
	list_del(&prot->node);
	write_unlock(&proto_list_lock);
	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		const char *name = kmem_cache_name(prot->rsk_prot->slab);
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		const char *name = kmem_cache_name(prot->twsk_prot->twsk_slab);
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		prot->twsk_prot->twsk_slab = NULL;
EXPORT_SYMBOL(proto_unregister);
#ifdef CONFIG_PROC_FS
static inline struct proto *__proto_head(void)
	return list_entry(proto_list.next, struct proto, node);
static inline struct proto *proto_head(void)
	return list_empty(&proto_list) ? NULL : __proto_head();
static inline struct proto *proto_next(struct proto *proto)
	return proto->node.next == &proto_list ? NULL :
		list_entry(proto->node.next, struct proto, node);
static inline struct proto *proto_get_idx(loff_t pos)
	struct proto *proto;
	list_for_each_entry(proto, &proto_list, node)
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	read_lock(&proto_list_lock);
	return *pos ? proto_get_idx(*pos - 1) : SEQ_START_TOKEN;
static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	return v == SEQ_START_TOKEN ? proto_head() : proto_next(v);
static void proto_seq_stop(struct seq_file *seq, void *v)
	read_unlock(&proto_list_lock);
static char proto_method_implemented(const void *method)
	return method == NULL ? 'n' : 'y';
static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
	seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->sockets_allocated != NULL ? atomic_read(proto->sockets_allocated) : -1,
		   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
static int proto_seq_show(struct seq_file *seq, void *v)
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
		proto_seq_printf(seq, v);
static struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
static int proto_seq_open(struct inode *inode, struct file *file)
	return seq_open(file, &proto_seq_ops);
static struct file_operations proto_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= proto_seq_open,
	.llseek		= seq_lseek,
	.release	= seq_release,
static int __init proto_init(void)
	/* register /proc/net/protocols */
	return proc_net_fops_create("protocols", S_IRUGO, &proto_seq_fops) == NULL ? -ENOBUFS : 0;
subsys_initcall(proto_init);
#endif /* PROC_FS */
EXPORT_SYMBOL(sk_alloc);
EXPORT_SYMBOL(sk_free);
EXPORT_SYMBOL(sk_send_sigurg);
EXPORT_SYMBOL(sock_alloc_send_skb);
EXPORT_SYMBOL(sock_init_data);
EXPORT_SYMBOL(sock_kfree_s);
EXPORT_SYMBOL(sock_kmalloc);
EXPORT_SYMBOL(sock_no_accept);
EXPORT_SYMBOL(sock_no_bind);
EXPORT_SYMBOL(sock_no_connect);
EXPORT_SYMBOL(sock_no_getname);
EXPORT_SYMBOL(sock_no_getsockopt);
EXPORT_SYMBOL(sock_no_ioctl);
EXPORT_SYMBOL(sock_no_listen);
EXPORT_SYMBOL(sock_no_mmap);
EXPORT_SYMBOL(sock_no_poll);
EXPORT_SYMBOL(sock_no_recvmsg);
EXPORT_SYMBOL(sock_no_sendmsg);
EXPORT_SYMBOL(sock_no_sendpage);
EXPORT_SYMBOL(sock_no_setsockopt);
EXPORT_SYMBOL(sock_no_shutdown);
EXPORT_SYMBOL(sock_no_socketpair);
EXPORT_SYMBOL(sock_rfree);
EXPORT_SYMBOL(sock_setsockopt);
EXPORT_SYMBOL(sock_wfree);
EXPORT_SYMBOL(sock_wmalloc);
EXPORT_SYMBOL(sock_i_uid);
EXPORT_SYMBOL(sock_i_ino);
EXPORT_SYMBOL(sysctl_optmem_max);
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_rmem_max);
EXPORT_SYMBOL(sysctl_wmem_max);