/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 * Version:	$Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>

#include <linux/filter.h>
#include <linux/vs_socket.h>
#include <linux/vs_limit.h>
/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
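
/*
 * Worked example (illustrative only -- the real sk_buff size varies by
 * architecture and config): if sizeof(struct sk_buff) were 160 bytes,
 * _SK_MEM_OVERHEAD would be 160 + 256 = 416 bytes, and the defaults
 * below would come out to 416 * 256 = 106496 bytes (~104 KB) each way.
 */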
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max = SK_WMEM_MAX;
__u32 sysctl_rmem_max = SK_RMEM_MAX;
__u32 sysctl_wmem_default = SK_WMEM_MAX;
__u32 sysctl_rmem_default = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max = sizeof(unsigned long)*(2*UIO_MAXIOV + 512);
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;

	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
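
/*
 * Worked example (illustrative, assuming HZ=100): tv = {1, 500000}
 * converts to 1*100 + (500000 + 9999)/10000 = 150 jiffies; the added
 * (1000000/HZ - 1) rounds any fraction of a tick up to a whole tick.
 */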
static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}
static void sock_disable_timestamp(struct sock *sk)
{
	if (sock_flag(sk, SOCK_TIMESTAMP)) {
		sock_reset_flag(sk, SOCK_TIMESTAMP);
		net_disable_timestamp();
	}
}
/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */
int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct sk_filter *filter;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

#ifdef SO_DONTLINGER		/* Compatibility item... */
	if (optname == SO_DONTLINGER) {
		lock_sock(sk);
		sock_reset_flag(sk, SOCK_LINGER);
		release_sock(sk);
		return 0;
	}
#endif

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sk->sk_debug = valbool;
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sk->sk_localroute = valbool;
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this. BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;

		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_RCVBUF:
		/* Don't error on this. BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;

		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/* FIXME: is this lower bound the right one? */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if (ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASS_CRED, &sock->flags);
		else
			clear_bit(SOCK_PASS_CRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
		sk->sk_rcvtstamp = valbool;
		if (valbool)
			sock_enable_timestamp(sk);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

#ifdef CONFIG_NETDEVICES
	case SO_BINDTODEVICE:
	{
		char devname[IFNAMSIZ];

		/* Sorry... */
		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			break;
		}

		/* Bind this socket to a particular device like "eth0",
		 * as specified in the passed interface name. If the
		 * name is "" or the option length is zero the socket
		 * is not bound.
		 */

		if (!valbool) {
			sk->sk_bound_dev_if = 0;
		} else {
			if (optlen > IFNAMSIZ)
				optlen = IFNAMSIZ;
			if (copy_from_user(devname, optval, optlen)) {
				ret = -EFAULT;
				break;
			}

			/* Remove any cached route for this socket. */
			sk_dst_reset(sk);

			if (devname[0] == '\0') {
				sk->sk_bound_dev_if = 0;
			} else {
				struct net_device *dev = dev_get_by_name(devname);
				if (!dev) {
					ret = -ENODEV;
					break;
				}
				sk->sk_bound_dev_if = dev->ifindex;
				dev_put(dev);
			}
		}
		break;
	}
#endif

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		spin_lock_bh(&sk->sk_lock.slock);
		filter = sk->sk_filter;
		if (filter) {
			sk->sk_filter = NULL;
			spin_unlock_bh(&sk->sk_lock.slock);
			sk_filter_release(sk, filter);
			break;
		}
		spin_unlock_bh(&sk->sk_lock.slock);
		ret = -ENONET;
		break;

	/* We implement the SO_SNDLOWAT etc to
	   not be settable (1003.1g 5.3) */
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
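
/*
 * Illustrative user-space sketch (not part of this file): because the
 * kernel stores twice the requested SNDBUF/RCVBUF value to account for
 * sk_buff overhead, reading the option back returns the doubled size.
 * The buffer size chosen here is arbitrary.
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &out, &len);
 *	// out is now 131072 (2 * 65536); requests above sysctl_wmem_max
 *	// are silently clamped first.
 */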
int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	unsigned int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case SO_DEBUG:
		v.val = sk->sk_debug;
		break;

	case SO_DONTROUTE:
		v.val = sk->sk_localroute;
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sk->sk_rcvtstamp;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASS_CRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
		if (len > sizeof(sk->sk_peercred))
			len = sizeof(sk->sk_peercred);
		if (copy_to_user(optval, &sk->sk_peercred, len))
			return -EFAULT;
		goto lenout;

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec(sock, optval, optlen, len);

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
static kmem_cache_t *sk_cachep;
/**
 *	sk_alloc - All socket objects are allocated here
 *	@family - protocol family
 *	@priority - for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@zero_it - zeroes the allocated sock
 *	@slab - alternate slab
 *
 *	All socket objects are allocated here. If @zero_it is non-zero
 *	it should have the size of the area to be zeroed, because the
 *	private slabcaches have different sizes of the generic struct sock.
 *	1 has been kept as a way to say sizeof(struct sock).
 */
struct sock *sk_alloc(int family, int priority, int zero_it, kmem_cache_t *slab)
{
	struct sock *sk = NULL;

	if (!slab)
		slab = sk_cachep;
	sk = kmem_cache_alloc(slab, priority);
	if (sk) {
		if (zero_it) {
			memset(sk, 0,
			       zero_it == 1 ? sizeof(struct sock) : zero_it);
			sk->sk_family = family;
			sock_lock_init(sk);
		}
		sk->sk_slab = slab;

		if (security_sk_alloc(sk, family, priority)) {
			kmem_cache_free(slab, sk);
			sk = NULL;
		}
	}
	return sk;
}
void sk_free(struct sock *sk)
{
	struct sk_filter *filter;
	struct module *owner = sk->sk_owner;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = sk->sk_filter;
	if (filter) {
		sk_filter_release(sk, filter);
		sk->sk_filter = NULL;
	}

	sock_disable_timestamp(sk);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __FUNCTION__, atomic_read(&sk->sk_omem_alloc));

	security_sk_free(sk);

	// BUG_ON(sk->sk_vx_info);
	clr_vx_info(&sk->sk_vx_info);
	// BUG_ON(sk->sk_nx_info);
	clr_nx_info(&sk->sk_nx_info);

	kmem_cache_free(sk->sk_slab, sk);
	module_put(owner);
}
void __init sk_init(void)
{
	sk_cachep = kmem_cache_create("sock", sizeof(struct sock), 0,
				      SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!sk_cachep)
		printk(KERN_CRIT "sk_init: Cannot create sock SLAB cache!");

	if (num_physpages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (num_physpages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}
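
/*
 * For scale (an assumption -- page size is per-arch): with 4 KB pages
 * the thresholds above correspond to roughly 16 MB and 512 MB of RAM.
 */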
/*
 *	Simple resource managers for sockets.
 */

/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* In case it might be waiting for more memory. */
	atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
	if (!sk->sk_use_write_queue)
		sk->sk_write_space(sk);
	sock_put(sk);
}
/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}
int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock(&sk->sk_callback_lock);
	return uid;
}
unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock(&sk->sk_callback_lock);
	return ino;
}
/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}
/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, int priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
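
/*
 * Minimal usage sketch (illustrative only): a protocol that keeps a
 * small per-socket option block charges it against the socket's option
 * memory with sock_kmalloc() and must free it with sock_kfree_s()
 * passing the same size.  "my_opts" is a hypothetical structure, not
 * something defined in this file.
 *
 *	struct my_opts *opts = sock_kmalloc(sk, sizeof(*opts), GFP_KERNEL);
 *	if (!opts)
 *		return -ENOBUFS;
 *	// ... use opts ...
 *	sock_kfree_s(sk, opts, sizeof(*opts));
 */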
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 * I think, these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk->sk_sleep, &wait);
	return timeo;
}
/*
 *	Generic send/receive buffer handlers
 */
static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
					    unsigned long header_len,
					    unsigned long data_len,
					    int noblock, int *errcode)
{
	struct sk_buff *skb;
	unsigned int gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, sk->sk_allocation);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE : data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
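
/*
 * Illustrative sketch (assumed, not taken from this file): a typical
 * datagram sendmsg() implementation charges the skb to the socket's
 * send buffer via sock_alloc_send_skb(), which blocks or fails with an
 * error in "err" according to the MSG_DONTWAIT flag.
 *
 *	struct sk_buff *skb;
 *	int err;
 *
 *	skb = sock_alloc_send_skb(sk, len + reserve,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (skb == NULL)
 *		goto out_err;
 */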
static void __lock_sock(struct sock *sk)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}
static void __release_sock(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			sk->sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);
}
/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk - sock to wait on
 * @timeo - for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk->sk_sleep, &wait);
	return rc;
}

EXPORT_SYMBOL(sk_wait_data);
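
/*
 * Illustrative sketch (assumed): a blocking recvmsg() typically loops
 * on sk_wait_data() until data shows up or the timeout runs out, e.g.:
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo || !sk_wait_data(sk, &timeo))
 *			break;		// timed out or still no data
 *	}
 */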
/*
 *	Set of default routines for initialising struct proto_ops when
 *	the protocol does not support a particular function. In certain
 *	cases where it makes no sense for a protocol to have a "do nothing"
 *	function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
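
/*
 * Illustrative sketch (assumed, not from this file): a protocol family
 * that supports only a subset of operations can plug these defaults
 * into its proto_ops so unsupported calls fail cleanly with
 * -EOPNOTSUPP instead of needing per-family stubs:
 *
 *	static struct proto_ops hypothetical_ops = {
 *		.family		= AF_MAX,	// placeholder family
 *		.bind		= sock_no_bind,
 *		.accept		= sock_no_accept,
 *		.mmap		= sock_no_mmap,
 *		.sendpage	= sock_no_sendpage,
 *		// real handlers would fill the remaining fields
 *	};
 */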
/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_error_report(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	sk_wake_async(sk, 0, POLL_ERR);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_readable(struct sock *sk, int len)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	sk_wake_async(sk, 1, POLL_IN);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible(sk->sk_sleep);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, 2, POLL_OUT);
	}

	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_destruct(struct sock *sk)
{
	if (sk->sk_protinfo)
		kfree(sk->sk_protinfo);
}
void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, 3, POLL_PRI);
}
void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}

EXPORT_SYMBOL(sk_reset_timer);
void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}

EXPORT_SYMBOL(sk_stop_timer);
void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);

	sk->sk_send_head = NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf = sysctl_rmem_default;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_state = TCP_CLOSE;
	sk->sk_socket = sock;

	if (sock) {
		sk->sk_type = sock->type;
		sk->sk_sleep = &sock->wait;
		sock->sk = sk;
	} else
		sk->sk_sleep = NULL;

	rwlock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready = sock_def_readable;
	sk->sk_write_space = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct = sock_def_destruct;

	sk->sk_sndmsg_page = NULL;
	sk->sk_sndmsg_off = 0;

	sk->sk_peercred.pid = 0;
	sk->sk_peercred.uid = -1;
	sk->sk_peercred.gid = -1;
	sk->sk_write_pending = 0;
	sk->sk_rcvlowat = 1;
	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_owner = NULL;

	sk->sk_stamp.tv_sec = -1L;
	sk->sk_stamp.tv_usec = -1L;

	set_vx_info(&sk->sk_vx_info, current->vx_info);
	sk->sk_xid = vx_current_xid();
	set_nx_info(&sk->sk_nx_info, current->nx_info);
	sk->sk_nid = nx_current_nid();
	atomic_set(&sk->sk_refcnt, 1);
}
void fastcall lock_sock(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&(sk->sk_lock.slock));
	if (sk->sk_lock.owner)
		__lock_sock(sk);
	sk->sk_lock.owner = (void *)1;
	spin_unlock_bh(&(sk->sk_lock.slock));
}

EXPORT_SYMBOL(lock_sock);
void fastcall release_sock(struct sock *sk)
{
	spin_lock_bh(&(sk->sk_lock.slock));
	if (sk->sk_backlog.tail)
		__release_sock(sk);
	sk->sk_lock.owner = NULL;
	if (waitqueue_active(&(sk->sk_lock.wq)))
		wake_up(&(sk->sk_lock.wq));
	spin_unlock_bh(&(sk->sk_lock.slock));
}

EXPORT_SYMBOL(release_sock);
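
/*
 * Illustrative sketch (assumed): the canonical locking pattern for
 * process context.  Packets arriving from softirq context while the
 * lock is held are queued on sk->sk_backlog and replayed by
 * __release_sock() when release_sock() runs.
 *
 *	lock_sock(sk);
 *	// ... touch socket state safely here ...
 *	release_sock(sk);
 */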
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk);
	if (sk->sk_stamp.tv_sec == -1)
		return -ENOENT;
	if (sk->sk_stamp.tv_sec == 0)
		do_gettimeofday(&sk->sk_stamp);
	return copy_to_user(userstamp, &sk->sk_stamp, sizeof(struct timeval)) ?
		-EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);
void sock_enable_timestamp(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_TIMESTAMP)) {
		sock_set_flag(sk, SOCK_TIMESTAMP);
		net_enable_timestamp();
	}
}
EXPORT_SYMBOL(sock_enable_timestamp);
/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(sock_common_getsockopt);
int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}

EXPORT_SYMBOL(sock_common_recvmsg);
/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(sock_common_setsockopt);
void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket. But the net still has.
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did the hash table lookup before we unhashed
	 * the socket. They will reach the receive queue and will be purged
	 * by the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

#ifdef INET_REFCNT_DEBUG
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the socket %p delayed, c=%d\n",
		       sk, atomic_read(&sk->sk_refcnt));
#endif
	sock_put(sk);
}

EXPORT_SYMBOL(sk_common_release);
int sk_alloc_slab(struct proto *prot, char *name)
{
	prot->slab = kmem_cache_create(name,
				       prot->slab_obj_size, 0,
				       SLAB_HWCACHE_ALIGN, NULL, NULL);

	return prot->slab != NULL ? 0 : -ENOBUFS;
}

EXPORT_SYMBOL(sk_alloc_slab);
void sk_free_slab(struct proto *prot)
{
	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}
}

EXPORT_SYMBOL(sk_free_slab);
EXPORT_SYMBOL(sk_alloc);
EXPORT_SYMBOL(sk_free);
EXPORT_SYMBOL(sk_send_sigurg);
EXPORT_SYMBOL(sock_alloc_send_skb);
EXPORT_SYMBOL(sock_init_data);
EXPORT_SYMBOL(sock_kfree_s);
EXPORT_SYMBOL(sock_kmalloc);
EXPORT_SYMBOL(sock_no_accept);
EXPORT_SYMBOL(sock_no_bind);
EXPORT_SYMBOL(sock_no_connect);
EXPORT_SYMBOL(sock_no_getname);
EXPORT_SYMBOL(sock_no_getsockopt);
EXPORT_SYMBOL(sock_no_ioctl);
EXPORT_SYMBOL(sock_no_listen);
EXPORT_SYMBOL(sock_no_mmap);
EXPORT_SYMBOL(sock_no_poll);
EXPORT_SYMBOL(sock_no_recvmsg);
EXPORT_SYMBOL(sock_no_sendmsg);
EXPORT_SYMBOL(sock_no_sendpage);
EXPORT_SYMBOL(sock_no_setsockopt);
EXPORT_SYMBOL(sock_no_shutdown);
EXPORT_SYMBOL(sock_no_socketpair);
EXPORT_SYMBOL(sock_rfree);
EXPORT_SYMBOL(sock_setsockopt);
EXPORT_SYMBOL(sock_wfree);
EXPORT_SYMBOL(sock_wmalloc);
EXPORT_SYMBOL(sock_i_uid);
EXPORT_SYMBOL(sock_i_ino);
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(sysctl_optmem_max);
EXPORT_SYMBOL(sysctl_rmem_max);
EXPORT_SYMBOL(sysctl_wmem_max);
#endif