/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Generic socket support routines. Memory allocators, socket lock/release
 *              handler for protocols to use and generic option handler.
 *
 *
 * Version:     $Id: sock.c,v 1.117 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *              Alan Cox        :       Numerous verify_area() problems
 *              Alan Cox        :       Connecting on a connecting socket
 *                                      now returns an error for tcp.
 *              Alan Cox        :       sock->protocol is set correctly.
 *                                      and is not sometimes left as 0.
 *              Alan Cox        :       connect handles icmp errors on a
 *                                      connect properly. Unfortunately there
 *                                      is a restart syscall nasty there. I
 *                                      can't match BSD without hacking the C
 *                                      library. Ideas urgently sought!
 *              Alan Cox        :       Disallow bind() to addresses that are
 *                                      not ours - especially broadcast ones!!
 *              Alan Cox        :       Socket 1024 _IS_ ok for users. (fencepost)
 *              Alan Cox        :       sock_wfree/sock_rfree don't destroy sockets,
 *                                      instead they leave that for the DESTROY timer.
 *              Alan Cox        :       Clean up error flag in accept
 *              Alan Cox        :       TCP ack handling is buggy, the DESTROY timer
 *                                      was buggy. Put a remove_sock() in the handler
 *                                      for memory when we hit 0. Also altered the timer
 *                                      code. The ACK stuff can wait and needs major
 *                                      TCP layer surgery.
 *              Alan Cox        :       Fixed TCP ack bug, removed remove sock
 *                                      and fixed timer/inet_bh race.
 *              Alan Cox        :       Added zapped flag for TCP
 *              Alan Cox        :       Move kfree_skb into skbuff.c and tidied up surplus code
 *              Alan Cox        :       for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *              Alan Cox        :       kfree_s calls now are kfree_skbmem so we can track skb resources
 *              Alan Cox        :       Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *              Alan Cox        :       Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *              Rick Sladkey    :       Relaxed UDP rules for matching packets.
 *              C.E.Hawkins     :       IFF_PROMISC/SIOCGHWADDR support
 *      Pauline Middelink       :       identd support
 *              Alan Cox        :       Fixed connect() taking signals I think.
 *              Alan Cox        :       SO_LINGER supported
 *              Alan Cox        :       Error reporting fixes
 *              Anonymous       :       inet_create tidied up (sk->reuse setting)
 *              Alan Cox        :       inet sockets don't set sk->type!
 *              Alan Cox        :       Split socket option code
 *              Alan Cox        :       Callbacks
 *              Alan Cox        :       Nagle flag for Charles & Johannes stuff
 *              Alex            :       Removed restriction on inet fioctl
 *              Alan Cox        :       Splitting INET from NET core
 *              Alan Cox        :       Fixed bogus SO_TYPE handling in getsockopt()
 *              Adam Caldwell   :       Missing return in SO_DONTROUTE/SO_DEBUG code
 *              Alan Cox        :       Split IP from generic code
 *              Alan Cox        :       New kfree_skbmem()
 *              Alan Cox        :       Make SO_DEBUG superuser only.
 *              Alan Cox        :       Allow anyone to clear SO_DEBUG
 *                                      (compatibility fix)
 *              Alan Cox        :       Added optimistic memory grabbing for AF_UNIX throughput.
 *              Alan Cox        :       Allocator for a socket is settable.
 *              Alan Cox        :       SO_ERROR includes soft errors.
 *              Alan Cox        :       Allow NULL arguments on some SO_ opts
 *              Alan Cox        :       Generic socket allocation to make hooks
 *                                      easier (suggested by Craig Metz).
 *              Michael Pall    :       SO_ERROR returns positive errno again
 *              Steve Whitehouse:       Added default destructor to free
 *                                      protocol private data.
 *              Steve Whitehouse:       Added various other default routines
 *                                      common to several socket families.
 *              Chris Evans     :       Call suser() check last on F_SETOWN
 *              Jay Schulist    :       Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *              Andi Kleen      :       Add sock_kmalloc()/sock_kfree_s()
 *              Andi Kleen      :       Fix write_space callback
 *              Chris Evans     :       Security fixes - signedness again
 *              Arnaldo C. Melo :       cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>

#include <linux/filter.h>
#include <linux/vs_socket.h>
#include <linux/vs_limit.h>
#include <linux/vs_context.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-21"       , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-29"          ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_MAX"
};
static const char *af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-21"       , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-29"          ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_MAX"
};
#endif

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS         256
#define _SK_MEM_OVERHEAD        (sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX             (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX             (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
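
/*
 * Worked example (illustrative only; the real numbers depend on the
 * architecture and config): assuming sizeof(struct sk_buff) is roughly
 * 240 bytes, _SK_MEM_OVERHEAD is 240 + 256 = 496 bytes, so SK_WMEM_MAX
 * and SK_RMEM_MAX both default to 256 * 496 = 126976 bytes, i.e. about
 * 124 KiB of buffering per socket.
 */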

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
        struct timeval tv;

        if (optlen < sizeof(tv))
                return -EINVAL;
        if (copy_from_user(&tv, optval, sizeof(tv)))
                return -EFAULT;

        *timeo_p = MAX_SCHEDULE_TIMEOUT;
        if (tv.tv_sec == 0 && tv.tv_usec == 0)
                return 0;
        if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
                *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
        return 0;
}

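/*
 * Worked example of the conversion above, assuming HZ=1000 (so one
 * jiffy is 1000000/HZ = 1000 usec): a timeval of { .tv_sec = 1,
 * .tv_usec = 500 } yields 1*1000 + (500 + 999)/1000 = 1001 jiffies,
 * i.e. the microseconds are rounded up to the next jiffy. A timeval
 * of { 0, 0 } selects MAX_SCHEDULE_TIMEOUT, meaning "wait forever".
 */
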
static void sock_warn_obsolete_bsdism(const char *name)
{
        static int warned;
        static char warncomm[TASK_COMM_LEN];
        if (strcmp(warncomm, current->comm) && warned < 5) {
                strcpy(warncomm,  current->comm);
                printk(KERN_WARNING "process `%s' is using obsolete "
                       "%s SO_BSDCOMPAT\n", warncomm, name);
                warned++;
        }
}

static void sock_disable_timestamp(struct sock *sk)
{
        if (sock_flag(sk, SOCK_TIMESTAMP)) {
                sock_reset_flag(sk, SOCK_TIMESTAMP);
                net_disable_timestamp();
        }
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        int err = 0;
        int skb_len;

        /* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
           number of warnings when compiling with -W --ANK
         */
        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
            (unsigned)sk->sk_rcvbuf) {
                err = -ENOMEM;
                goto out;
        }

        err = sk_filter(sk, skb);
        if (err)
                goto out;

        skb->dev = NULL;
        skb_set_owner_r(skb, sk);

        /* Cache the SKB length before we tack it onto the receive
         * queue.  Once it is added it no longer belongs to us and
         * may be freed by other threads of control pulling packets
         * from the queue.
         */
        skb_len = skb->len;

        skb_queue_tail(&sk->sk_receive_queue, skb);

        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_data_ready(sk, skb_len);
out:
        return err;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

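/*
 * Sketch of a typical call site in a protocol's delivery path (the
 * surrounding function is hypothetical): on failure the skb still
 * belongs to the caller, which usually drops it.
 *
 *	if (sock_queue_rcv_skb(sk, skb) < 0) {
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 *	return NET_RX_SUCCESS;
 */
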
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
        int rc = NET_RX_SUCCESS;

        if (sk_filter(sk, skb))
                goto discard_and_relse;

        skb->dev = NULL;

        if (nested)
                bh_lock_sock_nested(sk);
        else
                bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                /*
                 * trylock + unlock semantics:
                 */
                mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

                rc = sk->sk_backlog_rcv(sk, skb);

                mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
        } else
                sk_add_backlog(sk, skb);
        bh_unlock_sock(sk);
out:
        sock_put(sk);
        return rc;
discard_and_relse:
        kfree_skb(skb);
        goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
        struct dst_entry *dst = sk->sk_dst_cache;

        if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
                sk->sk_dst_cache = NULL;
                dst_release(dst);
                return NULL;
        }

        return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
        struct dst_entry *dst = sk_dst_get(sk);

        if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
                sk_dst_reset(sk);
                dst_release(dst);
                return NULL;
        }

        return dst;
}
EXPORT_SYMBOL(sk_dst_check);

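/*
 * Typical caller pattern for the checks above (a sketch, not lifted
 * from a specific protocol): validate the cached route and fall back
 * to a fresh lookup when it has gone stale.
 *
 *	struct dst_entry *dst = sk_dst_check(sk, 0);
 *
 *	if (dst == NULL) {
 *		... redo the route lookup and sk_dst_set() ...
 *	}
 */
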
/*
 *      This is meant for all protocols to use and covers goings on
 *      at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
                    char __user *optval, int optlen)
{
        struct sock *sk=sock->sk;
        struct sk_filter *filter;
        int val;
        int valbool;
        struct linger ling;
        int ret = 0;

        /*
         *      Options without arguments
         */

#ifdef SO_DONTLINGER            /* Compatibility item... */
        if (optname == SO_DONTLINGER) {
                lock_sock(sk);
                sock_reset_flag(sk, SOCK_LINGER);
                release_sock(sk);
                return 0;
        }
#endif

        if(optlen<sizeof(int))
                return(-EINVAL);

        if (get_user(val, (int __user *)optval))
                return -EFAULT;

        valbool = val?1:0;

        lock_sock(sk);

        switch(optname)
        {
                case SO_DEBUG:
                        if(val && !capable(CAP_NET_ADMIN))
                        {
                                ret = -EACCES;
                        }
                        else if (valbool)
                                sock_set_flag(sk, SOCK_DBG);
                        else
                                sock_reset_flag(sk, SOCK_DBG);
                        break;
                case SO_REUSEADDR:
                        sk->sk_reuse = valbool;
                        break;
                case SO_TYPE:
                case SO_ERROR:
                        ret = -ENOPROTOOPT;
                        break;
                case SO_DONTROUTE:
                        if (valbool)
                                sock_set_flag(sk, SOCK_LOCALROUTE);
                        else
                                sock_reset_flag(sk, SOCK_LOCALROUTE);
                        break;
                case SO_BROADCAST:
                        sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
                        break;
                case SO_SNDBUF:
                        /* Don't error on this: BSD doesn't, and if you think
                           about it this is right. Otherwise apps have to
                           play 'guess the biggest size' games. RCVBUF/SNDBUF
                           are treated in BSD as hints. */

                        if (val > sysctl_wmem_max)
                                val = sysctl_wmem_max;
set_sndbuf:
                        sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
                        if ((val * 2) < SOCK_MIN_SNDBUF)
                                sk->sk_sndbuf = SOCK_MIN_SNDBUF;
                        else
                                sk->sk_sndbuf = val * 2;

                        /*
                         *      Wake up sending tasks if we
                         *      upped the value.
                         */
                        sk->sk_write_space(sk);
                        break;

                case SO_SNDBUFFORCE:
                        if (!capable(CAP_NET_ADMIN)) {
                                ret = -EPERM;
                                break;
                        }
                        goto set_sndbuf;

                case SO_RCVBUF:
                        /* Don't error on this: BSD doesn't, and if you think
                           about it this is right. Otherwise apps have to
                           play 'guess the biggest size' games. RCVBUF/SNDBUF
                           are treated in BSD as hints. */

                        if (val > sysctl_rmem_max)
                                val = sysctl_rmem_max;
set_rcvbuf:
                        sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
                        /*
                         * We double it on the way in to account for
                         * "struct sk_buff" etc. overhead.   Applications
                         * assume that the SO_RCVBUF setting they make will
                         * allow that much actual data to be received on that
                         * socket.
                         *
                         * Applications are unaware that "struct sk_buff" and
                         * other overheads allocate from the receive buffer
                         * during socket buffer allocation.
                         *
                         * And after considering the possible alternatives,
                         * returning the value we actually used in getsockopt
                         * is the most desirable behavior.
                         */
                        if ((val * 2) < SOCK_MIN_RCVBUF)
                                sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
                        else
                                sk->sk_rcvbuf = val * 2;
                        break;

                case SO_RCVBUFFORCE:
                        if (!capable(CAP_NET_ADMIN)) {
                                ret = -EPERM;
                                break;
                        }
                        goto set_rcvbuf;

                case SO_KEEPALIVE:
#ifdef CONFIG_INET
                        if (sk->sk_protocol == IPPROTO_TCP)
                                tcp_set_keepalive(sk, valbool);
#endif
                        sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
                        break;

                case SO_OOBINLINE:
                        sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
                        break;

                case SO_NO_CHECK:
                        sk->sk_no_check = valbool;
                        break;

                case SO_PRIORITY:
                        if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
                                sk->sk_priority = val;
                        else
                                ret = -EPERM;
                        break;

                case SO_LINGER:
                        if(optlen<sizeof(ling)) {
                                ret = -EINVAL;  /* 1003.1g */
                                break;
                        }
                        if (copy_from_user(&ling,optval,sizeof(ling))) {
                                ret = -EFAULT;
                                break;
                        }
                        if (!ling.l_onoff)
                                sock_reset_flag(sk, SOCK_LINGER);
                        else {
#if (BITS_PER_LONG == 32)
                                if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
                                        sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
                                else
#endif
                                        sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
                                sock_set_flag(sk, SOCK_LINGER);
                        }
                        break;

                case SO_BSDCOMPAT:
                        sock_warn_obsolete_bsdism("setsockopt");
                        break;

                case SO_PASSCRED:
                        if (valbool)
                                set_bit(SOCK_PASSCRED, &sock->flags);
                        else
                                clear_bit(SOCK_PASSCRED, &sock->flags);
                        break;

                case SO_TIMESTAMP:
                        if (valbool)  {
                                sock_set_flag(sk, SOCK_RCVTSTAMP);
                                sock_enable_timestamp(sk);
                        } else
                                sock_reset_flag(sk, SOCK_RCVTSTAMP);
                        break;

                case SO_RCVLOWAT:
                        if (val < 0)
                                val = INT_MAX;
                        sk->sk_rcvlowat = val ? : 1;
                        break;

                case SO_RCVTIMEO:
                        ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
                        break;

                case SO_SNDTIMEO:
                        ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
                        break;

#ifdef CONFIG_NETDEVICES
                case SO_BINDTODEVICE:
                {
                        char devname[IFNAMSIZ];

                        /* Sorry... */
                        if (!capable(CAP_NET_RAW)) {
                                ret = -EPERM;
                                break;
                        }

                        /* Bind this socket to a particular device like "eth0",
                         * as specified in the passed interface name. If the
                         * name is "" or the option length is zero the socket
                         * is not bound.
                         */

                        if (!valbool) {
                                sk->sk_bound_dev_if = 0;
                        } else {
                                if (optlen > IFNAMSIZ - 1)
                                        optlen = IFNAMSIZ - 1;
                                memset(devname, 0, sizeof(devname));
                                if (copy_from_user(devname, optval, optlen)) {
                                        ret = -EFAULT;
                                        break;
                                }

                                /* Remove any cached route for this socket. */
                                sk_dst_reset(sk);

                                if (devname[0] == '\0') {
                                        sk->sk_bound_dev_if = 0;
                                } else {
                                        struct net_device *dev = dev_get_by_name(devname);
                                        if (!dev) {
                                                ret = -ENODEV;
                                                break;
                                        }
                                        sk->sk_bound_dev_if = dev->ifindex;
                                        dev_put(dev);
                                }
                        }
                        break;
                }
#endif


                case SO_ATTACH_FILTER:
                        ret = -EINVAL;
                        if (optlen == sizeof(struct sock_fprog)) {
                                struct sock_fprog fprog;

                                ret = -EFAULT;
                                if (copy_from_user(&fprog, optval, sizeof(fprog)))
                                        break;

                                ret = sk_attach_filter(&fprog, sk);
                        }
                        break;

                case SO_DETACH_FILTER:
                        rcu_read_lock_bh();
                        filter = rcu_dereference(sk->sk_filter);
                        if (filter) {
                                rcu_assign_pointer(sk->sk_filter, NULL);
                                sk_filter_release(sk, filter);
                                rcu_read_unlock_bh();
                                break;
                        }
                        rcu_read_unlock_bh();
                        ret = -ENONET;
                        break;

                case SO_PASSSEC:
                        if (valbool)
                                set_bit(SOCK_PASSSEC, &sock->flags);
                        else
                                clear_bit(SOCK_PASSSEC, &sock->flags);
                        break;

                /* We implement the SO_SNDLOWAT etc to
                   not be settable (1003.1g 5.3) */
                default:
                        ret = -ENOPROTOOPT;
                        break;
        }
        release_sock(sk);
        return ret;
}

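/*
 * The doubling in SO_SNDBUF/SO_RCVBUF above is visible to userspace;
 * a hedged example, assuming fd is an ordinary socket:
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *
 * out now reads back as 2 * min(val, sysctl_rmem_max), i.e. 131072
 * here unless sysctl_rmem_max is smaller than 65536.
 */
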

int sock_getsockopt(struct socket *sock, int level, int optname,
                    char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;

        union
        {
                int val;
                struct linger ling;
                struct timeval tm;
        } v;

        unsigned int lv = sizeof(int);
        int len;

        if(get_user(len,optlen))
                return -EFAULT;
        if(len < 0)
                return -EINVAL;

        switch(optname)
        {
                case SO_DEBUG:
                        v.val = sock_flag(sk, SOCK_DBG);
                        break;

                case SO_DONTROUTE:
                        v.val = sock_flag(sk, SOCK_LOCALROUTE);
                        break;

                case SO_BROADCAST:
                        v.val = !!sock_flag(sk, SOCK_BROADCAST);
                        break;

                case SO_SNDBUF:
                        v.val = sk->sk_sndbuf;
                        break;

                case SO_RCVBUF:
                        v.val = sk->sk_rcvbuf;
                        break;

                case SO_REUSEADDR:
                        v.val = sk->sk_reuse;
                        break;

                case SO_KEEPALIVE:
                        v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
                        break;

                case SO_TYPE:
                        v.val = sk->sk_type;
                        break;

                case SO_ERROR:
                        v.val = -sock_error(sk);
                        if(v.val==0)
                                v.val = xchg(&sk->sk_err_soft, 0);
                        break;

                case SO_OOBINLINE:
                        v.val = !!sock_flag(sk, SOCK_URGINLINE);
                        break;

                case SO_NO_CHECK:
                        v.val = sk->sk_no_check;
                        break;

                case SO_PRIORITY:
                        v.val = sk->sk_priority;
                        break;

                case SO_LINGER:
                        lv              = sizeof(v.ling);
                        v.ling.l_onoff  = !!sock_flag(sk, SOCK_LINGER);
                        v.ling.l_linger = sk->sk_lingertime / HZ;
                        break;

                case SO_BSDCOMPAT:
                        sock_warn_obsolete_bsdism("getsockopt");
                        break;

                case SO_TIMESTAMP:
                        v.val = sock_flag(sk, SOCK_RCVTSTAMP);
                        break;

                case SO_RCVTIMEO:
                        lv=sizeof(struct timeval);
                        if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
                                v.tm.tv_sec = 0;
                                v.tm.tv_usec = 0;
                        } else {
                                v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
                                v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
                        }
                        break;

                case SO_SNDTIMEO:
                        lv=sizeof(struct timeval);
                        if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
                                v.tm.tv_sec = 0;
                                v.tm.tv_usec = 0;
                        } else {
                                v.tm.tv_sec = sk->sk_sndtimeo / HZ;
                                v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
                        }
                        break;

                case SO_RCVLOWAT:
                        v.val = sk->sk_rcvlowat;
                        break;

                case SO_SNDLOWAT:
                        v.val=1;
                        break;

                case SO_PASSCRED:
                        v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
                        break;

                case SO_PEERCRED:
                        if (len > sizeof(sk->sk_peercred))
                                len = sizeof(sk->sk_peercred);
                        if (copy_to_user(optval, &sk->sk_peercred, len))
                                return -EFAULT;
                        goto lenout;

                case SO_PEERNAME:
                {
                        char address[128];

                        if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
                                return -ENOTCONN;
                        if (lv < len)
                                return -EINVAL;
                        if (copy_to_user(optval, address, len))
                                return -EFAULT;
                        goto lenout;
                }

                /* Dubious BSD thing... Probably nobody even uses it, but
                 * the UNIX standard wants it for whatever reason... -DaveM
                 */
                case SO_ACCEPTCONN:
                        v.val = sk->sk_state == TCP_LISTEN;
                        break;

                case SO_PASSSEC:
                        v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
                        break;

                case SO_PEERSEC:
                        return security_socket_getpeersec_stream(sock, optval, optlen, len);

                default:
                        return(-ENOPROTOOPT);
        }
        if (len > lv)
                len = lv;
        if (copy_to_user(optval, &v, len))
                return -EFAULT;
lenout:
        if (put_user(len, optlen))
                return -EFAULT;
        return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
        sock_lock_init_class_and_name(sk,
                        af_family_slock_key_strings[sk->sk_family],
                        af_family_slock_keys + sk->sk_family,
                        af_family_key_strings[sk->sk_family],
                        af_family_keys + sk->sk_family);
}

/**
 *      sk_alloc - All socket objects are allocated here
 *      @family: protocol family
 *      @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *      @prot: struct proto associated with this new sock instance
 *      @zero_it: if we should zero the newly allocated sock
 */
struct sock *sk_alloc(int family, gfp_t priority,
                      struct proto *prot, int zero_it)
{
        struct sock *sk = NULL;
        struct kmem_cache *slab = prot->slab;

        if (slab != NULL)
                sk = kmem_cache_alloc(slab, priority);
        else
                sk = kmalloc(prot->obj_size, priority);

        if (sk) {
                if (zero_it) {
                        memset(sk, 0, prot->obj_size);
                        sk->sk_family = family;
                        /*
                         * See comment in struct sock definition to understand
                         * why we need sk_prot_creator -acme
                         */
                        sk->sk_prot = sk->sk_prot_creator = prot;
                        sock_lock_init(sk);
                }
                sock_vx_init(sk);
                sock_nx_init(sk);

                if (security_sk_alloc(sk, family, priority))
                        goto out_free;

                if (!try_module_get(prot->owner))
                        goto out_free;
        }
        return sk;

out_free:
        if (slab != NULL)
                kmem_cache_free(slab, sk);
        else
                kfree(sk);
        return NULL;
}

void sk_free(struct sock *sk)
{
        struct sk_filter *filter;
        struct module *owner = sk->sk_prot_creator->owner;

        if (sk->sk_destruct)
                sk->sk_destruct(sk);

        filter = rcu_dereference(sk->sk_filter);
        if (filter) {
                sk_filter_release(sk, filter);
                rcu_assign_pointer(sk->sk_filter, NULL);
        }

        sock_disable_timestamp(sk);

        if (atomic_read(&sk->sk_omem_alloc))
                printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
                       __FUNCTION__, atomic_read(&sk->sk_omem_alloc));

        security_sk_free(sk);
        vx_sock_dec(sk);
        clr_vx_info(&sk->sk_vx_info);
        sk->sk_xid = -1;
        clr_nx_info(&sk->sk_nx_info);
        sk->sk_nid = -1;
        if (sk->sk_prot_creator->slab != NULL)
                kmem_cache_free(sk->sk_prot_creator->slab, sk);
        else
                kfree(sk);
        module_put(owner);
}

struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
        struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0);

        if (newsk != NULL) {
                struct sk_filter *filter;

                sock_copy(newsk, sk);

                /* SANITY */
                sock_vx_init(newsk);
                sock_nx_init(newsk);
                sk_node_init(&newsk->sk_node);
                sock_lock_init(newsk);
                bh_lock_sock(newsk);

                atomic_set(&newsk->sk_rmem_alloc, 0);
                atomic_set(&newsk->sk_wmem_alloc, 0);
                atomic_set(&newsk->sk_omem_alloc, 0);
                skb_queue_head_init(&newsk->sk_receive_queue);
                skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
                skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

                rwlock_init(&newsk->sk_dst_lock);
                rwlock_init(&newsk->sk_callback_lock);
                lockdep_set_class(&newsk->sk_callback_lock,
                                   af_callback_keys + newsk->sk_family);

                newsk->sk_dst_cache     = NULL;
                newsk->sk_wmem_queued   = 0;
                newsk->sk_forward_alloc = 0;
                newsk->sk_send_head     = NULL;
                newsk->sk_backlog.head  = newsk->sk_backlog.tail = NULL;
                newsk->sk_userlocks     = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

                sock_reset_flag(newsk, SOCK_DONE);
                skb_queue_head_init(&newsk->sk_error_queue);

                filter = newsk->sk_filter;
                if (filter != NULL)
                        sk_filter_charge(newsk, filter);

                if (unlikely(xfrm_sk_clone_policy(newsk))) {
                        /* It is still raw copy of parent, so invalidate
                         * destructor and make plain sk_free() */
                        newsk->sk_destruct = NULL;
                        sk_free(newsk);
                        newsk = NULL;
                        goto out;
                }

                newsk->sk_err      = 0;
                newsk->sk_priority = 0;
                atomic_set(&newsk->sk_refcnt, 2);

                set_vx_info(&newsk->sk_vx_info, sk->sk_vx_info);
                newsk->sk_xid = sk->sk_xid;
                vx_sock_inc(newsk);
                set_nx_info(&newsk->sk_nx_info, sk->sk_nx_info);
                newsk->sk_nid = sk->sk_nid;

                /*
                 * Increment the counter in the same struct proto as the master
                 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, which
                 * is the same as sk->sk_prot->socks, as this field was copied
                 * with memcpy).
                 *
                 * This _changes_ the previous behaviour, where
                 * tcp_create_openreq_child always incremented the
                 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
                 * to be taken into account in all callers. -acme
                 */
                sk_refcnt_debug_inc(newsk);
                newsk->sk_socket = NULL;
                newsk->sk_sleep  = NULL;

                if (newsk->sk_prot->sockets_allocated)
                        atomic_inc(newsk->sk_prot->sockets_allocated);
        }
out:
        return newsk;
}

EXPORT_SYMBOL_GPL(sk_clone);

void __init sk_init(void)
{
        if (num_physpages <= 4096) {
                sysctl_wmem_max = 32767;
                sysctl_rmem_max = 32767;
                sysctl_wmem_default = 32767;
                sysctl_rmem_default = 32767;
        } else if (num_physpages >= 131072) {
                sysctl_wmem_max = 131071;
                sysctl_rmem_max = 131071;
        }
}

/*
 *      Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;

        /* In case it might be waiting for more memory. */
        atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
        if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
                sk->sk_write_space(sk);
        sock_put(sk);
}

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;

        atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}


int sock_i_uid(struct sock *sk)
{
        int uid;

        read_lock(&sk->sk_callback_lock);
        uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
        read_unlock(&sk->sk_callback_lock);
        return uid;
}

unsigned long sock_i_ino(struct sock *sk)
{
        unsigned long ino;

        read_lock(&sk->sk_callback_lock);
        ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
        read_unlock(&sk->sk_callback_lock);
        return ino;
}

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
                             gfp_t priority)
{
        if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
                struct sk_buff * skb = alloc_skb(size, priority);
                if (skb) {
                        skb_set_owner_w(skb, sk);
                        return skb;
                }
        }
        return NULL;
}

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
                             gfp_t priority)
{
        if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
                struct sk_buff *skb = alloc_skb(size, priority);
                if (skb) {
                        skb_set_owner_r(skb, sk);
                        return skb;
                }
        }
        return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
        if ((unsigned)size <= sysctl_optmem_max &&
            atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
                void *mem;
                /* First do the add, to avoid the race if kmalloc
                 * might sleep.
                 */
                atomic_add(size, &sk->sk_omem_alloc);
                mem = kmalloc(size, priority);
                if (mem)
                        return mem;
                atomic_sub(size, &sk->sk_omem_alloc);
        }
        return NULL;
}

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
        kfree(mem);
        atomic_sub(size, &sk->sk_omem_alloc);
}

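/*
 * sock_kmalloc()/sock_kfree_s() must be paired with the same size,
 * since sk_omem_alloc is charged on allocation and credited on free.
 * A minimal sketch (struct my_opt is hypothetical):
 *
 *	struct my_opt *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *
 *	if (opt == NULL)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 */
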
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock * sk, long timeo)
{
        DEFINE_WAIT(wait);

        clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
        for (;;) {
                if (!timeo)
                        break;
                if (signal_pending(current))
                        break;
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
                if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
                        break;
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        break;
                if (sk->sk_err)
                        break;
                timeo = schedule_timeout(timeo);
        }
        finish_wait(sk->sk_sleep, &wait);
        return timeo;
}


/*
 *      Generic send/receive buffer handlers
 */

static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
                                            unsigned long header_len,
                                            unsigned long data_len,
                                            int noblock, int *errcode)
{
        struct sk_buff *skb;
        gfp_t gfp_mask;
        long timeo;
        int err;

        gfp_mask = sk->sk_allocation;
        if (gfp_mask & __GFP_WAIT)
                gfp_mask |= __GFP_REPEAT;

        timeo = sock_sndtimeo(sk, noblock);
        while (1) {
                err = sock_error(sk);
                if (err != 0)
                        goto failure;

                err = -EPIPE;
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        goto failure;

                if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
                        skb = alloc_skb(header_len, gfp_mask);
                        if (skb) {
                                int npages;
                                int i;

                                /* No pages, we're done... */
                                if (!data_len)
                                        break;

                                npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
                                skb->truesize += data_len;
                                skb_shinfo(skb)->nr_frags = npages;
                                for (i = 0; i < npages; i++) {
                                        struct page *page;
                                        skb_frag_t *frag;

                                        page = alloc_pages(sk->sk_allocation, 0);
                                        if (!page) {
                                                err = -ENOBUFS;
                                                skb_shinfo(skb)->nr_frags = i;
                                                kfree_skb(skb);
                                                goto failure;
                                        }

                                        frag = &skb_shinfo(skb)->frags[i];
                                        frag->page = page;
                                        frag->page_offset = 0;
                                        frag->size = (data_len >= PAGE_SIZE ?
                                                      PAGE_SIZE :
                                                      data_len);
                                        data_len -= PAGE_SIZE;
                                }

                                /* Full success... */
                                break;
                        }
                        err = -ENOBUFS;
                        goto failure;
                }
                set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                err = -EAGAIN;
                if (!timeo)
                        goto failure;
                if (signal_pending(current))
                        goto interrupted;
                timeo = sock_wait_for_wmem(sk, timeo);
        }

        skb_set_owner_w(skb, sk);
        return skb;

interrupted:
        err = sock_intr_errno(timeo);
failure:
        *errcode = err;
        return NULL;
}

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
                                    int noblock, int *errcode)
{
        return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}

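/*
 * Sketch of how a datagram sendmsg() path might use this (error
 * handling abbreviated; hlen stands for the protocol's header space):
 *
 *	skb = sock_alloc_send_skb(sk, len + hlen,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (skb == NULL)
 *		goto out;	   err holds -EAGAIN, -EPIPE, ...
 *
 * On success the skb has already been charged to sk_wmem_alloc via
 * skb_set_owner_w().
 */
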
static void __lock_sock(struct sock *sk)
{
        DEFINE_WAIT(wait);

        for(;;) {
                prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
                                        TASK_UNINTERRUPTIBLE);
                spin_unlock_bh(&sk->sk_lock.slock);
                schedule();
                spin_lock_bh(&sk->sk_lock.slock);
                if(!sock_owned_by_user(sk))
                        break;
        }
        finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
{
        struct sk_buff *skb = sk->sk_backlog.head;

        do {
                sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
                bh_unlock_sock(sk);

                do {
                        struct sk_buff *next = skb->next;

                        skb->next = NULL;
                        sk->sk_backlog_rcv(sk, skb);

                        /*
                         * We are in process context here with softirqs
                         * disabled, use cond_resched_softirq() to preempt.
                         * This is safe to do because we've taken the backlog
                         * queue private:
                         */
                        cond_resched_softirq();

                        skb = next;
                } while (skb != NULL);

                bh_lock_sock(sk);
        } while((skb = sk->sk_backlog.head) != NULL);
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
        int rc;
        DEFINE_WAIT(wait);

        prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
        set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
        clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        finish_wait(sk->sk_sleep, &wait);
        return rc;
}

EXPORT_SYMBOL(sk_wait_data);

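/*
 * Receive paths generally loop on this with the socket locked; a
 * hedged sketch, not copied from any one protocol:
 *
 *	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		sk_wait_data(sk, &timeo);
 *	}
 */
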
1307 /*
1308  * Set of default routines for initialising struct proto_ops when
1309  * the protocol does not support a particular function. In certain
1310  * cases where it makes no sense for a protocol to have a "do nothing"
1311  * function, some default processing is provided.
1312  */
1313
1314 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
1315 {
1316         return -EOPNOTSUPP;
1317 }
1318
1319 int sock_no_connect(struct socket *sock, struct sockaddr *saddr, 
1320                     int len, int flags)
1321 {
1322         return -EOPNOTSUPP;
1323 }
1324
1325 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
1326 {
1327         return -EOPNOTSUPP;
1328 }
1329
1330 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
1331 {
1332         return -EOPNOTSUPP;
1333 }
1334
1335 int sock_no_getname(struct socket *sock, struct sockaddr *saddr, 
1336                     int *len, int peer)
1337 {
1338         return -EOPNOTSUPP;
1339 }
1340
1341 unsigned int sock_no_poll(struct file * file, struct socket *sock, poll_table *pt)
1342 {
1343         return 0;
1344 }
1345
1346 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1347 {
1348         return -EOPNOTSUPP;
1349 }
1350
1351 int sock_no_listen(struct socket *sock, int backlog)
1352 {
1353         return -EOPNOTSUPP;
1354 }
1355
1356 int sock_no_shutdown(struct socket *sock, int how)
1357 {
1358         return -EOPNOTSUPP;
1359 }
1360
1361 int sock_no_setsockopt(struct socket *sock, int level, int optname,
1362                     char __user *optval, int optlen)
1363 {
1364         return -EOPNOTSUPP;
1365 }
1366
1367 int sock_no_getsockopt(struct socket *sock, int level, int optname,
1368                     char __user *optval, int __user *optlen)
1369 {
1370         return -EOPNOTSUPP;
1371 }
1372
1373 int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1374                     size_t len)
1375 {
1376         return -EOPNOTSUPP;
1377 }
1378
1379 int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
1380                     size_t len, int flags)
1381 {
1382         return -EOPNOTSUPP;
1383 }
1384
1385 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1386 {
1387         /* Mirror missing mmap method error code */
1388         return -ENODEV;
1389 }
1390
1391 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
1392 {
1393         ssize_t res;
1394         struct msghdr msg = {.msg_flags = flags};
1395         struct kvec iov;
1396         char *kaddr = kmap(page);
1397         iov.iov_base = kaddr + offset;
1398         iov.iov_len = size;
1399         res = kernel_sendmsg(sock, &msg, &iov, 1, size);
1400         kunmap(page);
1401         return res;
1402 }
1403
1404 /*
1405  *      Default Socket Callbacks
1406  */
1407
1408 static void sock_def_wakeup(struct sock *sk)
1409 {
1410         read_lock(&sk->sk_callback_lock);
1411         if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
1412                 wake_up_interruptible_all(sk->sk_sleep);
1413         read_unlock(&sk->sk_callback_lock);
1414 }
1415
1416 static void sock_def_error_report(struct sock *sk)
1417 {
1418         read_lock(&sk->sk_callback_lock);
1419         if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
1420                 wake_up_interruptible(sk->sk_sleep);
1421         sk_wake_async(sk,0,POLL_ERR); 
1422         read_unlock(&sk->sk_callback_lock);
1423 }
1424
1425 static void sock_def_readable(struct sock *sk, int len)
1426 {
1427         read_lock(&sk->sk_callback_lock);
1428         if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
1429                 wake_up_interruptible(sk->sk_sleep);
1430         sk_wake_async(sk, 1, POLL_IN);
1431         read_unlock(&sk->sk_callback_lock);
1432 }
1433
1434 static void sock_def_write_space(struct sock *sk)
1435 {
1436         read_lock(&sk->sk_callback_lock);
1437
1438         /* Do not wake up a writer until he can make "significant"
1439          * progress.  --DaveM
1440          */
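        /* I.e. wake the writer only once at least half of the send buffer
         * is free: (wmem_alloc << 1) <= sndbuf means wmem_alloc <= sndbuf/2.
         */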
1441         if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
1442                 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
1443                         wake_up_interruptible(sk->sk_sleep);
1444
1445                 /* Should agree with poll, otherwise some programs break */
1446                 if (sock_writeable(sk))
1447                         sk_wake_async(sk, 2, POLL_OUT);
1448         }
1449
1450         read_unlock(&sk->sk_callback_lock);
1451 }
1452
1453 static void sock_def_destruct(struct sock *sk)
1454 {
1455         kfree(sk->sk_protinfo);
1456 }
1457
1458 void sk_send_sigurg(struct sock *sk)
1459 {
1460         if (sk->sk_socket && sk->sk_socket->file)
1461                 if (send_sigurg(&sk->sk_socket->file->f_owner))
1462                         sk_wake_async(sk, 3, POLL_PRI);
1463 }
1464
1465 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
1466                     unsigned long expires)
1467 {
1468         if (!mod_timer(timer, expires))
1469                 sock_hold(sk);
1470 }
1471
1472 EXPORT_SYMBOL(sk_reset_timer);
1473
1474 void sk_stop_timer(struct sock *sk, struct timer_list *timer)
1475 {
1476         if (timer_pending(timer) && del_timer(timer))
1477                 __sock_put(sk);
1478 }
1479
1480 EXPORT_SYMBOL(sk_stop_timer);
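
/*
 * Example (illustrative sketch; FOO_RTO is a hypothetical timeout):
 * sk_reset_timer() takes a reference on the socket when it arms a timer
 * that was not already pending, and sk_stop_timer() drops it again, so a
 * protocol typically pairs them:
 *
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + FOO_RTO);
 *	...
 *	sk_stop_timer(sk, &sk->sk_timer);
 *
 * A timer handler that does not re-arm itself must call sock_put(sk) to
 * release the reference taken when the timer was armed.
 */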
1481
1482 void sock_init_data(struct socket *sock, struct sock *sk)
1483 {
1484         skb_queue_head_init(&sk->sk_receive_queue);
1485         skb_queue_head_init(&sk->sk_write_queue);
1486         skb_queue_head_init(&sk->sk_error_queue);
1487 #ifdef CONFIG_NET_DMA
1488         skb_queue_head_init(&sk->sk_async_wait_queue);
1489 #endif
1490
1491         sk->sk_send_head        =       NULL;
1492
1493         init_timer(&sk->sk_timer);
1494         
1495         sk->sk_allocation       =       GFP_KERNEL;
1496         sk->sk_rcvbuf           =       sysctl_rmem_default;
1497         sk->sk_sndbuf           =       sysctl_wmem_default;
1498         sk->sk_state            =       TCP_CLOSE;
1499         sk->sk_socket           =       sock;
1500
1501         sock_set_flag(sk, SOCK_ZAPPED);
1502
1503         if (sock)
1504         {
1505                 sk->sk_type     =       sock->type;
1506                 sk->sk_sleep    =       &sock->wait;
1507                 sock->sk        =       sk;
1508         } else
1509                 sk->sk_sleep    =       NULL;
1510
1511         rwlock_init(&sk->sk_dst_lock);
1512         rwlock_init(&sk->sk_callback_lock);
1513         lockdep_set_class(&sk->sk_callback_lock,
1514                            af_callback_keys + sk->sk_family);
1515
1516         sk->sk_state_change     =       sock_def_wakeup;
1517         sk->sk_data_ready       =       sock_def_readable;
1518         sk->sk_write_space      =       sock_def_write_space;
1519         sk->sk_error_report     =       sock_def_error_report;
1520         sk->sk_destruct         =       sock_def_destruct;
1521
1522         sk->sk_sndmsg_page      =       NULL;
1523         sk->sk_sndmsg_off       =       0;
1524
1525         sk->sk_peercred.pid     =       0;
1526         sk->sk_peercred.uid     =       -1;
1527         sk->sk_peercred.gid     =       -1;
1528         sk->sk_write_pending    =       0;
1529         sk->sk_rcvlowat         =       1;
1530         sk->sk_rcvtimeo         =       MAX_SCHEDULE_TIMEOUT;
1531         sk->sk_sndtimeo         =       MAX_SCHEDULE_TIMEOUT;
1532
1533         sk->sk_stamp.tv_sec     = -1L;
1534         sk->sk_stamp.tv_usec    = -1L;
1535
1536         set_vx_info(&sk->sk_vx_info, current->vx_info);
1537         sk->sk_xid = vx_current_xid();
1538         vx_sock_inc(sk);
1539         set_nx_info(&sk->sk_nx_info, current->nx_info);
1540         sk->sk_nid = nx_current_nid();
1541         atomic_set(&sk->sk_refcnt, 1);
1542 }
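
/*
 * Example (illustrative sketch; the foo_* callbacks are hypothetical): a
 * protocol's init path normally calls sock_init_data() first and then
 * overrides whichever defaults it needs:
 *
 *	static int foo_init_sock(struct socket *sock, struct sock *sk)
 *	{
 *		sock_init_data(sock, sk);
 *		sk->sk_data_ready = foo_data_ready;	(replaces sock_def_readable)
 *		sk->sk_destruct   = foo_destruct;	(replaces sock_def_destruct)
 *		return 0;
 *	}
 */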
1543
1544 void fastcall lock_sock_nested(struct sock *sk, int subclass)
1545 {
1546         might_sleep();
1547         spin_lock_bh(&sk->sk_lock.slock);
1548         if (sk->sk_lock.owner)
1549                 __lock_sock(sk);
1550         sk->sk_lock.owner = (void *)1;
1551         spin_unlock(&sk->sk_lock.slock);
1552         /*
1553          * The sk_lock has mutex_lock() semantics here:
1554          */
1555         mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
1556         local_bh_enable();
1557 }
1558
1559 EXPORT_SYMBOL(lock_sock_nested);
1560
1561 void fastcall release_sock(struct sock *sk)
1562 {
1563         /*
1564          * The sk_lock has mutex_unlock() semantics:
1565          */
1566         mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
1567
1568         spin_lock_bh(&sk->sk_lock.slock);
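        /* Run any packets that were queued on sk_backlog while the socket
         * was owned by process context.
         */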
1569         if (sk->sk_backlog.tail)
1570                 __release_sock(sk);
1571         sk->sk_lock.owner = NULL;
1572         if (waitqueue_active(&sk->sk_lock.wq))
1573                 wake_up(&sk->sk_lock.wq);
1574         spin_unlock_bh(&sk->sk_lock.slock);
1575 }
1576 EXPORT_SYMBOL(release_sock);
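
/*
 * Example (illustrative sketch): lock_sock()/release_sock() bracket
 * process-context use of a socket.  Packets that arrive while the lock is
 * held are queued on sk->sk_backlog and run by __release_sock():
 *
 *	lock_sock(sk);		(may sleep)
 *	... update protocol state ...
 *	release_sock(sk);	(runs the backlog, wakes lock waiters)
 */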
1577
1578 int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
1579 {
1580         if (!sock_flag(sk, SOCK_TIMESTAMP))
1581                 sock_enable_timestamp(sk);
1582         if (sk->sk_stamp.tv_sec == -1) 
1583                 return -ENOENT;
1584         if (sk->sk_stamp.tv_sec == 0)
1585                 do_gettimeofday(&sk->sk_stamp);
1586         return copy_to_user(userstamp, &sk->sk_stamp, sizeof(struct timeval)) ?
1587                 -EFAULT : 0; 
1588 }
1589 EXPORT_SYMBOL(sock_get_timestamp);
1590
1591 void sock_enable_timestamp(struct sock *sk)
1592 {       
1593         if (!sock_flag(sk, SOCK_TIMESTAMP)) { 
1594                 sock_set_flag(sk, SOCK_TIMESTAMP);
1595                 net_enable_timestamp();
1596         }
1597 }
1598 EXPORT_SYMBOL(sock_enable_timestamp); 
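
/*
 * Example (illustrative sketch): sock_get_timestamp() backs the SIOCGSTAMP
 * ioctl, so user space can read the last packet's timestamp roughly as:
 *
 *	struct timeval tv;
 *
 *	if (ioctl(fd, SIOCGSTAMP, &tv) == 0)
 *		printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
 */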
1599
1600 /*
1601  *      Get a socket option on a socket.
1602  *
1603  *      FIX: POSIX 1003.1g is very ambiguous here. It states that
1604  *      asynchronous errors should be reported by getsockopt. We assume
1605  *      this means if you specify SO_ERROR (otherwise what's the point of it).
1606  */
1607 int sock_common_getsockopt(struct socket *sock, int level, int optname,
1608                            char __user *optval, int __user *optlen)
1609 {
1610         struct sock *sk = sock->sk;
1611
1612         return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
1613 }
1614
1615 EXPORT_SYMBOL(sock_common_getsockopt);
1616
1617 #ifdef CONFIG_COMPAT
1618 int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
1619                                   char __user *optval, int __user *optlen)
1620 {
1621         struct sock *sk = sock->sk;
1622
1623         if (sk->sk_prot->compat_getsockopt != NULL)
1624                 return sk->sk_prot->compat_getsockopt(sk, level, optname,
1625                                                       optval, optlen);
1626         return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
1627 }
1628 EXPORT_SYMBOL(compat_sock_common_getsockopt);
1629 #endif
1630
1631 int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
1632                         struct msghdr *msg, size_t size, int flags)
1633 {
1634         struct sock *sk = sock->sk;
1635         int addr_len = 0;
1636         int err;
1637
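        /* Hand MSG_DONTWAIT to the protocol as the separate "noblock"
         * argument and strip it from the flags the protocol otherwise sees.
         */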
1638         err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
1639                                    flags & ~MSG_DONTWAIT, &addr_len);
1640         if (err >= 0)
1641                 msg->msg_namelen = addr_len;
1642         return err;
1643 }
1644
1645 EXPORT_SYMBOL(sock_common_recvmsg);
1646
1647 /*
1648  *      Set socket options on an inet socket.
1649  */
1650 int sock_common_setsockopt(struct socket *sock, int level, int optname,
1651                            char __user *optval, int optlen)
1652 {
1653         struct sock *sk = sock->sk;
1654
1655         return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
1656 }
1657
1658 EXPORT_SYMBOL(sock_common_setsockopt);
1659
1660 #ifdef CONFIG_COMPAT
1661 int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
1662                                   char __user *optval, int optlen)
1663 {
1664         struct sock *sk = sock->sk;
1665
1666         if (sk->sk_prot->compat_setsockopt != NULL)
1667                 return sk->sk_prot->compat_setsockopt(sk, level, optname,
1668                                                       optval, optlen);
1669         return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
1670 }
1671 EXPORT_SYMBOL(compat_sock_common_setsockopt);
1672 #endif
1673
1674 void sk_common_release(struct sock *sk)
1675 {
1676         if (sk->sk_prot->destroy)
1677                 sk->sk_prot->destroy(sk);
1678
1679         /*
1680          * Observation: when sk_common_release() is called, processes have
1681          * no access to the socket, but the network stack still does.
1682          * Step one, detach it from networking:
1683          *
1684          * A. Remove from hash tables.
1685          */
1686
1687         sk->sk_prot->unhash(sk);
1688
1689         /*
1690          * At this point the socket cannot receive new packets, but some may
1691          * still be in flight, because another CPU ran the receiver and did
1692          * its hash-table lookup before we unhashed the socket. Those packets
1693          * will reach the receive queue and be purged by the socket destructor.
1694          *
1695          * We also still have packets pending on the receive queue, and
1696          * probably our own packets waiting in device queues. The destructor
1697          * drains the receive queue, but transmitted packets delay socket
1698          * destruction until the last reference is released.
1699          */
1700
1701         sock_orphan(sk);
1702
1703         xfrm_sk_free_policy(sk);
1704
1705         sk_refcnt_debug_release(sk);
1706         sock_put(sk);
1707 }
1708
1709 EXPORT_SYMBOL(sk_common_release);
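
/*
 * Example: connectionless protocols can implement ->close as little more
 * than a call to sk_common_release(); 2.6-era UDP, for instance, is
 * essentially:
 *
 *	static void udp_close(struct sock *sk, long timeout)
 *	{
 *		sk_common_release(sk);
 *	}
 */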
1710
1711 static DEFINE_RWLOCK(proto_list_lock);
1712 static LIST_HEAD(proto_list);
1713
1714 int proto_register(struct proto *prot, int alloc_slab)
1715 {
1716         char *request_sock_slab_name = NULL;
1717         char *timewait_sock_slab_name;
1718         int rc = -ENOBUFS;
1719
1720         if (alloc_slab) {
1721                 prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
1722                                                SLAB_HWCACHE_ALIGN, NULL, NULL);
1723
1724                 if (prot->slab == NULL) {
1725                         printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
1726                                prot->name);
1727                         goto out;
1728                 }
1729
1730                 if (prot->rsk_prot != NULL) {
1731                         static const char mask[] = "request_sock_%s";
1732
1733                         request_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
1734                         if (request_sock_slab_name == NULL)
1735                                 goto out_free_sock_slab;
1736
1737                         sprintf(request_sock_slab_name, mask, prot->name);
1738                         prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name,
1739                                                                  prot->rsk_prot->obj_size, 0,
1740                                                                  SLAB_HWCACHE_ALIGN, NULL, NULL);
1741
1742                         if (prot->rsk_prot->slab == NULL) {
1743                                 printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
1744                                        prot->name);
1745                                 goto out_free_request_sock_slab_name;
1746                         }
1747                 }
1748
1749                 if (prot->twsk_prot != NULL) {
1750                         static const char mask[] = "tw_sock_%s";
1751
1752                         timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
1753
1754                         if (timewait_sock_slab_name == NULL)
1755                                 goto out_free_request_sock_slab;
1756
1757                         sprintf(timewait_sock_slab_name, mask, prot->name);
1758                         prot->twsk_prot->twsk_slab =
1759                                 kmem_cache_create(timewait_sock_slab_name,
1760                                                   prot->twsk_prot->twsk_obj_size,
1761                                                   0, SLAB_HWCACHE_ALIGN,
1762                                                   NULL, NULL);
1763                         if (prot->twsk_prot->twsk_slab == NULL)
1764                                 goto out_free_timewait_sock_slab_name;
1765                 }
1766         }
1767
1768         write_lock(&proto_list_lock);
1769         list_add(&prot->node, &proto_list);
1770         write_unlock(&proto_list_lock);
1771         rc = 0;
1772 out:
1773         return rc;
1774 out_free_timewait_sock_slab_name:
1775         kfree(timewait_sock_slab_name);
1776 out_free_request_sock_slab:
1777         if (prot->rsk_prot && prot->rsk_prot->slab) {
1778                 kmem_cache_destroy(prot->rsk_prot->slab);
1779                 prot->rsk_prot->slab = NULL;
1780         }
1781 out_free_request_sock_slab_name:
1782         kfree(request_sock_slab_name);
1783 out_free_sock_slab:
1784         kmem_cache_destroy(prot->slab);
1785         prot->slab = NULL;
1786         goto out;
1787 }
1788
1789 EXPORT_SYMBOL(proto_register);
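
/*
 * Example (illustrative sketch; the foo_* names are hypothetical): a
 * protocol module registers its struct proto once at init time, passing
 * alloc_slab != 0 so the sock slab (and any request/timewait slabs) is
 * created here:
 *
 *	static struct proto foo_proto = {
 *		.name     = "FOO",
 *		.owner    = THIS_MODULE,
 *		.obj_size = sizeof(struct foo_sock),
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return proto_register(&foo_proto, 1);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		proto_unregister(&foo_proto);
 *	}
 */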
1790
1791 void proto_unregister(struct proto *prot)
1792 {
1793         write_lock(&proto_list_lock);
1794         list_del(&prot->node);
1795         write_unlock(&proto_list_lock);
1796
1797         if (prot->slab != NULL) {
1798                 kmem_cache_destroy(prot->slab);
1799                 prot->slab = NULL;
1800         }
1801
1802         if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
1803                 const char *name = kmem_cache_name(prot->rsk_prot->slab);
1804
1805                 kmem_cache_destroy(prot->rsk_prot->slab);
1806                 kfree(name);
1807                 prot->rsk_prot->slab = NULL;
1808         }
1809
1810         if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
1811                 const char *name = kmem_cache_name(prot->twsk_prot->twsk_slab);
1812
1813                 kmem_cache_destroy(prot->twsk_prot->twsk_slab);
1814                 kfree(name);
1815                 prot->twsk_prot->twsk_slab = NULL;
1816         }
1817 }
1818
1819 EXPORT_SYMBOL(proto_unregister);
1820
1821 #ifdef CONFIG_PROC_FS
1822 static inline struct proto *__proto_head(void)
1823 {
1824         return list_entry(proto_list.next, struct proto, node);
1825 }
1826
1827 static inline struct proto *proto_head(void)
1828 {
1829         return list_empty(&proto_list) ? NULL : __proto_head();
1830 }
1831
1832 static inline struct proto *proto_next(struct proto *proto)
1833 {
1834         return proto->node.next == &proto_list ? NULL :
1835                 list_entry(proto->node.next, struct proto, node);
1836 }
1837
1838 static inline struct proto *proto_get_idx(loff_t pos)
1839 {
1840         struct proto *proto;
1841         loff_t i = 0;
1842
1843         list_for_each_entry(proto, &proto_list, node)
1844                 if (i++ == pos)
1845                         goto out;
1846
1847         proto = NULL;
1848 out:
1849         return proto;
1850 }
1851
1852 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
1853 {
1854         read_lock(&proto_list_lock);
1855         return *pos ? proto_get_idx(*pos - 1) : SEQ_START_TOKEN;
1856 }
1857
1858 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1859 {
1860         ++*pos;
1861         return v == SEQ_START_TOKEN ? proto_head() : proto_next(v);
1862 }
1863
1864 static void proto_seq_stop(struct seq_file *seq, void *v)
1865 {
1866         read_unlock(&proto_list_lock);
1867 }
1868
1869 static char proto_method_implemented(const void *method)
1870 {
1871         return method == NULL ? 'n' : 'y';
1872 }
1873
1874 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
1875 {
1876         seq_printf(seq, "%-9s %4u %6d  %6d   %-3s %6u   %-3s  %-10s "
1877                         "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
1878                    proto->name,
1879                    proto->obj_size,
1880                    proto->sockets_allocated != NULL ? atomic_read(proto->sockets_allocated) : -1,
1881                    proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
1882                    proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
1883                    proto->max_header,
1884                    proto->slab == NULL ? "no" : "yes",
1885                    module_name(proto->owner),
1886                    proto_method_implemented(proto->close),
1887                    proto_method_implemented(proto->connect),
1888                    proto_method_implemented(proto->disconnect),
1889                    proto_method_implemented(proto->accept),
1890                    proto_method_implemented(proto->ioctl),
1891                    proto_method_implemented(proto->init),
1892                    proto_method_implemented(proto->destroy),
1893                    proto_method_implemented(proto->shutdown),
1894                    proto_method_implemented(proto->setsockopt),
1895                    proto_method_implemented(proto->getsockopt),
1896                    proto_method_implemented(proto->sendmsg),
1897                    proto_method_implemented(proto->recvmsg),
1898                    proto_method_implemented(proto->sendpage),
1899                    proto_method_implemented(proto->bind),
1900                    proto_method_implemented(proto->backlog_rcv),
1901                    proto_method_implemented(proto->hash),
1902                    proto_method_implemented(proto->unhash),
1903                    proto_method_implemented(proto->get_port),
1904                    proto_method_implemented(proto->enter_memory_pressure));
1905 }
1906
1907 static int proto_seq_show(struct seq_file *seq, void *v)
1908 {
1909         if (v == SEQ_START_TOKEN)
1910                 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
1911                            "protocol",
1912                            "size",
1913                            "sockets",
1914                            "memory",
1915                            "press",
1916                            "maxhdr",
1917                            "slab",
1918                            "module",
1919                            "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
1920         else
1921                 proto_seq_printf(seq, v);
1922         return 0;
1923 }
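
/*
 * Each row of /proc/net/protocols pairs the header printed above with one
 * registered proto; the trailing y/n columns report which struct proto
 * methods (close, connect, disconnect, accept, ...) that protocol
 * implements, in the order of the "cl co di ac ..." header fields.
 */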
1924
1925 static struct seq_operations proto_seq_ops = {
1926         .start  = proto_seq_start,
1927         .next   = proto_seq_next,
1928         .stop   = proto_seq_stop,
1929         .show   = proto_seq_show,
1930 };
1931
1932 static int proto_seq_open(struct inode *inode, struct file *file)
1933 {
1934         return seq_open(file, &proto_seq_ops);
1935 }
1936
1937 static struct file_operations proto_seq_fops = {
1938         .owner          = THIS_MODULE,
1939         .open           = proto_seq_open,
1940         .read           = seq_read,
1941         .llseek         = seq_lseek,
1942         .release        = seq_release,
1943 };
1944
1945 static int __init proto_init(void)
1946 {
1947         /* register /proc/net/protocols */
1948         return proc_net_fops_create("protocols", S_IRUGO, &proto_seq_fops) == NULL ? -ENOBUFS : 0;
1949 }
1950
1951 subsys_initcall(proto_init);
1952
1953 #endif /* PROC_FS */
1954
1955 EXPORT_SYMBOL(sk_alloc);
1956 EXPORT_SYMBOL(sk_free);
1957 EXPORT_SYMBOL(sk_send_sigurg);
1958 EXPORT_SYMBOL(sock_alloc_send_skb);
1959 EXPORT_SYMBOL(sock_init_data);
1960 EXPORT_SYMBOL(sock_kfree_s);
1961 EXPORT_SYMBOL(sock_kmalloc);
1962 EXPORT_SYMBOL(sock_no_accept);
1963 EXPORT_SYMBOL(sock_no_bind);
1964 EXPORT_SYMBOL(sock_no_connect);
1965 EXPORT_SYMBOL(sock_no_getname);
1966 EXPORT_SYMBOL(sock_no_getsockopt);
1967 EXPORT_SYMBOL(sock_no_ioctl);
1968 EXPORT_SYMBOL(sock_no_listen);
1969 EXPORT_SYMBOL(sock_no_mmap);
1970 EXPORT_SYMBOL(sock_no_poll);
1971 EXPORT_SYMBOL(sock_no_recvmsg);
1972 EXPORT_SYMBOL(sock_no_sendmsg);
1973 EXPORT_SYMBOL(sock_no_sendpage);
1974 EXPORT_SYMBOL(sock_no_setsockopt);
1975 EXPORT_SYMBOL(sock_no_shutdown);
1976 EXPORT_SYMBOL(sock_no_socketpair);
1977 EXPORT_SYMBOL(sock_rfree);
1978 EXPORT_SYMBOL(sock_setsockopt);
1979 EXPORT_SYMBOL(sock_wfree);
1980 EXPORT_SYMBOL(sock_wmalloc);
1981 EXPORT_SYMBOL(sock_i_uid);
1982 EXPORT_SYMBOL(sock_i_ino);
1983 EXPORT_SYMBOL(sysctl_optmem_max);
1984 #ifdef CONFIG_SYSCTL
1985 EXPORT_SYMBOL(sysctl_rmem_max);
1986 EXPORT_SYMBOL(sysctl_wmem_max);
1987 #endif