2 * NET4: Implementation of BSD Unix domain sockets.
4 * Authors: Alan Cox, <alan.cox@linux.org>
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
11 * Version: $Id: af_unix.c,v 1.133 2002/02/08 03:57:19 davem Exp $
14 * Linus Torvalds : Assorted bug cures.
15 * Niibe Yutaka : async I/O support.
16 * Carsten Paeth : PF_UNIX check, address fixes.
17 * Alan Cox : Limit size of allocated blocks.
18 * Alan Cox : Fixed the stupid socketpair bug.
19 * Alan Cox : BSD compatibility fine tuning.
20 * Alan Cox : Fixed a bug in connect when interrupted.
21 * Alan Cox : Sorted out a proper draft version of
22 * file descriptor passing hacked up from
24 * Marty Leisner : Fixes to fd passing
25 * Nick Nevin : recvmsg bugfix.
26 * Alan Cox : Started proper garbage collector
27 * Heiko EiBfeldt : Missing verify_area check
28 * Alan Cox : Started POSIXisms
29 * Andreas Schwab : Replace inode by dentry for proper
31 * Kirk Petersen : Made this a module
32 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
 34 *	      Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 35 *					by above two patches.
36 * Andrea Arcangeli : If possible we block in connect(2)
37 * if the max backlog of the listen socket
 38 *					has been reached. This won't break
39 * old apps and it will avoid huge amount
40 * of socks hashed (this for unix_gc()
41 * performances reasons).
42 * Security fix that limits the max
43 * number of socks to 2*max_files and
44 * the number of skb queueable in the
46 * Artur Skawina : Hash function optimizations
47 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
48 * Malcolm Beattie : Set peercred for socketpair
49 * Michal Ostrowski : Module initialization cleanup.
50 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
51 * the core infrastructure is doing that
52 * for all net proto families now (2.5.69+)
55 * Known differences from reference BSD that was tested:
58 * ECONNREFUSED is not returned from one end of a connected() socket to the
59 * other the moment one end closes.
60 * fstat() doesn't return st_dev=0, and give the blksize as high water mark
61 * and a fake inode identifier (nor the BSD first socket fstat twice bug).
63 * accept() returns a path name even if the connecting socket has closed
64 * in the meantime (BSD loses the path and gives up).
65 * accept() returns 0 length path for an unbound connector. BSD returns 16
66 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
67 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
68 * BSD af_unix apparently has connect forgetting to block properly.
69 * (need to check this with the POSIX spec in detail)
71 * Differences from 2.0.0-11-... (ANK)
72 * Bug fixes and improvements.
73 * - client shutdown killed server socket.
74 * - removed all useless cli/sti pairs.
76 * Semantic changes/extensions.
77 * - generic control message passing.
78 * - SCM_CREDENTIALS control message.
79 * - "Abstract" (not FS based) socket bindings.
80 * Abstract names are sequences of bytes (not zero terminated)
81 * started by 0, so that this name space does not intersect
85 #include <linux/module.h>
86 #include <linux/config.h>
87 #include <linux/kernel.h>
88 #include <linux/major.h>
89 #include <linux/signal.h>
90 #include <linux/sched.h>
91 #include <linux/errno.h>
92 #include <linux/string.h>
93 #include <linux/stat.h>
94 #include <linux/dcache.h>
95 #include <linux/namei.h>
96 #include <linux/socket.h>
98 #include <linux/fcntl.h>
99 #include <linux/termios.h>
100 #include <linux/sockios.h>
101 #include <linux/net.h>
102 #include <linux/in.h>
103 #include <linux/fs.h>
104 #include <linux/slab.h>
105 #include <asm/uaccess.h>
106 #include <linux/skbuff.h>
107 #include <linux/netdevice.h>
108 #include <net/sock.h>
109 #include <linux/tcp.h>
110 #include <net/af_unix.h>
111 #include <linux/proc_fs.h>
112 #include <linux/seq_file.h>
114 #include <linux/init.h>
115 #include <linux/poll.h>
116 #include <linux/smp_lock.h>
117 #include <linux/rtnetlink.h>
118 #include <linux/mount.h>
119 #include <net/checksum.h>
120 #include <linux/security.h>
122 int sysctl_unix_max_dgram_qlen = 10;
124 kmem_cache_t *unix_sk_cachep;
126 struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
127 rwlock_t unix_table_lock = RW_LOCK_UNLOCKED;
128 static atomic_t unix_nr_socks = ATOMIC_INIT(0);
130 #define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
132 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
135 * SMP locking strategy:
136 * hash table is protected with rwlock unix_table_lock
137 * each socket state is protected by separate rwlock.
140 static inline unsigned unix_hash_fold(unsigned hash)
144 return hash&(UNIX_HASH_SIZE-1);
147 #define unix_peer(sk) ((sk)->sk_pair)
149 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
151 return unix_peer(osk) == sk;
154 static inline int unix_may_send(struct sock *sk, struct sock *osk)
156 return (unix_peer(osk) == NULL || unix_our_peer(sk, osk));
159 static struct sock *unix_peer_get(struct sock *s)
167 unix_state_runlock(s);
171 static inline void unix_release_addr(struct unix_address *addr)
173 if (atomic_dec_and_test(&addr->refcnt))
178 * Check unix socket name:
179 * - should be not zero length.
180 * - if started by not zero, should be NULL terminated (FS object)
181 * - if started by zero, it is abstract name.
184 static int unix_mkname(struct sockaddr_un * sunaddr, int len, unsigned *hashp)
186 if (len <= sizeof(short) || len > sizeof(*sunaddr))
188 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
190 if (sunaddr->sun_path[0])
193 * This may look like an off by one error but it is
194 * a bit more subtle. 108 is the longest valid AF_UNIX
195 * path for a binding. sun_path[108] doesn't as such
196 * exist. However in kernel space we are guaranteed that
197 * it is a valid memory location in our kernel
200 if (len > sizeof(*sunaddr))
201 len = sizeof(*sunaddr);
202 ((char *)sunaddr)[len]=0;
203 len = strlen(sunaddr->sun_path)+1+sizeof(short);
207 *hashp = unix_hash_fold(csum_partial((char*)sunaddr, len, 0));
211 static void __unix_remove_socket(struct sock *sk)
213 sk_del_node_init(sk);
216 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
218 BUG_TRAP(sk_unhashed(sk));
219 sk_add_node(sk, list);
222 static inline void unix_remove_socket(struct sock *sk)
224 write_lock(&unix_table_lock);
225 __unix_remove_socket(sk);
226 write_unlock(&unix_table_lock);
229 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
231 write_lock(&unix_table_lock);
232 __unix_insert_socket(list, sk);
233 write_unlock(&unix_table_lock);
236 static struct sock *__unix_find_socket_byname(struct sockaddr_un *sunname,
237 int len, int type, unsigned hash)
240 struct hlist_node *node;
242 sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
243 struct unix_sock *u = unix_sk(s);
245 if (u->addr->len == len &&
246 !memcmp(u->addr->name, sunname, len))
254 static inline struct sock *unix_find_socket_byname(struct sockaddr_un *sunname,
260 read_lock(&unix_table_lock);
261 s = __unix_find_socket_byname(sunname, len, type, hash);
264 read_unlock(&unix_table_lock);
268 static struct sock *unix_find_socket_byinode(struct inode *i)
271 struct hlist_node *node;
273 read_lock(&unix_table_lock);
275 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
276 struct dentry *dentry = unix_sk(s)->dentry;
278 if(dentry && dentry->d_inode == i)
286 read_unlock(&unix_table_lock);
290 static inline int unix_writable(struct sock *sk)
292 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
295 static void unix_write_space(struct sock *sk)
297 read_lock(&sk->sk_callback_lock);
298 if (unix_writable(sk)) {
299 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
300 wake_up_interruptible(sk->sk_sleep);
301 sk_wake_async(sk, 2, POLL_OUT);
303 read_unlock(&sk->sk_callback_lock);
306 /* When dgram socket disconnects (or changes its peer), we clear its receive
307 * queue of packets arrived from previous peer. First, it allows to do
308 * flow control based only on wmem_alloc; second, sk connected to peer
309 * may receive messages only from that peer. */
310 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
312 if (skb_queue_len(&sk->sk_receive_queue)) {
313 skb_queue_purge(&sk->sk_receive_queue);
314 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
316 /* If one link of bidirectional dgram pipe is disconnected,
317 * we signal error. Messages are lost. Do not make this,
318 * when peer was not connected to us.
320 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
321 other->sk_err = ECONNRESET;
322 other->sk_error_report(other);
327 static void unix_sock_destructor(struct sock *sk)
329 struct unix_sock *u = unix_sk(sk);
331 skb_queue_purge(&sk->sk_receive_queue);
333 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
334 BUG_TRAP(sk_unhashed(sk));
335 BUG_TRAP(!sk->sk_socket);
336 if (!sock_flag(sk, SOCK_DEAD)) {
337 printk("Attempt to release alive unix socket: %p\n", sk);
342 unix_release_addr(u->addr);
344 atomic_dec(&unix_nr_socks);
345 #ifdef UNIX_REFCNT_DEBUG
346 printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk, atomic_read(&unix_nr_socks));
350 static int unix_release_sock (struct sock *sk, int embrion)
352 struct unix_sock *u = unix_sk(sk);
353 struct dentry *dentry;
354 struct vfsmount *mnt;
359 unix_remove_socket(sk);
362 unix_state_wlock(sk);
364 sk->sk_shutdown = SHUTDOWN_MASK;
369 state = sk->sk_state;
370 sk->sk_state = TCP_CLOSE;
371 unix_state_wunlock(sk);
373 wake_up_interruptible_all(&u->peer_wait);
375 skpair=unix_peer(sk);
378 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
379 unix_state_wlock(skpair);
381 skpair->sk_shutdown = SHUTDOWN_MASK;
382 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
383 skpair->sk_err = ECONNRESET;
384 unix_state_wunlock(skpair);
385 skpair->sk_state_change(skpair);
386 read_lock(&skpair->sk_callback_lock);
387 sk_wake_async(skpair,1,POLL_HUP);
388 read_unlock(&skpair->sk_callback_lock);
390 sock_put(skpair); /* It may now die */
391 unix_peer(sk) = NULL;
394 /* Try to flush out this socket. Throw out buffers at least */
396 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
397 if (state==TCP_LISTEN)
398 unix_release_sock(skb->sk, 1);
399 /* passed fds are erased in the kfree_skb hook */
410 /* ---- Socket is dead now and most probably destroyed ---- */
413 * Fixme: BSD difference: In BSD all sockets connected to use get
414 * ECONNRESET and we die on the spot. In Linux we behave
415 * like files and pipes do and wait for the last
418 * Can't we simply set sock->err?
420 * What the above comment does talk about? --ANK(980817)
423 if (atomic_read(&unix_tot_inflight))
424 unix_gc(); /* Garbage collect fds */
429 static int unix_listen(struct socket *sock, int backlog)
432 struct sock *sk = sock->sk;
433 struct unix_sock *u = unix_sk(sk);
436 if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
437 goto out; /* Only stream/seqpacket sockets accept */
440 goto out; /* No listens on an unbound socket */
441 unix_state_wlock(sk);
442 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
444 if (backlog > sk->sk_max_ack_backlog)
445 wake_up_interruptible_all(&u->peer_wait);
446 sk->sk_max_ack_backlog = backlog;
447 sk->sk_state = TCP_LISTEN;
448 /* set credentials so connect can copy them */
449 sk->sk_peercred.pid = current->tgid;
450 sk->sk_peercred.uid = current->euid;
451 sk->sk_peercred.gid = current->egid;
455 unix_state_wunlock(sk);
460 static int unix_release(struct socket *);
461 static int unix_bind(struct socket *, struct sockaddr *, int);
462 static int unix_stream_connect(struct socket *, struct sockaddr *,
463 int addr_len, int flags);
464 static int unix_socketpair(struct socket *, struct socket *);
465 static int unix_accept(struct socket *, struct socket *, int);
466 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
467 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
468 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
469 static int unix_shutdown(struct socket *, int);
470 static int unix_stream_sendmsg(struct kiocb *, struct socket *,
471 struct msghdr *, size_t);
472 static int unix_stream_recvmsg(struct kiocb *, struct socket *,
473 struct msghdr *, size_t, int);
474 static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
475 struct msghdr *, size_t);
476 static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
477 struct msghdr *, size_t, int);
478 static int unix_dgram_connect(struct socket *, struct sockaddr *,
481 static struct proto_ops unix_stream_ops = {
483 .owner = THIS_MODULE,
484 .release = unix_release,
486 .connect = unix_stream_connect,
487 .socketpair = unix_socketpair,
488 .accept = unix_accept,
489 .getname = unix_getname,
492 .listen = unix_listen,
493 .shutdown = unix_shutdown,
494 .setsockopt = sock_no_setsockopt,
495 .getsockopt = sock_no_getsockopt,
496 .sendmsg = unix_stream_sendmsg,
497 .recvmsg = unix_stream_recvmsg,
498 .mmap = sock_no_mmap,
499 .sendpage = sock_no_sendpage,
502 static struct proto_ops unix_dgram_ops = {
504 .owner = THIS_MODULE,
505 .release = unix_release,
507 .connect = unix_dgram_connect,
508 .socketpair = unix_socketpair,
509 .accept = sock_no_accept,
510 .getname = unix_getname,
511 .poll = datagram_poll,
513 .listen = sock_no_listen,
514 .shutdown = unix_shutdown,
515 .setsockopt = sock_no_setsockopt,
516 .getsockopt = sock_no_getsockopt,
517 .sendmsg = unix_dgram_sendmsg,
518 .recvmsg = unix_dgram_recvmsg,
519 .mmap = sock_no_mmap,
520 .sendpage = sock_no_sendpage,
523 static struct proto_ops unix_seqpacket_ops = {
525 .owner = THIS_MODULE,
526 .release = unix_release,
528 .connect = unix_stream_connect,
529 .socketpair = unix_socketpair,
530 .accept = unix_accept,
531 .getname = unix_getname,
532 .poll = datagram_poll,
534 .listen = unix_listen,
535 .shutdown = unix_shutdown,
536 .setsockopt = sock_no_setsockopt,
537 .getsockopt = sock_no_getsockopt,
538 .sendmsg = unix_dgram_sendmsg,
539 .recvmsg = unix_dgram_recvmsg,
540 .mmap = sock_no_mmap,
541 .sendpage = sock_no_sendpage,
544 static struct sock * unix_create1(struct socket *sock)
546 struct sock *sk = NULL;
549 if (atomic_read(&unix_nr_socks) >= 2*files_stat.max_files)
552 sk = sk_alloc(PF_UNIX, GFP_KERNEL, sizeof(struct unix_sock),
557 atomic_inc(&unix_nr_socks);
559 sock_init_data(sock,sk);
560 sk_set_owner(sk, THIS_MODULE);
562 sk->sk_write_space = unix_write_space;
563 sk->sk_max_ack_backlog = sysctl_unix_max_dgram_qlen;
564 sk->sk_destruct = unix_sock_destructor;
568 rwlock_init(&u->lock);
569 atomic_set(&u->inflight, sock ? 0 : -1);
570 init_MUTEX(&u->readsem); /* single task reading lock */
571 init_waitqueue_head(&u->peer_wait);
572 unix_insert_socket(unix_sockets_unbound, sk);
577 static int unix_create(struct socket *sock, int protocol)
579 if (protocol && protocol != PF_UNIX)
580 return -EPROTONOSUPPORT;
582 sock->state = SS_UNCONNECTED;
584 switch (sock->type) {
586 sock->ops = &unix_stream_ops;
589 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
593 sock->type=SOCK_DGRAM;
595 sock->ops = &unix_dgram_ops;
598 sock->ops = &unix_seqpacket_ops;
601 return -ESOCKTNOSUPPORT;
604 return unix_create1(sock) ? 0 : -ENOMEM;
607 static int unix_release(struct socket *sock)
609 struct sock *sk = sock->sk;
616 return unix_release_sock (sk, 0);
619 static int unix_autobind(struct socket *sock)
621 struct sock *sk = sock->sk;
622 struct unix_sock *u = unix_sk(sk);
623 static u32 ordernum = 1;
624 struct unix_address * addr;
634 addr = kmalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
638 memset(addr, 0, sizeof(*addr) + sizeof(short) + 16);
639 addr->name->sun_family = AF_UNIX;
640 atomic_set(&addr->refcnt, 1);
643 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
644 addr->hash = unix_hash_fold(csum_partial((void*)addr->name, addr->len, 0));
646 write_lock(&unix_table_lock);
647 ordernum = (ordernum+1)&0xFFFFF;
649 if (__unix_find_socket_byname(addr->name, addr->len, sock->type,
651 write_unlock(&unix_table_lock);
652 /* Sanity yield. It is unusual case, but yet... */
653 if (!(ordernum&0xFF))
657 addr->hash ^= sk->sk_type;
659 __unix_remove_socket(sk);
661 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
662 write_unlock(&unix_table_lock);
665 out: up(&u->readsem);
669 static struct sock *unix_find_other(struct sockaddr_un *sunname, int len,
670 int type, unsigned hash, int *error)
676 if (sunname->sun_path[0]) {
677 err = path_lookup(sunname->sun_path, LOOKUP_FOLLOW, &nd);
680 err = permission(nd.dentry->d_inode,MAY_WRITE, &nd);
685 if (!S_ISSOCK(nd.dentry->d_inode->i_mode))
687 u=unix_find_socket_byinode(nd.dentry->d_inode);
691 if (u->sk_type == type)
692 touch_atime(nd.mnt, nd.dentry);
697 if (u->sk_type != type) {
703 u=unix_find_socket_byname(sunname, len, type, hash);
705 struct dentry *dentry;
706 dentry = unix_sk(u)->dentry;
708 touch_atime(unix_sk(u)->mnt, dentry);
722 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
724 struct sock *sk = sock->sk;
725 struct unix_sock *u = unix_sk(sk);
726 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
727 struct dentry * dentry = NULL;
731 struct unix_address *addr;
732 struct hlist_head *list;
735 if (sunaddr->sun_family != AF_UNIX)
738 if (addr_len==sizeof(short)) {
739 err = unix_autobind(sock);
743 err = unix_mkname(sunaddr, addr_len, &hash);
755 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
759 memcpy(addr->name, sunaddr, addr_len);
760 addr->len = addr_len;
761 addr->hash = hash ^ sk->sk_type;
762 atomic_set(&addr->refcnt, 1);
764 if (sunaddr->sun_path[0]) {
768 * Get the parent directory, calculate the hash for last
771 err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
773 goto out_mknod_parent;
775 * Yucky last component or no last component at all?
776 * (foo/., foo/.., /////)
779 if (nd.last_type != LAST_NORM)
782 * Lock the directory.
784 down(&nd.dentry->d_inode->i_sem);
786 * Do the final lookup.
788 dentry = lookup_hash(&nd.last, nd.dentry);
789 err = PTR_ERR(dentry);
791 goto out_mknod_unlock;
794 * Special case - lookup gave negative, but... we had foo/bar/
795 * From the vfs_mknod() POV we just have a negative dentry -
796 * all is fine. Let's be bastards - you had / on the end, you've
797 * been asking for (non-existent) directory. -ENOENT for you.
799 if (nd.last.name[nd.last.len] && !dentry->d_inode)
802 * All right, let's create it.
805 (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
806 err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
809 up(&nd.dentry->d_inode->i_sem);
813 addr->hash = UNIX_HASH_SIZE;
816 write_lock(&unix_table_lock);
818 if (!sunaddr->sun_path[0]) {
820 if (__unix_find_socket_byname(sunaddr, addr_len,
821 sk->sk_type, hash)) {
822 unix_release_addr(addr);
826 list = &unix_socket_table[addr->hash];
828 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
829 u->dentry = nd.dentry;
834 __unix_remove_socket(sk);
836 __unix_insert_socket(list, sk);
839 write_unlock(&unix_table_lock);
848 up(&nd.dentry->d_inode->i_sem);
854 unix_release_addr(addr);
858 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
861 struct sock *sk = sock->sk;
862 struct sockaddr_un *sunaddr=(struct sockaddr_un*)addr;
867 if (addr->sa_family != AF_UNSPEC) {
868 err = unix_mkname(sunaddr, alen, &hash);
873 if (sock->passcred && !unix_sk(sk)->addr &&
874 (err = unix_autobind(sock)) != 0)
877 other=unix_find_other(sunaddr, alen, sock->type, hash, &err);
881 unix_state_wlock(sk);
884 if (!unix_may_send(sk, other))
887 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
893 * 1003.1g breaking connected state with AF_UNSPEC
896 unix_state_wlock(sk);
900 * If it was connected, reconnect.
903 struct sock *old_peer = unix_peer(sk);
905 unix_state_wunlock(sk);
907 if (other != old_peer)
908 unix_dgram_disconnected(sk, old_peer);
912 unix_state_wunlock(sk);
917 unix_state_wunlock(sk);
923 static long unix_wait_for_peer(struct sock *other, long timeo)
925 struct unix_sock *u = unix_sk(other);
929 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
931 sched = !sock_flag(other, SOCK_DEAD) &&
932 !(other->sk_shutdown & RCV_SHUTDOWN) &&
933 (skb_queue_len(&other->sk_receive_queue) >
934 other->sk_max_ack_backlog);
936 unix_state_runlock(other);
939 timeo = schedule_timeout(timeo);
941 finish_wait(&u->peer_wait, &wait);
945 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
946 int addr_len, int flags)
948 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
949 struct sock *sk = sock->sk;
950 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
951 struct sock *newsk = NULL;
952 struct sock *other = NULL;
953 struct sk_buff *skb = NULL;
959 err = unix_mkname(sunaddr, addr_len, &hash);
964 if (sock->passcred && !u->addr && (err = unix_autobind(sock)) != 0)
967 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
969 /* First of all allocate resources.
970 If we will make it after state is locked,
971 we will have to recheck all again in any case.
976 /* create new sock for complete connection */
977 newsk = unix_create1(NULL);
981 /* Allocate skb for sending to listening sock */
982 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
987 /* Find listening sock. */
988 other = unix_find_other(sunaddr, addr_len, sk->sk_type, hash, &err);
992 /* Latch state of peer */
993 unix_state_rlock(other);
995 /* Apparently VFS overslept socket death. Retry. */
996 if (sock_flag(other, SOCK_DEAD)) {
997 unix_state_runlock(other);
1002 err = -ECONNREFUSED;
1003 if (other->sk_state != TCP_LISTEN)
1006 if (skb_queue_len(&other->sk_receive_queue) >
1007 other->sk_max_ack_backlog) {
1012 timeo = unix_wait_for_peer(other, timeo);
1014 err = sock_intr_errno(timeo);
1015 if (signal_pending(current))
1023 It is tricky place. We need to grab write lock and cannot
1024 drop lock on peer. It is dangerous because deadlock is
1025 possible. Connect to self case and simultaneous
1026 attempt to connect are eliminated by checking socket
1027 state. other is TCP_LISTEN, if sk is TCP_LISTEN we
1028 check this before attempt to grab lock.
1030 Well, and we have to recheck the state after socket locked.
1036 /* This is ok... continue with connect */
1038 case TCP_ESTABLISHED:
1039 /* Socket is already connected */
1047 unix_state_wlock(sk);
1049 if (sk->sk_state != st) {
1050 unix_state_wunlock(sk);
1051 unix_state_runlock(other);
1056 err = security_unix_stream_connect(sock, other->sk_socket, newsk);
1058 unix_state_wunlock(sk);
1062 /* The way is open! Fastly set all the necessary fields... */
1065 unix_peer(newsk) = sk;
1066 newsk->sk_state = TCP_ESTABLISHED;
1067 newsk->sk_type = sk->sk_type;
1068 newsk->sk_peercred.pid = current->tgid;
1069 newsk->sk_peercred.uid = current->euid;
1070 newsk->sk_peercred.gid = current->egid;
1071 newu = unix_sk(newsk);
1072 newsk->sk_sleep = &newu->peer_wait;
1073 otheru = unix_sk(other);
1075 /* copy address information from listening to new sock*/
1077 atomic_inc(&otheru->addr->refcnt);
1078 newu->addr = otheru->addr;
1080 if (otheru->dentry) {
1081 newu->dentry = dget(otheru->dentry);
1082 newu->mnt = mntget(otheru->mnt);
1085 /* Set credentials */
1086 sk->sk_peercred = other->sk_peercred;
1089 unix_peer(sk) = newsk;
1090 sock->state = SS_CONNECTED;
1091 sk->sk_state = TCP_ESTABLISHED;
1093 unix_state_wunlock(sk);
1095 /* take ten and and send info to listening sock */
1096 spin_lock(&other->sk_receive_queue.lock);
1097 __skb_queue_tail(&other->sk_receive_queue, skb);
1098 /* Undo artificially decreased inflight after embrion
1099 * is installed to listening socket. */
1100 atomic_inc(&newu->inflight);
1101 spin_unlock(&other->sk_receive_queue.lock);
1102 unix_state_runlock(other);
1103 other->sk_data_ready(other, 0);
1109 unix_state_runlock(other);
1115 unix_release_sock(newsk, 0);
1121 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1123 struct sock *ska=socka->sk, *skb = sockb->sk;
1125 /* Join our sockets back to back */
1130 ska->sk_peercred.pid = skb->sk_peercred.pid = current->tgid;
1131 ska->sk_peercred.uid = skb->sk_peercred.uid = current->euid;
1132 ska->sk_peercred.gid = skb->sk_peercred.gid = current->egid;
1134 if (ska->sk_type != SOCK_DGRAM) {
1135 ska->sk_state = TCP_ESTABLISHED;
1136 skb->sk_state = TCP_ESTABLISHED;
1137 socka->state = SS_CONNECTED;
1138 sockb->state = SS_CONNECTED;
1143 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1145 struct sock *sk = sock->sk;
1147 struct sk_buff *skb;
1151 if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
1155 if (sk->sk_state != TCP_LISTEN)
1158 /* If socket state is TCP_LISTEN it cannot change (for now...),
1159 * so that no locks are necessary.
1162 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1164 /* This means receive shutdown. */
1171 skb_free_datagram(sk, skb);
1172 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1174 /* attach accepted sock to socket */
1175 unix_state_wlock(tsk);
1176 newsock->state = SS_CONNECTED;
1177 sock_graft(tsk, newsock);
1178 unix_state_wunlock(tsk);
1186 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1188 struct sock *sk = sock->sk;
1189 struct unix_sock *u;
1190 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
1194 sk = unix_peer_get(sk);
1205 unix_state_rlock(sk);
1207 sunaddr->sun_family = AF_UNIX;
1208 sunaddr->sun_path[0] = 0;
1209 *uaddr_len = sizeof(short);
1211 struct unix_address *addr = u->addr;
1213 *uaddr_len = addr->len;
1214 memcpy(sunaddr, addr->name, *uaddr_len);
1216 unix_state_runlock(sk);
1222 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1226 scm->fp = UNIXCB(skb).fp;
1227 skb->destructor = sock_wfree;
1228 UNIXCB(skb).fp = NULL;
1230 for (i=scm->fp->count-1; i>=0; i--)
1231 unix_notinflight(scm->fp->fp[i]);
1234 static void unix_destruct_fds(struct sk_buff *skb)
1236 struct scm_cookie scm;
1237 memset(&scm, 0, sizeof(scm));
1238 unix_detach_fds(&scm, skb);
1240 /* Alas, it calls VFS */
1241 /* So fscking what? fput() had been SMP-safe since the last Summer */
1246 static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1249 for (i=scm->fp->count-1; i>=0; i--)
1250 unix_inflight(scm->fp->fp[i]);
1251 UNIXCB(skb).fp = scm->fp;
1252 skb->destructor = unix_destruct_fds;
1257 * Send AF_UNIX data.
1260 static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1261 struct msghdr *msg, size_t len)
1263 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1264 struct sock *sk = sock->sk;
1265 struct unix_sock *u = unix_sk(sk);
1266 struct sockaddr_un *sunaddr=msg->msg_name;
1267 struct sock *other = NULL;
1268 int namelen = 0; /* fake GCC */
1271 struct sk_buff *skb;
1273 struct scm_cookie tmp_scm;
1275 if (NULL == siocb->scm)
1276 siocb->scm = &tmp_scm;
1277 err = scm_send(sock, msg, siocb->scm);
1282 if (msg->msg_flags&MSG_OOB)
1285 if (msg->msg_namelen) {
1286 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1293 other = unix_peer_get(sk);
1298 if (sock->passcred && !u->addr && (err = unix_autobind(sock)) != 0)
1302 if (len > sk->sk_sndbuf - 32)
1305 skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
1309 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1311 unix_attach_fds(siocb->scm, skb);
1313 skb->h.raw = skb->data;
1314 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
1318 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1323 if (sunaddr == NULL)
1326 other = unix_find_other(sunaddr, namelen, sk->sk_type,
1332 unix_state_rlock(other);
1334 if (!unix_may_send(sk, other))
1337 if (sock_flag(other, SOCK_DEAD)) {
1339 * Check with 1003.1g - what should
1342 unix_state_runlock(other);
1346 unix_state_wlock(sk);
1347 if (unix_peer(sk) == other) {
1349 unix_state_wunlock(sk);
1351 unix_dgram_disconnected(sk, other);
1353 err = -ECONNREFUSED;
1355 unix_state_wunlock(sk);
1365 if (other->sk_shutdown & RCV_SHUTDOWN)
1368 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1372 if (unix_peer(other) != sk &&
1373 (skb_queue_len(&other->sk_receive_queue) >
1374 other->sk_max_ack_backlog)) {
1380 timeo = unix_wait_for_peer(other, timeo);
1382 err = sock_intr_errno(timeo);
1383 if (signal_pending(current))
1389 skb_queue_tail(&other->sk_receive_queue, skb);
1390 unix_state_runlock(other);
1391 other->sk_data_ready(other, len);
1393 scm_destroy(siocb->scm);
1397 unix_state_runlock(other);
1403 scm_destroy(siocb->scm);
1408 static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1409 struct msghdr *msg, size_t len)
1411 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1412 struct sock *sk = sock->sk;
1413 struct sock *other = NULL;
1414 struct sockaddr_un *sunaddr=msg->msg_name;
1416 struct sk_buff *skb;
1418 struct scm_cookie tmp_scm;
1420 if (NULL == siocb->scm)
1421 siocb->scm = &tmp_scm;
1422 err = scm_send(sock, msg, siocb->scm);
1427 if (msg->msg_flags&MSG_OOB)
1430 if (msg->msg_namelen) {
1431 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1436 other = unix_peer_get(sk);
1441 if (sk->sk_shutdown & SEND_SHUTDOWN)
1447 * Optimisation for the fact that under 0.01% of X messages typically
1453 /* Keep two messages in the pipe so it schedules better */
1454 if (size > sk->sk_sndbuf / 2 - 64)
1455 size = sk->sk_sndbuf / 2 - 64;
1457 if (size > SKB_MAX_ALLOC)
1458 size = SKB_MAX_ALLOC;
1464 skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err);
1470 * If you pass two values to the sock_alloc_send_skb
1471 * it tries to grab the large buffer with GFP_NOFS
1472 * (which can fail easily), and if it fails grab the
1473 * fallback size buffer which is under a page and will
1476 size = min_t(int, size, skb_tailroom(skb));
1478 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1480 unix_attach_fds(siocb->scm, skb);
1482 if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
1487 unix_state_rlock(other);
1489 if (sock_flag(other, SOCK_DEAD) ||
1490 (other->sk_shutdown & RCV_SHUTDOWN))
1493 skb_queue_tail(&other->sk_receive_queue, skb);
1494 unix_state_runlock(other);
1495 other->sk_data_ready(other, size);
1500 scm_destroy(siocb->scm);
1506 unix_state_runlock(other);
1509 if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL))
1510 send_sig(SIGPIPE,current,0);
1515 scm_destroy(siocb->scm);
1517 return sent ? : err;
1520 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1522 struct unix_sock *u = unix_sk(sk);
1524 msg->msg_namelen = 0;
1526 msg->msg_namelen = u->addr->len;
1527 memcpy(msg->msg_name, u->addr->name, u->addr->len);
/*
 * unix_dgram_recvmsg - receive one datagram (SOCK_DGRAM/SOCK_SEQPACKET).
 * Dequeues a single skb, reports the sender's address, copies up to
 * @size bytes of payload (setting MSG_TRUNC on a short read), and
 * delivers SCM credentials and passed file descriptors via scm_recv().
 * MSG_PEEK leaves the skb queued and duplicates any attached fds.
 * NOTE(review): error-path lines (skb == NULL handling, out labels)
 * are elided from this view.
 */
1531 static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1532 struct msghdr *msg, size_t size,
1535 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1536 struct scm_cookie tmp_scm;
1537 struct sock *sk = sock->sk;
1538 struct unix_sock *u = unix_sk(sk);
1539 int noblock = flags & MSG_DONTWAIT;
1540 struct sk_buff *skb;
1547 msg->msg_namelen = 0;
1549 skb = skb_recv_datagram(sk, flags, noblock, &err);
	/* a datagram left the queue: let blocked senders retry (peer_wait) */
1553 wake_up_interruptible(&u->peer_wait);
1556 unix_copy_addr(msg, skb->sk);
	/* clamp the copy to the datagram length; flag truncation otherwise */
1558 if (size > skb->len)
1560 else if (size < skb->len)
1561 msg->msg_flags |= MSG_TRUNC;
1563 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
	/* synchronous path: no scm cookie supplied, use a stack temporary */
1568 siocb->scm = &tmp_scm;
1569 memset(&tmp_scm, 0, sizeof(tmp_scm));
	/* credentials travel in the skb control block (see UNIXCREDS) */
1571 siocb->scm->creds = *UNIXCREDS(skb);
1573 if (!(flags & MSG_PEEK))
1576 unix_detach_fds(siocb->scm, skb);
1580 /* It is questionable: on PEEK we could:
1581 - do not return fds - good, but too simple 8)
1582 - return fds, and do not return them on read (old strategy,
1584 - clone fds (I chose it for now, it is the most universal
1587 POSIX 1003.1g does not actually define this clearly
1588 at all. POSIX 1003.1g doesn't define a lot of things
	/* PEEK: hand out duplicated fd references, keep the originals queued */
1593 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1597 scm_recv(sock, msg, siocb->scm, flags);
1600 skb_free_datagram(sk,skb);
1606 * Sleep until data has arrived. But check for races..
/*
 * unix_stream_data_wait - block until the receive queue is non-empty.
 * Waits interruptibly for data, RCV_SHUTDOWN, a pending signal, or
 * timeout expiry; returns the remaining timeout.  The unix state lock
 * is dropped around schedule_timeout() so senders can make progress,
 * and SOCK_ASYNC_WAITDATA is set across the sleep for SIGIO users.
 */
1609 static long unix_stream_data_wait(struct sock * sk, long timeo)
1613 unix_state_rlock(sk);
1616 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	/* any of these conditions ends the wait loop */
1618 if (skb_queue_len(&sk->sk_receive_queue) ||
1620 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1621 signal_pending(current) ||
1625 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	/* must not hold the state lock while sleeping */
1626 unix_state_runlock(sk);
1627 timeo = schedule_timeout(timeo);
1628 unix_state_rlock(sk);
1629 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1632 finish_wait(sk->sk_sleep, &wait);
1633 unix_state_runlock(sk);
/*
 * unix_stream_recvmsg - receive on a connected SOCK_STREAM socket.
 * Dequeues skbs one at a time, gluing their payloads into the user's
 * iovec until @target (SO_RCVLOWAT) bytes are copied, EOF/shutdown,
 * a signal, or the timeout.  Messages from writers with different
 * credentials are never merged into one read.  Passed fds are only
 * delivered once data from their skb has been consumed.
 * Returns bytes copied, or a negative errno if nothing was copied.
 * NOTE(review): the mutex/semaphore serializing readers and several
 * error-path lines are elided from this view.
 */
1639 static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1640 struct msghdr *msg, size_t size,
1643 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1644 struct scm_cookie tmp_scm;
1645 struct sock *sk = sock->sk;
1646 struct unix_sock *u = unix_sk(sk);
1647 struct sockaddr_un *sunaddr=msg->msg_name;
1649 int check_creds = 0;
	/* stream reads are only legal on a connected socket */
1655 if (sk->sk_state != TCP_ESTABLISHED)
1662 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1663 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1665 msg->msg_namelen = 0;
1667 /* Lock the socket to prevent queue disordering
1668 * while sleeps in memcpy_tomsg
1672 siocb->scm = &tmp_scm;
1673 memset(&tmp_scm, 0, sizeof(tmp_scm));
1681 struct sk_buff *skb;
1683 skb = skb_dequeue(&sk->sk_receive_queue);
	/* queue empty: decide whether to return what we have or sleep */
1686 if (copied >= target)
1690 * POSIX 1003.1g mandates this order.
1693 if ((err = sock_error(sk)) != 0)
1695 if (sk->sk_shutdown & RCV_SHUTDOWN)
1702 timeo = unix_stream_data_wait(sk, timeo);
1704 if (signal_pending(current)) {
1705 err = sock_intr_errno(timeo);
1713 /* Never glue messages from different writers */
1714 if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, sizeof(siocb->scm->creds)) != 0) {
1715 skb_queue_head(&sk->sk_receive_queue, skb);
1719 /* Copy credentials */
1720 siocb->scm->creds = *UNIXCREDS(skb);
1724 /* Copy address just once */
1727 unix_copy_addr(msg, skb->sk);
1731 chunk = min_t(unsigned int, skb->len, size);
1732 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
	/* copy fault: requeue the skb so no data is lost */
1733 skb_queue_head(&sk->sk_receive_queue, skb);
1741 /* Mark read part of skb as used */
1742 if (!(flags & MSG_PEEK))
1744 skb_pull(skb, chunk);
1747 unix_detach_fds(siocb->scm, skb);
1749 /* put the skb back if we didn't use it up.. */
1752 skb_queue_head(&sk->sk_receive_queue, skb);
1763 /* It is questionable, see note in unix_dgram_recvmsg.
	/* MSG_PEEK: duplicate fd refs, then put the message back untouched */
1766 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1768 /* put message back and return */
1769 skb_queue_head(&sk->sk_receive_queue, skb);
1775 scm_recv(sock, msg, siocb->scm, flags);
1777 return copied ? : err;
/*
 * unix_shutdown - shutdown(2) for AF_UNIX sockets.
 * Maps the userspace mode (SHUT_RD=0..SHUT_RDWR=2) to the kernel's
 * RCV/SEND_SHUTDOWN bits via (mode+1), records them on sk, and for
 * connection-oriented sockets mirrors the *inverse* bits onto the
 * peer (our SEND shutdown is the peer's RCV shutdown).  Both ends
 * get a state-change wakeup; async waiters get POLL_HUP/POLL_IN.
 */
1780 static int unix_shutdown(struct socket *sock, int mode)
1782 struct sock *sk = sock->sk;
	/* SHUT_RD/WR/RDWR -> RCV_SHUTDOWN / SEND_SHUTDOWN bitmask */
1785 mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
1788 unix_state_wlock(sk);
1789 sk->sk_shutdown |= mode;
1790 other=unix_peer(sk);
1793 unix_state_wunlock(sk);
1794 sk->sk_state_change(sk);
	/* only stream/seqpacket peers need the mirrored shutdown */
1797 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
	/* our send side closing means the peer's receive side is closed */
1801 if (mode&RCV_SHUTDOWN)
1802 peer_mode |= SEND_SHUTDOWN;
1803 if (mode&SEND_SHUTDOWN)
1804 peer_mode |= RCV_SHUTDOWN;
1805 unix_state_wlock(other);
1806 other->sk_shutdown |= peer_mode;
1807 unix_state_wunlock(other);
1808 other->sk_state_change(other);
	/* callback lock guards sk_wake_async against socket teardown */
1809 read_lock(&other->sk_callback_lock);
1810 if (peer_mode == SHUTDOWN_MASK)
1811 sk_wake_async(other,1,POLL_HUP);
1812 else if (peer_mode & RCV_SHUTDOWN)
1813 sk_wake_async(other,1,POLL_IN);
1814 read_unlock(&other->sk_callback_lock);
/*
 * unix_ioctl - ioctl(2) for AF_UNIX sockets.
 * NOTE(review): the switch statement and its case labels are elided
 * from this view; from the bodies these appear to be the standard
 * SIOCOUTQ (bytes in the send queue) and SIOCINQ (bytes in the next
 * incoming message) queries -- confirm against the full source.
 */
1822 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1824 struct sock *sk = sock->sk;
	/* outbound-queue size: unacknowledged write memory */
1831 amount = atomic_read(&sk->sk_wmem_alloc);
1832 err = put_user(amount, (int __user *)arg);
1836 struct sk_buff *skb;
	/* a listening socket has no readable payload to report */
1837 if (sk->sk_state == TCP_LISTEN) {
	/* peek under the queue lock so the head skb cannot vanish */
1842 spin_lock(&sk->sk_receive_queue.lock);
1843 skb = skb_peek(&sk->sk_receive_queue);
1846 spin_unlock(&sk->sk_receive_queue.lock);
1847 err = put_user(amount, (int __user *)arg);
	/* everything else is forwarded to the generic device ioctls */
1852 err = dev_ioctl(cmd, (void __user *)arg);
/*
 * unix_poll - poll(2)/select(2) readiness for AF_UNIX sockets.
 * Registers on sk_sleep and reports: POLLHUP after a full shutdown,
 * POLLIN/POLLRDNORM when data is queued or the receive side is shut
 * down, hangup for disconnected connection-oriented sockets, and
 * POLLOUT when unix_writable() says send buffer space is available.
 */
1858 static unsigned int unix_poll(struct file * file, struct socket *sock, poll_table *wait)
1860 struct sock *sk = sock->sk;
1863 poll_wait(file, sk->sk_sleep, wait);
1866 /* exceptional events? */
1869 if (sk->sk_shutdown == SHUTDOWN_MASK)
	/* readable: queued data, or EOF must be reported as readable */
1873 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1874 (sk->sk_shutdown & RCV_SHUTDOWN))
1875 mask |= POLLIN | POLLRDNORM;
1877 /* Connection-based need to check for termination and startup */
1878 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && sk->sk_state == TCP_CLOSE)
1882 * we set writable also when the other side has shut down the
1883 * connection. This prevents stuck sockets.
1885 if (unix_writable(sk))
1886 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1892 #ifdef CONFIG_PROC_FS
/*
 * unix_seq_idx - walk the global unix socket table to position @pos.
 * Helper for the /proc/net/unix seq_file; iterates sockets via
 * first/next_unix_socket, updating *iter (the hash-bucket cursor).
 * NOTE(review): the body comparing the walk count to pos is elided.
 */
1893 static struct sock *unix_seq_idx(int *iter, loff_t pos)
1898 for (s = first_unix_socket(iter); s; s = next_unix_socket(iter, s)) {
/*
 * unix_seq_start - seq_file .start: take unix_table_lock for the
 * whole iteration (released in unix_seq_stop).  Position 0 returns
 * the sentinel (void *)1 so .show can print the header line first.
 */
1907 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
1909 read_lock(&unix_table_lock);
1910 return *pos ? unix_seq_idx(seq->private, *pos - 1) : ((void *) 1);
/*
 * unix_seq_next - seq_file .next: advance past the header sentinel
 * to the first socket, otherwise step to the following socket using
 * the bucket cursor stashed in seq->private.
 */
1913 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1918 return first_unix_socket(seq->private);
1919 return next_unix_socket(seq->private, v);
/*
 * unix_seq_stop - seq_file .stop: drop the table lock taken in
 * unix_seq_start.
 */
1922 static void unix_seq_stop(struct seq_file *seq, void *v)
1924 read_unlock(&unix_table_lock);
/*
 * unix_seq_show - format one /proc/net/unix row (or the header for
 * the (void *)1 sentinel).  Prints refcount, flags, type, state and
 * the bound path; abstract-namespace names keep their leading NUL
 * byte skipped/handled via UNIX_ABSTRACT().  The socket's state lock
 * is held while its fields are sampled.
 */
1927 static int unix_seq_show(struct seq_file *seq, void *v)
1931 seq_puts(seq, "Num RefCount Protocol Flags Type St "
1935 struct unix_sock *u = unix_sk(s);
1936 unix_state_rlock(s);
1938 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
1940 atomic_read(&s->sk_refcnt),
	/* a listening socket is flagged __SO_ACCEPTCON, as in netstat */
1942 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
1945 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
1946 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
	/* address length excludes the sun_family field */
1954 len = u->addr->len - sizeof(short);
1955 if (!UNIX_ABSTRACT(s))
1961 for ( ; i < len; i++)
1962 seq_putc(seq, u->addr->name->sun_path[i]);
1964 unix_state_runlock(s);
1965 seq_putc(seq, '\n');
/* seq_file iterator operations backing /proc/net/unix. */
1971 static struct seq_operations unix_seq_ops = {
1972 .start = unix_seq_start,
1973 .next = unix_seq_next,
1974 .stop = unix_seq_stop,
1975 .show = unix_seq_show,
/*
 * unix_seq_open - open handler for /proc/net/unix.
 * Allocates the per-reader bucket cursor (a single int), opens the
 * seq_file, and stores the cursor in seq->private; it is freed by
 * seq_release_private on close.  NOTE(review): the kmalloc-failure
 * and seq_open-failure cleanup lines are elided from this view.
 */
1979 static int unix_seq_open(struct inode *inode, struct file *file)
1981 struct seq_file *seq;
1983 int *iter = kmalloc(sizeof(int), GFP_KERNEL);
1988 rc = seq_open(file, &unix_seq_ops);
1992 seq = file->private_data;
1993 seq->private = iter;
/* file_operations for /proc/net/unix; release frees seq->private. */
2002 static struct file_operations unix_seq_fops = {
2003 .owner = THIS_MODULE,
2004 .open = unix_seq_open,
2006 .llseek = seq_lseek,
2007 .release = seq_release_private,
/* PF_UNIX protocol family registration (socket(2) entry point). */
2012 static struct net_proto_family unix_family_ops = {
2014 .create = unix_create,
2015 .owner = THIS_MODULE,
/* With CONFIG_SYSCTL the real register/unregister live in sysctl_net_unix.c;
 * otherwise compile empty inline stubs so callers need no #ifdefs. */
2018 #ifdef CONFIG_SYSCTL
2019 extern void unix_sysctl_register(void);
2020 extern void unix_sysctl_unregister(void);
2022 static inline void unix_sysctl_register(void) {}
2023 static inline void unix_sysctl_unregister(void) {}
/*
 * af_unix_init - module/boot-time initialization.
 * Sanity-checks that unix_skb_parms fits in skb->cb (compile-time
 * invariant checked at runtime here), creates the unix_sock slab
 * cache, registers the PF_UNIX family, and wires up the proc and
 * sysctl entries.  NOTE(review): the error-return lines after the
 * printk/panic checks are elided from this view.
 */
2026 static int __init af_unix_init(void)
2028 struct sk_buff *dummy_skb;
	/* credentials/fds ride in skb->cb; refuse to run if they don't fit */
2030 if (sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb)) {
2031 printk(KERN_CRIT "%s: panic\n", __FUNCTION__);
2034 /* allocate our sock slab cache */
2035 unix_sk_cachep = kmem_cache_create("unix_sock",
2036 sizeof(struct unix_sock), 0,
2037 SLAB_HWCACHE_ALIGN, 0, 0);
2038 if (!unix_sk_cachep)
2040 "af_unix_init: Cannot create unix_sock SLAB cache!\n");
2042 sock_register(&unix_family_ops);
2043 #ifdef CONFIG_PROC_FS
2044 proc_net_fops_create("unix", 0, &unix_seq_fops);
2046 unix_sysctl_register();
/*
 * af_unix_exit - module teardown: unwind af_unix_init in reverse
 * order (family, sysctl, proc entry, slab cache).
 */
2050 static void __exit af_unix_exit(void)
2052 sock_unregister(PF_UNIX);
2053 unix_sysctl_unregister();
2054 proc_net_remove("unix");
2055 kmem_cache_destroy(unix_sk_cachep);
/* Module entry/exit hooks and metadata; the netproto alias lets the
 * kernel autoload this module when a PF_UNIX socket is requested. */
2058 module_init(af_unix_init);
2059 module_exit(af_unix_exit);
2061 MODULE_LICENSE("GPL");
2062 MODULE_ALIAS_NETPROTO(PF_UNIX);