2 * NET4: Implementation of BSD Unix domain sockets.
4 * Authors: Alan Cox, <alan.cox@linux.org>
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
11 * Version: $Id: af_unix.c,v 1.133 2002/02/08 03:57:19 davem Exp $
14 * Linus Torvalds : Assorted bug cures.
15 * Niibe Yutaka : async I/O support.
16 * Carsten Paeth : PF_UNIX check, address fixes.
17 * Alan Cox : Limit size of allocated blocks.
18 * Alan Cox : Fixed the stupid socketpair bug.
19 * Alan Cox : BSD compatibility fine tuning.
20 * Alan Cox : Fixed a bug in connect when interrupted.
21 * Alan Cox : Sorted out a proper draft version of
22 * file descriptor passing hacked up from
24 * Marty Leisner : Fixes to fd passing
25 * Nick Nevin : recvmsg bugfix.
26 * Alan Cox : Started proper garbage collector
28 * Heiko Eißfeldt : Missing verify_area check
28 * Alan Cox : Started POSIXisms
29 * Andreas Schwab : Replace inode by dentry for proper
31 * Kirk Petersen : Made this a module
32 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
34 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
35 * by above two patches.
36 * Andrea Arcangeli : If possible we block in connect(2)
37 * if the max backlog of the listen socket
38 * has been reached. This won't break
39 * old apps and it will avoid huge amount
40 * of socks hashed (this for unix_gc()
41 * performance reasons).
42 * Security fix that limits the max
43 * number of socks to 2*max_files and
44 * the number of skb queueable in the
46 * Artur Skawina : Hash function optimizations
47 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
48 * Malcolm Beattie : Set peercred for socketpair
49 * Michal Ostrowski : Module initialization cleanup.
50 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
51 * the core infrastructure is doing that
52 * for all net proto families now (2.5.69+)
55 * Known differences from reference BSD that was tested:
58 * ECONNREFUSED is not returned from one end of a connected() socket to the
59 * other the moment one end closes.
60 * fstat() doesn't return st_dev=0, and give the blksize as high water mark
61 * and a fake inode identifier (nor the BSD first socket fstat twice bug).
63 * accept() returns a path name even if the connecting socket has closed
64 * in the meantime (BSD loses the path and gives up).
65 * accept() returns 0 length path for an unbound connector. BSD returns 16
66 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
67 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
68 * BSD af_unix apparently has connect forgetting to block properly.
69 * (need to check this with the POSIX spec in detail)
71 * Differences from 2.0.0-11-... (ANK)
72 * Bug fixes and improvements.
73 * - client shutdown killed server socket.
74 * - removed all useless cli/sti pairs.
76 * Semantic changes/extensions.
77 * - generic control message passing.
78 * - SCM_CREDENTIALS control message.
79 * - "Abstract" (not FS based) socket bindings.
80 * Abstract names are sequences of bytes (not zero terminated)
81 * started by 0, so that this name space does not intersect
85 #include <linux/module.h>
86 #include <linux/config.h>
87 #include <linux/kernel.h>
88 #include <linux/major.h>
89 #include <linux/signal.h>
90 #include <linux/sched.h>
91 #include <linux/errno.h>
92 #include <linux/string.h>
93 #include <linux/stat.h>
94 #include <linux/dcache.h>
95 #include <linux/namei.h>
96 #include <linux/socket.h>
98 #include <linux/fcntl.h>
99 #include <linux/termios.h>
100 #include <linux/sockios.h>
101 #include <linux/net.h>
102 #include <linux/in.h>
103 #include <linux/fs.h>
104 #include <linux/slab.h>
105 #include <asm/uaccess.h>
106 #include <linux/skbuff.h>
107 #include <linux/netdevice.h>
108 #include <net/sock.h>
109 #include <linux/tcp.h>
110 #include <net/af_unix.h>
111 #include <linux/proc_fs.h>
112 #include <linux/seq_file.h>
114 #include <linux/init.h>
115 #include <linux/poll.h>
116 #include <linux/smp_lock.h>
117 #include <linux/rtnetlink.h>
118 #include <linux/mount.h>
119 #include <net/checksum.h>
120 #include <linux/security.h>
122 #include <linux/vs_context.h>
123 #include <linux/vs_network.h>
126 int sysctl_unix_max_dgram_qlen = 10;
128 kmem_cache_t *unix_sk_cachep;
130 struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
131 rwlock_t unix_table_lock = RW_LOCK_UNLOCKED;
132 static atomic_t unix_nr_socks = ATOMIC_INIT(0);
134 #define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
136 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
139 * SMP locking strategy:
140 * hash table is protected with rwlock unix_table_lock
141 * each socket state is protected by separate rwlock.
144 static inline unsigned unix_hash_fold(unsigned hash)
148 return hash&(UNIX_HASH_SIZE-1);
151 #define unix_peer(sk) ((sk)->sk_pair)
153 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
155 return unix_peer(osk) == sk;
158 static inline int unix_may_send(struct sock *sk, struct sock *osk)
160 return (unix_peer(osk) == NULL || unix_our_peer(sk, osk));
163 static struct sock *unix_peer_get(struct sock *s)
171 unix_state_runlock(s);
175 static inline void unix_release_addr(struct unix_address *addr)
177 if (atomic_dec_and_test(&addr->refcnt))
182 * Check unix socket name:
183 * - should not be of zero length.
184 * - if it does not start with a zero byte, it should be NUL terminated (FS object)
185 * - if started by zero, it is abstract name.
188 static int unix_mkname(struct sockaddr_un * sunaddr, int len, unsigned *hashp)
190 if (len <= sizeof(short) || len > sizeof(*sunaddr))
192 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
194 if (sunaddr->sun_path[0])
197 * This may look like an off by one error but it is
198 * a bit more subtle. 108 is the longest valid AF_UNIX
199 * path for a binding. sun_path[108] doesn't as such
200 * exist. However in kernel space we are guaranteed that
201 * it is a valid memory location in our kernel
204 if (len > sizeof(*sunaddr))
205 len = sizeof(*sunaddr);
206 ((char *)sunaddr)[len]=0;
207 len = strlen(sunaddr->sun_path)+1+sizeof(short);
211 *hashp = unix_hash_fold(csum_partial((char*)sunaddr, len, 0));
215 static void __unix_remove_socket(struct sock *sk)
217 sk_del_node_init(sk);
220 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
222 BUG_TRAP(sk_unhashed(sk));
223 sk_add_node(sk, list);
226 static inline void unix_remove_socket(struct sock *sk)
228 write_lock(&unix_table_lock);
229 __unix_remove_socket(sk);
230 write_unlock(&unix_table_lock);
233 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
235 write_lock(&unix_table_lock);
236 __unix_insert_socket(list, sk);
237 write_unlock(&unix_table_lock);
240 static struct sock *__unix_find_socket_byname(struct sockaddr_un *sunname,
241 int len, int type, unsigned hash)
244 struct hlist_node *node;
246 sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
247 struct unix_sock *u = unix_sk(s);
249 if (u->addr->len == len &&
250 !memcmp(u->addr->name, sunname, len))
258 static inline struct sock *unix_find_socket_byname(struct sockaddr_un *sunname,
264 read_lock(&unix_table_lock);
265 s = __unix_find_socket_byname(sunname, len, type, hash);
268 read_unlock(&unix_table_lock);
272 static struct sock *unix_find_socket_byinode(struct inode *i)
275 struct hlist_node *node;
277 read_lock(&unix_table_lock);
279 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
280 struct dentry *dentry = unix_sk(s)->dentry;
282 if(dentry && dentry->d_inode == i)
290 read_unlock(&unix_table_lock);
294 static inline int unix_writable(struct sock *sk)
296 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
299 static void unix_write_space(struct sock *sk)
301 read_lock(&sk->sk_callback_lock);
302 if (unix_writable(sk)) {
303 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
304 wake_up_interruptible(sk->sk_sleep);
305 sk_wake_async(sk, 2, POLL_OUT);
307 read_unlock(&sk->sk_callback_lock);
310 /* When dgram socket disconnects (or changes its peer), we clear its receive
311 * queue of packets arrived from previous peer. First, it allows to do
312 * flow control based only on wmem_alloc; second, sk connected to peer
313 * may receive messages only from that peer. */
314 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
316 if (skb_queue_len(&sk->sk_receive_queue)) {
317 skb_queue_purge(&sk->sk_receive_queue);
318 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
320 /* If one link of bidirectional dgram pipe is disconnected,
321 * we signal error. Messages are lost. Do not make this,
322 * when peer was not connected to us.
324 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
325 other->sk_err = ECONNRESET;
326 other->sk_error_report(other);
331 static void unix_sock_destructor(struct sock *sk)
333 struct unix_sock *u = unix_sk(sk);
335 skb_queue_purge(&sk->sk_receive_queue);
337 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
338 BUG_TRAP(sk_unhashed(sk));
339 BUG_TRAP(!sk->sk_socket);
340 if (!sock_flag(sk, SOCK_DEAD)) {
341 printk("Attempt to release alive unix socket: %p\n", sk);
346 unix_release_addr(u->addr);
348 atomic_dec(&unix_nr_socks);
349 #ifdef UNIX_REFCNT_DEBUG
350 printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk, atomic_read(&unix_nr_socks));
354 static int unix_release_sock (struct sock *sk, int embrion)
356 struct unix_sock *u = unix_sk(sk);
357 struct dentry *dentry;
358 struct vfsmount *mnt;
363 unix_remove_socket(sk);
366 unix_state_wlock(sk);
368 sk->sk_shutdown = SHUTDOWN_MASK;
373 state = sk->sk_state;
374 sk->sk_state = TCP_CLOSE;
375 unix_state_wunlock(sk);
377 wake_up_interruptible_all(&u->peer_wait);
379 skpair=unix_peer(sk);
382 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
383 unix_state_wlock(skpair);
385 skpair->sk_shutdown = SHUTDOWN_MASK;
386 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
387 skpair->sk_err = ECONNRESET;
388 unix_state_wunlock(skpair);
389 skpair->sk_state_change(skpair);
390 read_lock(&skpair->sk_callback_lock);
391 sk_wake_async(skpair,1,POLL_HUP);
392 read_unlock(&skpair->sk_callback_lock);
394 sock_put(skpair); /* It may now die */
395 unix_peer(sk) = NULL;
398 /* Try to flush out this socket. Throw out buffers at least */
400 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
401 if (state==TCP_LISTEN)
402 unix_release_sock(skb->sk, 1);
403 /* passed fds are erased in the kfree_skb hook */
412 clr_vx_info(&sk->sk_vx_info);
413 clr_nx_info(&sk->sk_nx_info);
416 /* ---- Socket is dead now and most probably destroyed ---- */
419 * Fixme: BSD difference: In BSD all sockets connected to use get
420 * ECONNRESET and we die on the spot. In Linux we behave
421 * like files and pipes do and wait for the last
424 * Can't we simply set sock->err?
426 * What the above comment does talk about? --ANK(980817)
429 if (atomic_read(&unix_tot_inflight))
430 unix_gc(); /* Garbage collect fds */
435 static int unix_listen(struct socket *sock, int backlog)
438 struct sock *sk = sock->sk;
439 struct unix_sock *u = unix_sk(sk);
442 if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
443 goto out; /* Only stream/seqpacket sockets accept */
446 goto out; /* No listens on an unbound socket */
447 unix_state_wlock(sk);
448 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
450 if (backlog > sk->sk_max_ack_backlog)
451 wake_up_interruptible_all(&u->peer_wait);
452 sk->sk_max_ack_backlog = backlog;
453 sk->sk_state = TCP_LISTEN;
454 /* set credentials so connect can copy them */
455 sk->sk_peercred.pid = current->tgid;
456 sk->sk_peercred.uid = current->euid;
457 sk->sk_peercred.gid = current->egid;
461 unix_state_wunlock(sk);
466 static int unix_release(struct socket *);
467 static int unix_bind(struct socket *, struct sockaddr *, int);
468 static int unix_stream_connect(struct socket *, struct sockaddr *,
469 int addr_len, int flags);
470 static int unix_socketpair(struct socket *, struct socket *);
471 static int unix_accept(struct socket *, struct socket *, int);
472 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
473 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
474 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
475 static int unix_shutdown(struct socket *, int);
476 static int unix_stream_sendmsg(struct kiocb *, struct socket *,
477 struct msghdr *, size_t);
478 static int unix_stream_recvmsg(struct kiocb *, struct socket *,
479 struct msghdr *, size_t, int);
480 static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
481 struct msghdr *, size_t);
482 static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
483 struct msghdr *, size_t, int);
484 static int unix_dgram_connect(struct socket *, struct sockaddr *,
487 static struct proto_ops unix_stream_ops = {
489 .owner = THIS_MODULE,
490 .release = unix_release,
492 .connect = unix_stream_connect,
493 .socketpair = unix_socketpair,
494 .accept = unix_accept,
495 .getname = unix_getname,
498 .listen = unix_listen,
499 .shutdown = unix_shutdown,
500 .setsockopt = sock_no_setsockopt,
501 .getsockopt = sock_no_getsockopt,
502 .sendmsg = unix_stream_sendmsg,
503 .recvmsg = unix_stream_recvmsg,
504 .mmap = sock_no_mmap,
505 .sendpage = sock_no_sendpage,
508 static struct proto_ops unix_dgram_ops = {
510 .owner = THIS_MODULE,
511 .release = unix_release,
513 .connect = unix_dgram_connect,
514 .socketpair = unix_socketpair,
515 .accept = sock_no_accept,
516 .getname = unix_getname,
517 .poll = datagram_poll,
519 .listen = sock_no_listen,
520 .shutdown = unix_shutdown,
521 .setsockopt = sock_no_setsockopt,
522 .getsockopt = sock_no_getsockopt,
523 .sendmsg = unix_dgram_sendmsg,
524 .recvmsg = unix_dgram_recvmsg,
525 .mmap = sock_no_mmap,
526 .sendpage = sock_no_sendpage,
529 static struct proto_ops unix_seqpacket_ops = {
531 .owner = THIS_MODULE,
532 .release = unix_release,
534 .connect = unix_stream_connect,
535 .socketpair = unix_socketpair,
536 .accept = unix_accept,
537 .getname = unix_getname,
538 .poll = datagram_poll,
540 .listen = unix_listen,
541 .shutdown = unix_shutdown,
542 .setsockopt = sock_no_setsockopt,
543 .getsockopt = sock_no_getsockopt,
544 .sendmsg = unix_dgram_sendmsg,
545 .recvmsg = unix_dgram_recvmsg,
546 .mmap = sock_no_mmap,
547 .sendpage = sock_no_sendpage,
550 static struct sock * unix_create1(struct socket *sock)
552 struct sock *sk = NULL;
555 if (atomic_read(&unix_nr_socks) >= 2*files_stat.max_files)
558 sk = sk_alloc(PF_UNIX, GFP_KERNEL, sizeof(struct unix_sock),
563 atomic_inc(&unix_nr_socks);
565 sock_init_data(sock,sk);
566 sk_set_owner(sk, THIS_MODULE);
568 set_vx_info(&sk->sk_vx_info, current->vx_info);
569 set_nx_info(&sk->sk_nx_info, current->nx_info);
570 sk->sk_xid = vx_current_xid();
572 sk->sk_write_space = unix_write_space;
573 sk->sk_max_ack_backlog = sysctl_unix_max_dgram_qlen;
574 sk->sk_destruct = unix_sock_destructor;
578 rwlock_init(&u->lock);
579 atomic_set(&u->inflight, sock ? 0 : -1);
580 init_MUTEX(&u->readsem); /* single task reading lock */
581 init_waitqueue_head(&u->peer_wait);
582 unix_insert_socket(unix_sockets_unbound, sk);
587 static int unix_create(struct socket *sock, int protocol)
589 if (protocol && protocol != PF_UNIX)
590 return -EPROTONOSUPPORT;
592 sock->state = SS_UNCONNECTED;
594 switch (sock->type) {
596 sock->ops = &unix_stream_ops;
599 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
603 sock->type=SOCK_DGRAM;
605 sock->ops = &unix_dgram_ops;
608 sock->ops = &unix_seqpacket_ops;
611 return -ESOCKTNOSUPPORT;
614 return unix_create1(sock) ? 0 : -ENOMEM;
617 static int unix_release(struct socket *sock)
619 struct sock *sk = sock->sk;
626 return unix_release_sock (sk, 0);
629 static int unix_autobind(struct socket *sock)
631 struct sock *sk = sock->sk;
632 struct unix_sock *u = unix_sk(sk);
633 static u32 ordernum = 1;
634 struct unix_address * addr;
644 addr = kmalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
648 memset(addr, 0, sizeof(*addr) + sizeof(short) + 16);
649 addr->name->sun_family = AF_UNIX;
650 atomic_set(&addr->refcnt, 1);
653 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
654 addr->hash = unix_hash_fold(csum_partial((void*)addr->name, addr->len, 0));
656 write_lock(&unix_table_lock);
657 ordernum = (ordernum+1)&0xFFFFF;
659 if (__unix_find_socket_byname(addr->name, addr->len, sock->type,
661 write_unlock(&unix_table_lock);
662 /* Sanity yield. It is unusual case, but yet... */
663 if (!(ordernum&0xFF))
667 addr->hash ^= sk->sk_type;
669 __unix_remove_socket(sk);
671 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
672 write_unlock(&unix_table_lock);
675 out: up(&u->readsem);
679 static struct sock *unix_find_other(struct sockaddr_un *sunname, int len,
680 int type, unsigned hash, int *error)
686 if (sunname->sun_path[0]) {
687 err = path_lookup(sunname->sun_path, LOOKUP_FOLLOW, &nd);
690 err = permission(nd.dentry->d_inode,MAY_WRITE, &nd);
695 if (!S_ISSOCK(nd.dentry->d_inode->i_mode))
697 u=unix_find_socket_byinode(nd.dentry->d_inode);
701 if (u->sk_type == type)
702 touch_atime(nd.mnt, nd.dentry);
707 if (u->sk_type != type) {
713 u=unix_find_socket_byname(sunname, len, type, hash);
715 struct dentry *dentry;
716 dentry = unix_sk(u)->dentry;
718 touch_atime(unix_sk(u)->mnt, dentry);
732 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
734 struct sock *sk = sock->sk;
735 struct unix_sock *u = unix_sk(sk);
736 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
737 struct dentry * dentry = NULL;
741 struct unix_address *addr;
742 struct hlist_head *list;
745 if (sunaddr->sun_family != AF_UNIX)
748 if (addr_len==sizeof(short)) {
749 err = unix_autobind(sock);
753 err = unix_mkname(sunaddr, addr_len, &hash);
765 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
769 memcpy(addr->name, sunaddr, addr_len);
770 addr->len = addr_len;
771 addr->hash = hash ^ sk->sk_type;
772 atomic_set(&addr->refcnt, 1);
774 if (sunaddr->sun_path[0]) {
778 * Get the parent directory, calculate the hash for last
781 err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
783 goto out_mknod_parent;
785 * Yucky last component or no last component at all?
786 * (foo/., foo/.., /////)
789 if (nd.last_type != LAST_NORM)
792 * Lock the directory.
794 down(&nd.dentry->d_inode->i_sem);
796 * Do the final lookup.
798 dentry = lookup_hash(&nd.last, nd.dentry);
799 err = PTR_ERR(dentry);
801 goto out_mknod_unlock;
804 * Special case - lookup gave negative, but... we had foo/bar/
805 * From the vfs_mknod() POV we just have a negative dentry -
806 * all is fine. Let's be bastards - you had / on the end, you've
807 * been asking for (non-existent) directory. -ENOENT for you.
809 if (nd.last.name[nd.last.len] && !dentry->d_inode)
812 * All right, let's create it.
815 (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
816 err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
819 up(&nd.dentry->d_inode->i_sem);
823 addr->hash = UNIX_HASH_SIZE;
826 write_lock(&unix_table_lock);
828 if (!sunaddr->sun_path[0]) {
830 if (__unix_find_socket_byname(sunaddr, addr_len,
831 sk->sk_type, hash)) {
832 unix_release_addr(addr);
836 list = &unix_socket_table[addr->hash];
838 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
839 u->dentry = nd.dentry;
844 __unix_remove_socket(sk);
846 __unix_insert_socket(list, sk);
849 write_unlock(&unix_table_lock);
858 up(&nd.dentry->d_inode->i_sem);
864 unix_release_addr(addr);
868 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
871 struct sock *sk = sock->sk;
872 struct sockaddr_un *sunaddr=(struct sockaddr_un*)addr;
877 if (addr->sa_family != AF_UNSPEC) {
878 err = unix_mkname(sunaddr, alen, &hash);
883 if (test_bit(SOCK_PASS_CRED, &sock->flags) && !unix_sk(sk)->addr &&
884 (err = unix_autobind(sock)) != 0)
887 other=unix_find_other(sunaddr, alen, sock->type, hash, &err);
891 unix_state_wlock(sk);
894 if (!unix_may_send(sk, other))
897 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
903 * 1003.1g breaking connected state with AF_UNSPEC
906 unix_state_wlock(sk);
910 * If it was connected, reconnect.
913 struct sock *old_peer = unix_peer(sk);
915 unix_state_wunlock(sk);
917 if (other != old_peer)
918 unix_dgram_disconnected(sk, old_peer);
922 unix_state_wunlock(sk);
927 unix_state_wunlock(sk);
933 static long unix_wait_for_peer(struct sock *other, long timeo)
935 struct unix_sock *u = unix_sk(other);
939 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
941 sched = !sock_flag(other, SOCK_DEAD) &&
942 !(other->sk_shutdown & RCV_SHUTDOWN) &&
943 (skb_queue_len(&other->sk_receive_queue) >
944 other->sk_max_ack_backlog);
946 unix_state_runlock(other);
949 timeo = schedule_timeout(timeo);
951 finish_wait(&u->peer_wait, &wait);
955 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
956 int addr_len, int flags)
958 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
959 struct sock *sk = sock->sk;
960 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
961 struct sock *newsk = NULL;
962 struct sock *other = NULL;
963 struct sk_buff *skb = NULL;
969 err = unix_mkname(sunaddr, addr_len, &hash);
974 if (test_bit(SOCK_PASS_CRED, &sock->flags)
975 && !u->addr && (err = unix_autobind(sock)) != 0)
978 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
980 /* First of all allocate resources.
981 If we will make it after state is locked,
982 we will have to recheck all again in any case.
987 /* create new sock for complete connection */
988 newsk = unix_create1(NULL);
992 /* Allocate skb for sending to listening sock */
993 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
998 /* Find listening sock. */
999 other = unix_find_other(sunaddr, addr_len, sk->sk_type, hash, &err);
1003 /* Latch state of peer */
1004 unix_state_rlock(other);
1006 /* Apparently VFS overslept socket death. Retry. */
1007 if (sock_flag(other, SOCK_DEAD)) {
1008 unix_state_runlock(other);
1013 err = -ECONNREFUSED;
1014 if (other->sk_state != TCP_LISTEN)
1017 if (skb_queue_len(&other->sk_receive_queue) >
1018 other->sk_max_ack_backlog) {
1023 timeo = unix_wait_for_peer(other, timeo);
1025 err = sock_intr_errno(timeo);
1026 if (signal_pending(current))
1034 It is tricky place. We need to grab write lock and cannot
1035 drop lock on peer. It is dangerous because deadlock is
1036 possible. Connect to self case and simultaneous
1037 attempt to connect are eliminated by checking socket
1038 state. other is TCP_LISTEN, if sk is TCP_LISTEN we
1039 check this before attempt to grab lock.
1041 Well, and we have to recheck the state after socket locked.
1047 /* This is ok... continue with connect */
1049 case TCP_ESTABLISHED:
1050 /* Socket is already connected */
1058 unix_state_wlock(sk);
1060 if (sk->sk_state != st) {
1061 unix_state_wunlock(sk);
1062 unix_state_runlock(other);
1067 err = security_unix_stream_connect(sock, other->sk_socket, newsk);
1069 unix_state_wunlock(sk);
1073 /* The way is open! Fastly set all the necessary fields... */
1076 unix_peer(newsk) = sk;
1077 newsk->sk_state = TCP_ESTABLISHED;
1078 newsk->sk_type = sk->sk_type;
1079 newsk->sk_peercred.pid = current->tgid;
1080 newsk->sk_peercred.uid = current->euid;
1081 newsk->sk_peercred.gid = current->egid;
1082 newu = unix_sk(newsk);
1083 newsk->sk_sleep = &newu->peer_wait;
1084 otheru = unix_sk(other);
1086 /* copy address information from listening to new sock*/
1088 atomic_inc(&otheru->addr->refcnt);
1089 newu->addr = otheru->addr;
1091 if (otheru->dentry) {
1092 newu->dentry = dget(otheru->dentry);
1093 newu->mnt = mntget(otheru->mnt);
1096 /* Set credentials */
1097 sk->sk_peercred = other->sk_peercred;
1100 unix_peer(sk) = newsk;
1101 sock->state = SS_CONNECTED;
1102 sk->sk_state = TCP_ESTABLISHED;
1104 unix_state_wunlock(sk);
1106 /* take ten and and send info to listening sock */
1107 spin_lock(&other->sk_receive_queue.lock);
1108 __skb_queue_tail(&other->sk_receive_queue, skb);
1109 /* Undo artificially decreased inflight after embrion
1110 * is installed to listening socket. */
1111 atomic_inc(&newu->inflight);
1112 spin_unlock(&other->sk_receive_queue.lock);
1113 unix_state_runlock(other);
1114 other->sk_data_ready(other, 0);
1120 unix_state_runlock(other);
1126 unix_release_sock(newsk, 0);
1132 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1134 struct sock *ska=socka->sk, *skb = sockb->sk;
1136 /* Join our sockets back to back */
1141 ska->sk_peercred.pid = skb->sk_peercred.pid = current->tgid;
1142 ska->sk_peercred.uid = skb->sk_peercred.uid = current->euid;
1143 ska->sk_peercred.gid = skb->sk_peercred.gid = current->egid;
1145 if (ska->sk_type != SOCK_DGRAM) {
1146 ska->sk_state = TCP_ESTABLISHED;
1147 skb->sk_state = TCP_ESTABLISHED;
1148 socka->state = SS_CONNECTED;
1149 sockb->state = SS_CONNECTED;
1154 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1156 struct sock *sk = sock->sk;
1158 struct sk_buff *skb;
1162 if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
1166 if (sk->sk_state != TCP_LISTEN)
1169 /* If socket state is TCP_LISTEN it cannot change (for now...),
1170 * so that no locks are necessary.
1173 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1175 /* This means receive shutdown. */
1182 skb_free_datagram(sk, skb);
1183 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1185 /* attach accepted sock to socket */
1186 unix_state_wlock(tsk);
1187 newsock->state = SS_CONNECTED;
1188 sock_graft(tsk, newsock);
1189 unix_state_wunlock(tsk);
1197 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1199 struct sock *sk = sock->sk;
1200 struct unix_sock *u;
1201 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
1205 sk = unix_peer_get(sk);
1216 unix_state_rlock(sk);
1218 sunaddr->sun_family = AF_UNIX;
1219 sunaddr->sun_path[0] = 0;
1220 *uaddr_len = sizeof(short);
1222 struct unix_address *addr = u->addr;
1224 *uaddr_len = addr->len;
1225 memcpy(sunaddr, addr->name, *uaddr_len);
1227 unix_state_runlock(sk);
1233 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1237 scm->fp = UNIXCB(skb).fp;
1238 skb->destructor = sock_wfree;
1239 UNIXCB(skb).fp = NULL;
1241 for (i=scm->fp->count-1; i>=0; i--)
1242 unix_notinflight(scm->fp->fp[i]);
1245 static void unix_destruct_fds(struct sk_buff *skb)
1247 struct scm_cookie scm;
1248 memset(&scm, 0, sizeof(scm));
1249 unix_detach_fds(&scm, skb);
1251 /* Alas, it calls VFS */
1252 /* So fscking what? fput() had been SMP-safe since the last Summer */
1257 static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1260 for (i=scm->fp->count-1; i>=0; i--)
1261 unix_inflight(scm->fp->fp[i]);
1262 UNIXCB(skb).fp = scm->fp;
1263 skb->destructor = unix_destruct_fds;
1268 * Send AF_UNIX data.
1271 static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1272 struct msghdr *msg, size_t len)
1274 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1275 struct sock *sk = sock->sk;
1276 struct unix_sock *u = unix_sk(sk);
1277 struct sockaddr_un *sunaddr=msg->msg_name;
1278 struct sock *other = NULL;
1279 int namelen = 0; /* fake GCC */
1282 struct sk_buff *skb;
1284 struct scm_cookie tmp_scm;
1286 if (NULL == siocb->scm)
1287 siocb->scm = &tmp_scm;
1288 err = scm_send(sock, msg, siocb->scm);
1293 if (msg->msg_flags&MSG_OOB)
1296 if (msg->msg_namelen) {
1297 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1304 other = unix_peer_get(sk);
1309 if (test_bit(SOCK_PASS_CRED, &sock->flags)
1310 && !u->addr && (err = unix_autobind(sock)) != 0)
1314 if (len > sk->sk_sndbuf - 32)
1317 skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
1321 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1323 unix_attach_fds(siocb->scm, skb);
1325 skb->h.raw = skb->data;
1326 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
1330 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1335 if (sunaddr == NULL)
1338 other = unix_find_other(sunaddr, namelen, sk->sk_type,
1344 unix_state_rlock(other);
1346 if (!unix_may_send(sk, other))
1349 if (sock_flag(other, SOCK_DEAD)) {
1351 * Check with 1003.1g - what should
1354 unix_state_runlock(other);
1358 unix_state_wlock(sk);
1359 if (unix_peer(sk) == other) {
1361 unix_state_wunlock(sk);
1363 unix_dgram_disconnected(sk, other);
1365 err = -ECONNREFUSED;
1367 unix_state_wunlock(sk);
1377 if (other->sk_shutdown & RCV_SHUTDOWN)
1380 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1384 if (unix_peer(other) != sk &&
1385 (skb_queue_len(&other->sk_receive_queue) >
1386 other->sk_max_ack_backlog)) {
1392 timeo = unix_wait_for_peer(other, timeo);
1394 err = sock_intr_errno(timeo);
1395 if (signal_pending(current))
1401 skb_queue_tail(&other->sk_receive_queue, skb);
1402 unix_state_runlock(other);
1403 other->sk_data_ready(other, len);
1405 scm_destroy(siocb->scm);
1409 unix_state_runlock(other);
1415 scm_destroy(siocb->scm);
1420 static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1421 struct msghdr *msg, size_t len)
1423 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1424 struct sock *sk = sock->sk;
1425 struct sock *other = NULL;
1426 struct sockaddr_un *sunaddr=msg->msg_name;
1428 struct sk_buff *skb;
1430 struct scm_cookie tmp_scm;
1432 if (NULL == siocb->scm)
1433 siocb->scm = &tmp_scm;
1434 err = scm_send(sock, msg, siocb->scm);
1439 if (msg->msg_flags&MSG_OOB)
1442 if (msg->msg_namelen) {
1443 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1448 other = unix_peer_get(sk);
1453 if (sk->sk_shutdown & SEND_SHUTDOWN)
1459 * Optimisation for the fact that under 0.01% of X messages typically
1465 /* Keep two messages in the pipe so it schedules better */
1466 if (size > sk->sk_sndbuf / 2 - 64)
1467 size = sk->sk_sndbuf / 2 - 64;
1469 if (size > SKB_MAX_ALLOC)
1470 size = SKB_MAX_ALLOC;
1476 skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err);
1482 * If you pass two values to the sock_alloc_send_skb
1483 * it tries to grab the large buffer with GFP_NOFS
1484 * (which can fail easily), and if it fails grab the
1485 * fallback size buffer which is under a page and will
1488 size = min_t(int, size, skb_tailroom(skb));
1490 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1492 unix_attach_fds(siocb->scm, skb);
1494 if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
1499 unix_state_rlock(other);
1501 if (sock_flag(other, SOCK_DEAD) ||
1502 (other->sk_shutdown & RCV_SHUTDOWN))
1505 skb_queue_tail(&other->sk_receive_queue, skb);
1506 unix_state_runlock(other);
1507 other->sk_data_ready(other, size);
1512 scm_destroy(siocb->scm);
1518 unix_state_runlock(other);
1521 if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL))
1522 send_sig(SIGPIPE,current,0);
1527 scm_destroy(siocb->scm);
1529 return sent ? : err;
1532 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1534 struct unix_sock *u = unix_sk(sk);
1536 msg->msg_namelen = 0;
1538 msg->msg_namelen = u->addr->len;
1539 memcpy(msg->msg_name, u->addr->name, u->addr->len);
/*
 * Receive one datagram on a SOCK_DGRAM unix socket: dequeue an skb,
 * copy its payload into the caller's iovec, and deliver the sender's
 * address, credentials, and any SCM_RIGHTS file descriptors via the
 * scm cookie.
 * NOTE(review): interior lines (error paths, labels, braces) are
 * missing from this excerpt.
 */
1543 static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1544 struct msghdr *msg, size_t size,
1547 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1548 struct scm_cookie tmp_scm;
1549 struct sock *sk = sock->sk;
1550 struct unix_sock *u = unix_sk(sk);
1551 int noblock = flags & MSG_DONTWAIT;
1552 struct sk_buff *skb;
/* No sender address is reported unless a datagram actually arrives. */
1559 msg->msg_namelen = 0;
1561 skb = skb_recv_datagram(sk, flags, noblock, &err);
/* A receive-queue slot was consumed: wake any sender blocked on it. */
1565 wake_up_interruptible(&u->peer_wait);
1568 unix_copy_addr(msg, skb->sk);
/* Flag truncation when the datagram is larger than the user buffer. */
1570 if (size > skb->len)
1572 else if (size < skb->len)
1573 msg->msg_flags |= MSG_TRUNC;
1575 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
/* Caller supplied no scm cookie: use a zeroed on-stack one. */
1580 siocb->scm = &tmp_scm;
1581 memset(&tmp_scm, 0, sizeof(tmp_scm));
/* Sender credentials travel in the skb control block. */
1583 siocb->scm->creds = *UNIXCREDS(skb);
1585 if (!(flags & MSG_PEEK))
/* Consuming read: move the passed fds out of the skb into the scm. */
1588 unix_detach_fds(siocb->scm, skb);
1592 /* It is questionable: on PEEK we could:
1593 - not return fds - good, but too simple 8)
1594 - return fds, and not return them on read (old strategy,
1596 - clone fds (I chose it for now, it is the most universal
1599 POSIX 1003.1g does not actually define this clearly
1600 at all. POSIX 1003.1g doesn't define a lot of things
/* MSG_PEEK: duplicate the fd references, leaving the originals queued. */
1605 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
/* Emit credentials/fds to userspace as control messages. */
1609 scm_recv(sock, msg, siocb->scm, flags);
1612 skb_free_datagram(sk,skb);
1618 * Sleep until data has arrived. But check for races..
/*
 * Sleep until the stream socket has data (or an exceptional condition),
 * or the timeout expires.  Returns the remaining timeout.
 * The unix state read-lock is dropped across the actual sleep and
 * retaken afterwards so the wake conditions can be re-checked safely.
 * NOTE(review): the enclosing loop and some wake conditions are on
 * lines missing from this excerpt.
 */
1621 static long unix_stream_data_wait(struct sock * sk, long timeo)
1625 unix_state_rlock(sk);
1628 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
/* Stop waiting on: queued data, receive shutdown, or a pending signal. */
1630 if (skb_queue_len(&sk->sk_receive_queue) ||
1632 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1633 signal_pending(current) ||
/* Mark the socket as async-waiting so SIGIO delivery works. */
1637 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
/* Release the state lock while sleeping; retake before re-checking. */
1638 unix_state_runlock(sk);
1639 timeo = schedule_timeout(timeo);
1640 unix_state_rlock(sk);
1641 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1644 finish_wait(sk->sk_sleep, &wait);
1645 unix_state_runlock(sk);
/*
 * Receive on a SOCK_STREAM unix socket.  Dequeues skbs one at a time,
 * copying bytes into the caller's iovec until `target` (SO_RCVLOWAT /
 * MSG_WAITALL) is satisfied, blocking via unix_stream_data_wait() when
 * the queue is empty.  Credentials and passed fds are delivered through
 * the scm cookie; messages from different senders are never merged.
 * NOTE(review): many interior lines (loop structure, error labels,
 * sunaddr use) are missing from this excerpt.
 */
1651 static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1652 struct msghdr *msg, size_t size,
1655 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1656 struct scm_cookie tmp_scm;
1657 struct sock *sk = sock->sk;
1658 struct unix_sock *u = unix_sk(sk);
1659 struct sockaddr_un *sunaddr=msg->msg_name;
1661 int check_creds = 0;
/* Stream receive is only valid on a connected socket. */
1667 if (sk->sk_state != TCP_ESTABLISHED)
/* Minimum byte count to return and how long we may block for it. */
1674 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1675 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1677 msg->msg_namelen = 0;
1679 /* Lock the socket to prevent queue disordering
1680 * while sleeps in memcpy_tomsg
/* Caller supplied no scm cookie: use a zeroed on-stack one. */
1684 siocb->scm = &tmp_scm;
1685 memset(&tmp_scm, 0, sizeof(tmp_scm));
1693 struct sk_buff *skb;
1695 skb = skb_dequeue(&sk->sk_receive_queue);
/* Queue empty: if we already have enough, return what we copied. */
1698 if (copied >= target)
1702 * POSIX 1003.1g mandates this order.
1705 if ((err = sock_error(sk)) != 0)
1707 if (sk->sk_shutdown & RCV_SHUTDOWN)
1714 timeo = unix_stream_data_wait(sk, timeo);
1716 if (signal_pending(current)) {
1717 err = sock_intr_errno(timeo);
1725 /* Never glue messages from different writers */
1726 if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, sizeof(siocb->scm->creds)) != 0) {
/* Different sender: push the skb back and stop this read here. */
1727 skb_queue_head(&sk->sk_receive_queue, skb);
1731 /* Copy credentials */
1732 siocb->scm->creds = *UNIXCREDS(skb);
1736 /* Copy address just once */
1739 unix_copy_addr(msg, skb->sk);
/* Copy at most the remaining user-buffer space from this skb. */
1743 chunk = min_t(unsigned int, skb->len, size);
1744 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
/* Copy fault: requeue the skb so no data is lost. */
1745 skb_queue_head(&sk->sk_receive_queue, skb);
1753 /* Mark read part of skb as used */
1754 if (!(flags & MSG_PEEK))
1756 skb_pull(skb, chunk);
/* Consuming read also takes ownership of any passed fds. */
1759 unix_detach_fds(siocb->scm, skb);
1761 /* put the skb back if we didn't use it up.. */
1764 skb_queue_head(&sk->sk_receive_queue, skb);
1775 /* It is questionable, see note in unix_dgram_recvmsg.
/* MSG_PEEK: duplicate fd references rather than consuming them. */
1778 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1780 /* put message back and return */
1781 skb_queue_head(&sk->sk_receive_queue, skb);
1787 scm_recv(sock, msg, siocb->scm, flags);
1789 return copied ? : err;
/*
 * shutdown(2) for unix sockets.  Applies the shutdown mask to this
 * socket and, for connection-oriented types, mirrors it onto the peer
 * (our RCV_SHUTDOWN becomes the peer's SEND_SHUTDOWN and vice versa),
 * then wakes the peer's waiters and async subscribers.
 * NOTE(review): interior lines (mode validation, peer NULL check,
 * sock_hold/put) are missing from this excerpt.
 */
1792 static int unix_shutdown(struct socket *sock, int mode)
1794 struct sock *sk = sock->sk;
/* Map SHUT_RD(0)/SHUT_WR(1)/SHUT_RDWR(2) onto the shutdown bit mask. */
1797 mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
1800 unix_state_wlock(sk);
1801 sk->sk_shutdown |= mode;
1802 other=unix_peer(sk);
1805 unix_state_wunlock(sk);
1806 sk->sk_state_change(sk);
/* Only connection-oriented types propagate shutdown to the peer. */
1809 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
/* Our read shutdown is the peer's write shutdown, and vice versa. */
1813 if (mode&RCV_SHUTDOWN)
1814 peer_mode |= SEND_SHUTDOWN;
1815 if (mode&SEND_SHUTDOWN)
1816 peer_mode |= RCV_SHUTDOWN;
1817 unix_state_wlock(other);
1818 other->sk_shutdown |= peer_mode;
1819 unix_state_wunlock(other);
1820 other->sk_state_change(other);
/* Signal async waiters: HUP on full shutdown, IN on read shutdown. */
1821 read_lock(&other->sk_callback_lock);
1822 if (peer_mode == SHUTDOWN_MASK)
1823 sk_wake_async(other,1,POLL_HUP);
1824 else if (peer_mode & RCV_SHUTDOWN)
1825 sk_wake_async(other,1,POLL_IN);
1826 read_unlock(&other->sk_callback_lock);
/*
 * ioctl(2) for unix sockets.  The visible branches report queued byte
 * counts to userspace; unrecognized commands fall through to
 * dev_ioctl().
 * NOTE(review): the switch/case labels are on lines missing from this
 * excerpt — presumably SIOCOUTQ (write queue) and SIOCINQ (read queue);
 * confirm against the full source.
 */
1834 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1836 struct sock *sk = sock->sk;
/* Bytes currently allocated to the send queue. */
1843 amount = atomic_read(&sk->sk_wmem_alloc);
1844 err = put_user(amount, (int __user *)arg);
1848 struct sk_buff *skb;
/* A listening socket has no readable payload to count. */
1849 if (sk->sk_state == TCP_LISTEN) {
/* Peek at the head of the receive queue under its lock. */
1854 spin_lock(&sk->sk_receive_queue.lock);
1855 skb = skb_peek(&sk->sk_receive_queue);
1858 spin_unlock(&sk->sk_receive_queue.lock);
1859 err = put_user(amount, (int __user *)arg);
1864 err = dev_ioctl(cmd, (void __user *)arg);
/*
 * poll(2)/select(2) for unix sockets: registers on the socket's wait
 * queue and reports readiness.
 * NOTE(review): interior lines (err flag checks, return) are missing
 * from this excerpt.
 */
1870 static unsigned int unix_poll(struct file * file, struct socket *sock, poll_table *wait)
1872 struct sock *sk = sock->sk;
1875 poll_wait(file, sk->sk_sleep, wait);
1878 /* exceptional events? */
/* Fully shut down in both directions reads as hangup. */
1881 if (sk->sk_shutdown == SHUTDOWN_MASK)
/* readable: data queued, or reads are shut down (read returns 0). */
1885 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1886 (sk->sk_shutdown & RCV_SHUTDOWN))
1887 mask |= POLLIN | POLLRDNORM;
1889 /* Connection-based need to check for termination and startup */
1890 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && sk->sk_state == TCP_CLOSE)
1894 * we set writable also when the other side has shut down the
1895 * connection. This prevents stuck sockets.
1897 if (unix_writable(sk))
1898 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1904 #ifdef CONFIG_PROC_FS
/*
 * Walk the global unix socket list to the socket at position `pos`,
 * for /proc/net/unix seq_file iteration.  `iter` is the per-open
 * cursor allocated in unix_seq_open().
 * NOTE(review): the loop body and return are on lines missing from
 * this excerpt.
 */
1905 static struct sock *unix_seq_idx(int *iter, loff_t pos)
1910 for (s = first_unix_socket(iter); s; s = next_unix_socket(iter, s)) {
/*
 * seq_file start: take the unix table read-lock (released in
 * unix_seq_stop) and position the iterator.  Position 0 returns the
 * sentinel (void *)1 so unix_seq_show() can print the header line.
 */
1919 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
1921 read_lock(&unix_table_lock);
1922 return *pos ? unix_seq_idx(seq->private, *pos - 1) : ((void *) 1);
/*
 * seq_file next: advance the iterator.  Coming from the header
 * sentinel, start at the first socket; otherwise step to the next.
 * NOTE(review): the sentinel test and ++*pos are on lines missing
 * from this excerpt.
 */
1925 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1930 return first_unix_socket(seq->private);
1931 return next_unix_socket(seq->private, v);
/* seq_file stop: release the lock taken in unix_seq_start(). */
1934 static void unix_seq_stop(struct seq_file *seq, void *v)
1936 read_unlock(&unix_table_lock);
/*
 * seq_file show: print the /proc/net/unix header (for the sentinel
 * entry) or one line per socket: address, refcount, flags, type,
 * state, inode, and the bound path if any.
 * NOTE(review): interior lines (type field, inode, abstract-name
 * offset handling) are missing from this excerpt.
 */
1939 static int unix_seq_show(struct seq_file *seq, void *v)
1943 seq_puts(seq, "Num RefCount Protocol Flags Type St "
1947 struct unix_sock *u = unix_sk(s);
/* Hold the per-socket state lock while reading its fields. */
1948 unix_state_rlock(s);
1950 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
1952 atomic_read(&s->sk_refcnt),
1954 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
/* Map TCP-style internal state onto socket-layer SS_* values. */
1957 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
1958 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
/* Path length excludes the leading sun_family field. */
1966 len = u->addr->len - sizeof(short);
1967 if (!UNIX_ABSTRACT(s))
1973 for ( ; i < len; i++)
1974 seq_putc(seq, u->addr->name->sun_path[i]);
1976 unix_state_runlock(s);
1977 seq_putc(seq, '\n');
/* seq_file operations for /proc/net/unix iteration. */
1983 static struct seq_operations unix_seq_ops = {
1984 .start = unix_seq_start,
1985 .next = unix_seq_next,
1986 .stop = unix_seq_stop,
1987 .show = unix_seq_show,
/*
 * open() for /proc/net/unix: allocate the per-open iterator cursor,
 * open the seq_file, and stash the cursor in seq->private (freed by
 * seq_release_private).
 * NOTE(review): the NULL/error handling between the visible lines is
 * missing from this excerpt.
 */
1991 static int unix_seq_open(struct inode *inode, struct file *file)
1993 struct seq_file *seq;
1995 int *iter = kmalloc(sizeof(int), GFP_KERNEL);
2000 rc = seq_open(file, &unix_seq_ops);
2004 seq = file->private_data;
2005 seq->private = iter;
/* file_operations for /proc/net/unix.
 * seq_release_private also frees the iterator from unix_seq_open(). */
2014 static struct file_operations unix_seq_fops = {
2015 .owner = THIS_MODULE,
2016 .open = unix_seq_open,
2018 .llseek = seq_lseek,
2019 .release = seq_release_private,
/* PF_UNIX protocol family registration (see sock_register in init). */
2024 static struct net_proto_family unix_family_ops = {
2026 .create = unix_create,
2027 .owner = THIS_MODULE,
/* Sysctl hooks: real implementations live elsewhere when CONFIG_SYSCTL
 * is set; otherwise they compile away to empty inlines.
 * NOTE(review): the #else/#endif lines are missing from this excerpt. */
2030 #ifdef CONFIG_SYSCTL
2031 extern void unix_sysctl_register(void);
2032 extern void unix_sysctl_unregister(void);
2034 static inline void unix_sysctl_register(void) {}
2035 static inline void unix_sysctl_unregister(void) {}
/*
 * Module init: sanity-check that unix_skb_parms fits in skb->cb,
 * create the unix_sock slab cache, register the PF_UNIX family, the
 * /proc/net/unix entry, and the sysctl hooks.
 * NOTE(review): error returns between the visible lines are missing
 * from this excerpt.
 */
2038 static int __init af_unix_init(void)
2040 struct sk_buff *dummy_skb;
/* Per-skb unix metadata is stored in skb->cb; it must fit. */
2042 if (sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb)) {
2043 printk(KERN_CRIT "%s: panic\n", __FUNCTION__);
2046 /* allocate our sock slab cache */
2047 unix_sk_cachep = kmem_cache_create("unix_sock",
2048 sizeof(struct unix_sock), 0,
2049 SLAB_HWCACHE_ALIGN, NULL, NULL);
2050 if (!unix_sk_cachep)
2052 "af_unix_init: Cannot create unix_sock SLAB cache!\n");
2054 sock_register(&unix_family_ops);
2055 #ifdef CONFIG_PROC_FS
2056 proc_net_fops_create("unix", 0, &unix_seq_fops);
2058 unix_sysctl_register();
/* Module exit: undo af_unix_init() registrations in reverse order. */
2062 static void __exit af_unix_exit(void)
2064 sock_unregister(PF_UNIX);
2065 unix_sysctl_unregister();
2066 proc_net_remove("unix");
2067 kmem_cache_destroy(unix_sk_cachep);
2070 module_init(af_unix_init);
2071 module_exit(af_unix_exit);
2073 MODULE_LICENSE("GPL");
2074 MODULE_ALIAS_NETPROTO(PF_UNIX);