2 * NET4: Implementation of BSD Unix domain sockets.
4 * Authors: Alan Cox, <alan.cox@linux.org>
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
11 * Version: $Id: af_unix.c,v 1.133 2002/02/08 03:57:19 davem Exp $
14 * Linus Torvalds : Assorted bug cures.
15 * Niibe Yutaka : async I/O support.
16 * Carsten Paeth : PF_UNIX check, address fixes.
17 * Alan Cox : Limit size of allocated blocks.
18 * Alan Cox : Fixed the stupid socketpair bug.
19 * Alan Cox : BSD compatibility fine tuning.
20 * Alan Cox : Fixed a bug in connect when interrupted.
21 * Alan Cox : Sorted out a proper draft version of
22 * file descriptor passing hacked up from
24 * Marty Leisner : Fixes to fd passing
25 * Nick Nevin : recvmsg bugfix.
26 * Alan Cox : Started proper garbage collector
27 * Heiko EiBfeldt : Missing verify_area check
28 * Alan Cox : Started POSIXisms
29 * Andreas Schwab : Replace inode by dentry for proper
31 * Kirk Petersen : Made this a module
32 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
34 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
35 * by above two patches.
36 * Andrea Arcangeli : If possible we block in connect(2)
37 * if the max backlog of the listen socket
38 * has been reached. This won't break
39 * old apps and it will avoid huge amount
40 * of socks hashed (this for unix_gc()
41 * performances reasons).
42 * Security fix that limits the max
43 * number of socks to 2*max_files and
44 * the number of skb queueable in the
46 * Artur Skawina : Hash function optimizations
47 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
48 * Malcolm Beattie : Set peercred for socketpair
49 * Michal Ostrowski : Module initialization cleanup.
50 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
51 * the core infrastructure is doing that
52 * for all net proto families now (2.5.69+)
55 * Known differences from reference BSD that was tested:
58 * ECONNREFUSED is not returned from one end of a connected() socket to the
59 * other the moment one end closes.
60 * fstat() doesn't return st_dev=0, and give the blksize as high water mark
61 * and a fake inode identifier (nor the BSD first socket fstat twice bug).
63 * accept() returns a path name even if the connecting socket has closed
64 * in the meantime (BSD loses the path and gives up).
65 * accept() returns 0 length path for an unbound connector. BSD returns 16
66 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
67 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
68 * BSD af_unix apparently has connect forgetting to block properly.
69 * (need to check this with the POSIX spec in detail)
71 * Differences from 2.0.0-11-... (ANK)
72 * Bug fixes and improvements.
73 * - client shutdown killed server socket.
74 * - removed all useless cli/sti pairs.
76 * Semantic changes/extensions.
77 * - generic control message passing.
78 * - SCM_CREDENTIALS control message.
79 * - "Abstract" (not FS based) socket bindings.
80 * Abstract names are sequences of bytes (not zero terminated)
81 * started by 0, so that this name space does not intersect
85 #include <linux/module.h>
86 #include <linux/kernel.h>
87 #include <linux/signal.h>
88 #include <linux/sched.h>
89 #include <linux/errno.h>
90 #include <linux/string.h>
91 #include <linux/stat.h>
92 #include <linux/dcache.h>
93 #include <linux/namei.h>
94 #include <linux/socket.h>
96 #include <linux/fcntl.h>
97 #include <linux/termios.h>
98 #include <linux/sockios.h>
99 #include <linux/net.h>
100 #include <linux/in.h>
101 #include <linux/fs.h>
102 #include <linux/slab.h>
103 #include <asm/uaccess.h>
104 #include <linux/skbuff.h>
105 #include <linux/netdevice.h>
106 #include <net/sock.h>
107 #include <net/tcp_states.h>
108 #include <net/af_unix.h>
109 #include <linux/proc_fs.h>
110 #include <linux/seq_file.h>
112 #include <linux/init.h>
113 #include <linux/poll.h>
114 #include <linux/smp_lock.h>
115 #include <linux/rtnetlink.h>
116 #include <linux/mount.h>
117 #include <net/checksum.h>
118 #include <linux/security.h>
119 #include <linux/vs_context.h>
120 #include <linux/vs_network.h>
121 #include <linux/vs_limit.h>
/* NOTE(review): this listing embeds original line numbers and has interior
 * lines elided throughout (braces/statements missing) — treat as excerpt. */
/* sysctl: max datagrams queueable on an AF_UNIX dgram socket (default 10). */
123 int sysctl_unix_max_dgram_qlen = 10;
/* Global hash table of unix sockets; the extra slot (index UNIX_HASH_SIZE)
 * holds sockets not yet bound to an address. Protected by unix_table_lock. */
125 struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
126 DEFINE_SPINLOCK(unix_table_lock);
/* Count of live unix socks, bounded in unix_create1() by 2*get_max_files(). */
127 static atomic_t unix_nr_socks = ATOMIC_INIT(0);
129 #define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
/* True for abstract-namespace sockets (their hash is never UNIX_HASH_SIZE). */
131 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
133 #ifdef CONFIG_SECURITY_NETWORK
/* Send path: stash the sender's LSM security ID into the skb's cb area
 * (called from unix_dgram_sendmsg; function bodies elided in this listing). */
134 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
136 memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
/* Receive path: recover the security ID from the skb into the scm cookie
 * (called from unix_dgram_recvmsg). Note get/set are named from the scm's
 * point of view, not the skb's. */
139 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
141 scm->secid = *UNIXSID(skb);
/* !CONFIG_SECURITY_NETWORK: empty stubs so callers need no #ifdefs. */
144 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
147 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
149 #endif /* CONFIG_SECURITY_NETWORK */
152 * SMP locking strategy:
153 * hash table is protected with spinlock unix_table_lock
154 * each socket state is protected by separate rwlock.
/* Fold a csum-derived hash into a table index in [0, UNIX_HASH_SIZE)
 * (the intermediate folding statements are elided in this listing). */
157 static inline unsigned unix_hash_fold(unsigned hash)
161 return hash&(UNIX_HASH_SIZE-1);
/* Connected-peer accessor for a unix sock. */
164 #define unix_peer(sk) (unix_sk(sk)->peer)
/* Does osk consider sk its peer? */
166 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
168 return unix_peer(osk) == sk;
/* sk may send to osk if osk is unconnected or connected back to sk. */
171 static inline int unix_may_send(struct sock *sk, struct sock *osk)
173 return (unix_peer(osk) == NULL || unix_our_peer(sk, osk));
/* Return s's peer with a reference held (lock/sock_hold/return lines are
 * elided here; callers such as unix_dgram_sendmsg sock_put the result). */
176 static struct sock *unix_peer_get(struct sock *s)
184 unix_state_runlock(s);
/* Drop one ref on a unix_address; frees it on the last ref (kfree elided). */
188 static inline void unix_release_addr(struct unix_address *addr)
190 if (atomic_dec_and_test(&addr->refcnt))
195 * Check unix socket name:
196 * - should be not zero length.
197 * - if started by not zero, should be NULL terminated (FS object)
198 * - if started by zero, it is abstract name.
/* Validate a sockaddr_un, normalize its length, and compute its hash.
 * Returns the (possibly shortened) length; error paths elided in listing. */
201 static int unix_mkname(struct sockaddr_un * sunaddr, int len, unsigned *hashp)
203 if (len <= sizeof(short) || len > sizeof(*sunaddr))
205 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
207 if (sunaddr->sun_path[0]) {
209 * This may look like an off by one error but it is a bit more
210 * subtle. 108 is the longest valid AF_UNIX path for a binding.
211 * sun_path[108] doesn't as such exist. However in kernel space
212 * we are guaranteed that it is a valid memory location in our
213 * kernel address buffer.
/* Force NUL termination, then recompute len from the actual path. */
215 ((char *)sunaddr)[len]=0;
216 len = strlen(sunaddr->sun_path)+1+sizeof(short);
/* Hash covers family + name bytes; folded to a table index. */
220 *hashp = unix_hash_fold(csum_partial((char*)sunaddr, len, 0));
/* Unhash sk; caller must hold unix_table_lock (the __ prefix convention). */
224 static void __unix_remove_socket(struct sock *sk)
226 sk_del_node_init(sk);
/* Hash sk onto list; caller must hold unix_table_lock. */
229 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
231 BUG_TRAP(sk_unhashed(sk));
232 sk_add_node(sk, list);
/* Locked wrappers around the two helpers above. */
235 static inline void unix_remove_socket(struct sock *sk)
237 spin_lock(&unix_table_lock);
238 __unix_remove_socket(sk);
239 spin_unlock(&unix_table_lock);
242 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
244 spin_lock(&unix_table_lock);
245 __unix_insert_socket(list, sk);
246 spin_unlock(&unix_table_lock);
/* Look up a bound socket by (name, len) in bucket hash^type; caller holds
 * unix_table_lock. The vx_check() line is a vserver-context filter. */
249 static struct sock *__unix_find_socket_byname(struct sockaddr_un *sunname,
250 int len, int type, unsigned hash)
253 struct hlist_node *node;
255 sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
256 struct unix_sock *u = unix_sk(s);
/* Skip sockets belonging to a different vserver context. */
258 if (!vx_check(s->sk_xid, VX_IDENT|VX_WATCH))
260 if (u->addr->len == len &&
261 !memcmp(u->addr->name, sunname, len))
/* Locked wrapper; presumably also takes a reference on the found sock —
 * the sock_hold line is elided in this listing, TODO confirm. */
269 static inline struct sock *unix_find_socket_byname(struct sockaddr_un *sunname,
275 spin_lock(&unix_table_lock);
276 s = __unix_find_socket_byname(sunname, len, type, hash);
279 spin_unlock(&unix_table_lock);
/* Look up a filesystem-bound socket by its inode: buckets are keyed by
 * i_ino for FS sockets (see unix_bind). Returns with table lock dropped. */
283 static struct sock *unix_find_socket_byinode(struct inode *i)
286 struct hlist_node *node;
288 spin_lock(&unix_table_lock);
290 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
291 struct dentry *dentry = unix_sk(s)->dentry;
293 if(dentry && dentry->d_inode == i)
301 spin_unlock(&unix_table_lock);
/* Writable when committed write memory is at most 1/4 of sndbuf. */
305 static inline int unix_writable(struct sock *sk)
307 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
/* sk_write_space callback: wake writers and send async POLL_OUT when the
 * socket becomes writable again. */
310 static void unix_write_space(struct sock *sk)
312 read_lock(&sk->sk_callback_lock);
313 if (unix_writable(sk)) {
314 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
315 wake_up_interruptible(sk->sk_sleep);
316 sk_wake_async(sk, 2, POLL_OUT);
318 read_unlock(&sk->sk_callback_lock);
321 /* When dgram socket disconnects (or changes its peer), we clear its receive
322 * queue of packets arrived from previous peer. First, it allows to do
323 * flow control based only on wmem_alloc; second, sk connected to peer
324 * may receive messages only from that peer. */
325 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
327 if (!skb_queue_empty(&sk->sk_receive_queue)) {
328 skb_queue_purge(&sk->sk_receive_queue);
/* Writers blocked on the old peer's backlog must re-evaluate. */
329 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
331 /* If one link of bidirectional dgram pipe is disconnected,
332 * we signal error. Messages are lost. Do not make this,
333 * when peer was not connected to us.
335 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
336 other->sk_err = ECONNRESET;
337 other->sk_error_report(other);
/* sk_destruct callback: final teardown once the last ref is gone.
 * Sanity-checks the sock is dead, unhashed, and has no pending wmem. */
342 static void unix_sock_destructor(struct sock *sk)
344 struct unix_sock *u = unix_sk(sk);
346 skb_queue_purge(&sk->sk_receive_queue);
348 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
349 BUG_TRAP(sk_unhashed(sk));
350 BUG_TRAP(!sk->sk_socket);
351 if (!sock_flag(sk, SOCK_DEAD)) {
352 printk("Attempt to release alive unix socket: %p\n", sk);
/* Drop the bound address, if any (NULL check elided in this listing). */
357 unix_release_addr(u->addr);
359 atomic_dec(&unix_nr_socks);
360 #ifdef UNIX_REFCNT_DEBUG
361 printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk, atomic_read(&unix_nr_socks));
/* Core of close(): unhash sk, mark it shut down and CLOSE, notify the
 * stream/seqpacket peer (ECONNRESET if data was unread or sock was an
 * embrion, i.e. an accepted-but-unfetched connection), flush the receive
 * queue, and kick the fd garbage collector if fds are in flight. */
365 static int unix_release_sock (struct sock *sk, int embrion)
367 struct unix_sock *u = unix_sk(sk);
368 struct dentry *dentry;
369 struct vfsmount *mnt;
374 unix_remove_socket(sk);
377 unix_state_wlock(sk);
379 sk->sk_shutdown = SHUTDOWN_MASK;
384 state = sk->sk_state;
385 sk->sk_state = TCP_CLOSE;
386 unix_state_wunlock(sk);
388 wake_up_interruptible_all(&u->peer_wait);
390 skpair=unix_peer(sk);
393 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
394 unix_state_wlock(skpair);
396 skpair->sk_shutdown = SHUTDOWN_MASK;
/* Unread data (or an embryonic sock) means an abortive close. */
397 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
398 skpair->sk_err = ECONNRESET;
399 unix_state_wunlock(skpair);
400 skpair->sk_state_change(skpair);
401 read_lock(&skpair->sk_callback_lock);
402 sk_wake_async(skpair,1,POLL_HUP);
403 read_unlock(&skpair->sk_callback_lock);
405 sock_put(skpair); /* It may now die */
406 unix_peer(sk) = NULL;
409 /* Try to flush out this socket. Throw out buffers at least */
411 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
/* A listener's queue holds embryonic socks, not data: release them. */
412 if (state==TCP_LISTEN)
413 unix_release_sock(skb->sk, 1);
414 /* passed fds are erased in the kfree_skb hook */
425 /* ---- Socket is dead now and most probably destroyed ---- */
428 * Fixme: BSD difference: In BSD all sockets connected to use get
429 * ECONNRESET and we die on the spot. In Linux we behave
430 * like files and pipes do and wait for the last
433 * Can't we simply set sock->err?
435 * What the above comment does talk about? --ANK(980817)
438 if (atomic_read(&unix_tot_inflight))
439 unix_gc(); /* Garbage collect fds */
/* listen(2): only valid on bound stream/seqpacket sockets. Sets backlog,
 * flips state to TCP_LISTEN, and records credentials for connecting peers. */
444 static int unix_listen(struct socket *sock, int backlog)
447 struct sock *sk = sock->sk;
448 struct unix_sock *u = unix_sk(sk);
451 if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
452 goto out; /* Only stream/seqpacket sockets accept */
455 goto out; /* No listens on an unbound socket */
456 unix_state_wlock(sk);
457 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
/* A raised backlog may unblock connectors waiting in unix_wait_for_peer. */
459 if (backlog > sk->sk_max_ack_backlog)
460 wake_up_interruptible_all(&u->peer_wait);
461 sk->sk_max_ack_backlog = backlog;
462 sk->sk_state = TCP_LISTEN;
463 /* set credentials so connect can copy them */
464 sk->sk_peercred.pid = current->tgid;
465 sk->sk_peercred.uid = current->euid;
466 sk->sk_peercred.gid = current->egid;
470 unix_state_wunlock(sk);
/* Forward declarations for the proto_ops tables below. */
475 static int unix_release(struct socket *);
476 static int unix_bind(struct socket *, struct sockaddr *, int);
477 static int unix_stream_connect(struct socket *, struct sockaddr *,
478 int addr_len, int flags);
479 static int unix_socketpair(struct socket *, struct socket *);
480 static int unix_accept(struct socket *, struct socket *, int);
481 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
482 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
483 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
484 static int unix_shutdown(struct socket *, int);
485 static int unix_stream_sendmsg(struct kiocb *, struct socket *,
486 struct msghdr *, size_t);
487 static int unix_stream_recvmsg(struct kiocb *, struct socket *,
488 struct msghdr *, size_t, int);
489 static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
490 struct msghdr *, size_t);
491 static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
492 struct msghdr *, size_t, int);
493 static int unix_dgram_connect(struct socket *, struct sockaddr *,
495 static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
496 struct msghdr *, size_t);
/* proto_ops for SOCK_STREAM AF_UNIX sockets (some entries elided here:
 * .family/.bind/.poll/.ioctl lines are missing from this listing). */
498 static const struct proto_ops unix_stream_ops = {
500 .owner = THIS_MODULE,
501 .release = unix_release,
503 .connect = unix_stream_connect,
504 .socketpair = unix_socketpair,
505 .accept = unix_accept,
506 .getname = unix_getname,
509 .listen = unix_listen,
510 .shutdown = unix_shutdown,
511 .setsockopt = sock_no_setsockopt,
512 .getsockopt = sock_no_getsockopt,
513 .sendmsg = unix_stream_sendmsg,
514 .recvmsg = unix_stream_recvmsg,
515 .mmap = sock_no_mmap,
516 .sendpage = sock_no_sendpage,
/* proto_ops for SOCK_DGRAM: no accept/listen; datagram poll semantics. */
519 static const struct proto_ops unix_dgram_ops = {
521 .owner = THIS_MODULE,
522 .release = unix_release,
524 .connect = unix_dgram_connect,
525 .socketpair = unix_socketpair,
526 .accept = sock_no_accept,
527 .getname = unix_getname,
528 .poll = datagram_poll,
530 .listen = sock_no_listen,
531 .shutdown = unix_shutdown,
532 .setsockopt = sock_no_setsockopt,
533 .getsockopt = sock_no_getsockopt,
534 .sendmsg = unix_dgram_sendmsg,
535 .recvmsg = unix_dgram_recvmsg,
536 .mmap = sock_no_mmap,
537 .sendpage = sock_no_sendpage,
/* proto_ops for SOCK_SEQPACKET: stream-style connect/accept/listen but
 * datagram-style recv; send enforces connection in unix_seqpacket_sendmsg. */
540 static const struct proto_ops unix_seqpacket_ops = {
542 .owner = THIS_MODULE,
543 .release = unix_release,
545 .connect = unix_stream_connect,
546 .socketpair = unix_socketpair,
547 .accept = unix_accept,
548 .getname = unix_getname,
549 .poll = datagram_poll,
551 .listen = unix_listen,
552 .shutdown = unix_shutdown,
553 .setsockopt = sock_no_setsockopt,
554 .getsockopt = sock_no_getsockopt,
555 .sendmsg = unix_seqpacket_sendmsg,
556 .recvmsg = unix_dgram_recvmsg,
557 .mmap = sock_no_mmap,
558 .sendpage = sock_no_sendpage,
/* struct proto: sizes sk_alloc for a struct unix_sock. */
561 static struct proto unix_proto = {
563 .owner = THIS_MODULE,
564 .obj_size = sizeof(struct unix_sock),
568 * AF_UNIX sockets do not interact with hardware, hence they
569 * don't trigger interrupts - so it's safe for them to have
570 * bh-unsafe locking for their sk_receive_queue.lock. Split off
571 * this special lock-class by reinitializing the spinlock key:
573 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
/* Allocate and initialize one unix sock; sock may be NULL (used by
 * unix_stream_connect for the embryonic server-side sock). Returns the
 * new sock or NULL (error paths elided in this listing). */
575 static struct sock * unix_create1(struct socket *sock)
577 struct sock *sk = NULL;
/* Global cap: at most 2*max_files unix socks system-wide. */
580 if (atomic_read(&unix_nr_socks) >= 2*get_max_files())
583 sk = sk_alloc(PF_UNIX, GFP_KERNEL, &unix_proto, 1);
587 atomic_inc(&unix_nr_socks);
589 sock_init_data(sock,sk);
590 lockdep_set_class(&sk->sk_receive_queue.lock,
591 &af_unix_sk_receive_queue_lock_key);
593 sk->sk_write_space = unix_write_space;
594 sk->sk_max_ack_backlog = sysctl_unix_max_dgram_qlen;
595 sk->sk_destruct = unix_sock_destructor;
599 spin_lock_init(&u->lock);
/* Embryonic socks (sock==NULL) start with inflight -1; fixed up once
 * queued on the listener (see unix_stream_connect). */
600 atomic_set(&u->inflight, sock ? 0 : -1);
601 mutex_init(&u->readlock); /* single task reading lock */
602 init_waitqueue_head(&u->peer_wait);
603 unix_insert_socket(unix_sockets_unbound, sk);
/* socket(2) entry: pick the proto_ops table by sock->type and allocate
 * the sock. SOCK_RAW is silently treated as SOCK_DGRAM (BSD compat). */
608 static int unix_create(struct socket *sock, int protocol)
610 if (protocol && protocol != PF_UNIX)
611 return -EPROTONOSUPPORT;
613 sock->state = SS_UNCONNECTED;
615 switch (sock->type) {
617 sock->ops = &unix_stream_ops;
620 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
/* fallthrough: RAW is remapped to DGRAM */
624 sock->type=SOCK_DGRAM;
626 sock->ops = &unix_dgram_ops;
629 sock->ops = &unix_seqpacket_ops;
632 return -ESOCKTNOSUPPORT;
635 return unix_create1(sock) ? 0 : -ENOMEM;
/* close(2) entry: delegate to unix_release_sock with embrion=0
 * (NULL-check and sock->sk clearing lines elided in this listing). */
638 static int unix_release(struct socket *sock)
640 struct sock *sk = sock->sk;
647 return unix_release_sock (sk, 0);
/* Auto-bind an unbound socket to a fresh abstract name of the form
 * "\0XXXXX" (5 hex digits from the static ordernum counter), retrying
 * under unix_table_lock until an unused name is found. */
650 static int unix_autobind(struct socket *sock)
652 struct sock *sk = sock->sk;
653 struct unix_sock *u = unix_sk(sk);
654 static u32 ordernum = 1;
655 struct unix_address * addr;
/* readlock serializes bind/autobind against concurrent binds. */
658 mutex_lock(&u->readlock);
665 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
669 addr->name->sun_family = AF_UNIX;
670 atomic_set(&addr->refcnt, 1);
/* Leading NUL (from kzalloc) marks this as an abstract name. */
673 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
674 addr->hash = unix_hash_fold(csum_partial((void*)addr->name, addr->len, 0));
676 spin_lock(&unix_table_lock);
677 ordernum = (ordernum+1)&0xFFFFF;
679 if (__unix_find_socket_byname(addr->name, addr->len, sock->type,
681 spin_unlock(&unix_table_lock);
682 /* Sanity yield. It is unusual case, but yet... */
683 if (!(ordernum&0xFF))
/* Bucket index also mixes in the socket type. */
687 addr->hash ^= sk->sk_type;
689 __unix_remove_socket(sk);
691 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
692 spin_unlock(&unix_table_lock);
695 out: mutex_unlock(&u->readlock);
/* Resolve a destination address to a sock: filesystem names go through
 * path_lookup + inode lookup (requiring MAY_WRITE and S_ISSOCK), abstract
 * names through the name hash. Updates atime on success; error reported
 * via *error (several error/cleanup lines elided in this listing). */
699 static struct sock *unix_find_other(struct sockaddr_un *sunname, int len,
700 int type, unsigned hash, int *error)
706 if (sunname->sun_path[0]) {
707 err = path_lookup(sunname->sun_path, LOOKUP_FOLLOW, &nd);
710 err = vfs_permission(&nd, MAY_WRITE);
715 if (!S_ISSOCK(nd.dentry->d_inode->i_mode))
717 u=unix_find_socket_byinode(nd.dentry->d_inode);
/* Type mismatch (e.g. dgram name, stream connect) is rejected below. */
721 if (u->sk_type == type)
722 touch_atime(nd.mnt, nd.dentry);
727 if (u->sk_type != type) {
733 u=unix_find_socket_byname(sunname, len, type, hash);
735 struct dentry *dentry;
736 dentry = unix_sk(u)->dentry;
738 touch_atime(unix_sk(u)->mnt, dentry);
/* bind(2): empty name -> autobind; abstract name -> insert by name hash
 * (failing -EADDRINUSE on collision); filesystem name -> mknod a socket
 * inode and insert by inode number. readlock serializes with autobind.
 * Several error-path/cleanup lines are elided in this listing. */
752 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
754 struct sock *sk = sock->sk;
755 struct unix_sock *u = unix_sk(sk);
756 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
757 struct dentry * dentry = NULL;
761 struct unix_address *addr;
762 struct hlist_head *list;
765 if (sunaddr->sun_family != AF_UNIX)
/* Bare family with no name at all: autobind to an abstract name. */
768 if (addr_len==sizeof(short)) {
769 err = unix_autobind(sock);
773 err = unix_mkname(sunaddr, addr_len, &hash);
778 mutex_lock(&u->readlock);
785 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
789 memcpy(addr->name, sunaddr, addr_len);
790 addr->len = addr_len;
791 addr->hash = hash ^ sk->sk_type;
792 atomic_set(&addr->refcnt, 1);
794 if (sunaddr->sun_path[0]) {
798 * Get the parent directory, calculate the hash for last
801 err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
803 goto out_mknod_parent;
805 dentry = lookup_create(&nd, 0);
806 err = PTR_ERR(dentry);
808 goto out_mknod_unlock;
811 * All right, let's create it.
/* Socket inode mode honours the process umask. */
814 (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
815 err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0, NULL);
818 mutex_unlock(&nd.dentry->d_inode->i_mutex);
/* FS-bound sockets are marked non-abstract. */
822 addr->hash = UNIX_HASH_SIZE;
825 spin_lock(&unix_table_lock);
827 if (!sunaddr->sun_path[0]) {
/* Abstract: the name must be unique for this socket type. */
829 if (__unix_find_socket_byname(sunaddr, addr_len,
830 sk->sk_type, hash)) {
831 unix_release_addr(addr);
835 list = &unix_socket_table[addr->hash];
837 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
838 u->dentry = nd.dentry;
843 __unix_remove_socket(sk);
845 __unix_insert_socket(list, sk);
848 spin_unlock(&unix_table_lock);
850 mutex_unlock(&u->readlock);
857 mutex_unlock(&nd.dentry->d_inode->i_mutex);
862 unix_release_addr(addr);
/* connect(2) for dgram sockets: AF_UNSPEC disconnects; otherwise resolve
 * the target, check send permission, and (re)set unix_peer. On reconnect
 * the old peer's queue is flushed via unix_dgram_disconnected. Reference
 * counting (sock_hold/sock_put) lines are elided in this listing. */
866 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
869 struct sock *sk = sock->sk;
870 struct sockaddr_un *sunaddr=(struct sockaddr_un*)addr;
875 if (addr->sa_family != AF_UNSPEC) {
876 err = unix_mkname(sunaddr, alen, &hash);
/* SOCK_PASSCRED requires a bound source so creds carry an address. */
881 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
882 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
885 other=unix_find_other(sunaddr, alen, sock->type, hash, &err);
889 unix_state_wlock(sk);
892 if (!unix_may_send(sk, other))
895 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
901 * 1003.1g breaking connected state with AF_UNSPEC
904 unix_state_wlock(sk);
908 * If it was connected, reconnect.
911 struct sock *old_peer = unix_peer(sk);
913 unix_state_wunlock(sk);
/* Changing peers invalidates datagrams queued from the old one. */
915 if (other != old_peer)
916 unix_dgram_disconnected(sk, old_peer);
920 unix_state_wunlock(sk);
925 unix_state_wunlock(sk);
/* Block until the peer's receive queue drops below its backlog, the peer
 * dies/shuts down, or the timeout expires. Called with other's state lock
 * held for read; drops it before sleeping. Returns remaining timeout. */
931 static long unix_wait_for_peer(struct sock *other, long timeo)
933 struct unix_sock *u = unix_sk(other);
937 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
/* sched: only sleep while the peer is alive, open, and over backlog. */
939 sched = !sock_flag(other, SOCK_DEAD) &&
940 !(other->sk_shutdown & RCV_SHUTDOWN) &&
941 (skb_queue_len(&other->sk_receive_queue) >
942 other->sk_max_ack_backlog);
944 unix_state_runlock(other);
947 timeo = schedule_timeout(timeo);
949 finish_wait(&u->peer_wait, &wait);
/* connect(2) for stream/seqpacket: allocate an embryonic server-side sock
 * and a carrier skb up front, find the listener, wait for backlog room if
 * needed, then atomically wire up both peers and queue the embrion on the
 * listener for accept(). Error/cleanup paths are elided in this listing. */
953 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
954 int addr_len, int flags)
956 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
957 struct sock *sk = sock->sk;
958 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
959 struct sock *newsk = NULL;
960 struct sock *other = NULL;
961 struct sk_buff *skb = NULL;
967 err = unix_mkname(sunaddr, addr_len, &hash);
972 if (test_bit(SOCK_PASSCRED, &sock->flags)
973 && !u->addr && (err = unix_autobind(sock)) != 0)
976 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
978 /* First of all allocate resources.
979 If we will make it after state is locked,
980 we will have to recheck all again in any case.
985 /* create new sock for complete connection */
986 newsk = unix_create1(NULL);
990 /* Allocate skb for sending to listening sock */
991 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
996 /* Find listening sock. */
997 other = unix_find_other(sunaddr, addr_len, sk->sk_type, hash, &err);
1001 /* Latch state of peer */
1002 unix_state_rlock(other);
1004 /* Apparently VFS overslept socket death. Retry. */
1005 if (sock_flag(other, SOCK_DEAD)) {
1006 unix_state_runlock(other);
1011 err = -ECONNREFUSED;
1012 if (other->sk_state != TCP_LISTEN)
/* Listener backlog full: wait (or fail with EAGAIN if non-blocking). */
1015 if (skb_queue_len(&other->sk_receive_queue) >
1016 other->sk_max_ack_backlog) {
1021 timeo = unix_wait_for_peer(other, timeo);
1023 err = sock_intr_errno(timeo);
1024 if (signal_pending(current))
1032 It is tricky place. We need to grab write lock and cannot
1033 drop lock on peer. It is dangerous because deadlock is
1034 possible. Connect to self case and simultaneous
1035 attempt to connect are eliminated by checking socket
1036 state. other is TCP_LISTEN, if sk is TCP_LISTEN we
1037 check this before attempt to grab lock.
1039 Well, and we have to recheck the state after socket locked.
1045 /* This is ok... continue with connect */
1047 case TCP_ESTABLISHED:
1048 /* Socket is already connected */
/* Nested lock class: other's rlock is held; lockdep-safe ordering. */
1056 unix_state_wlock_nested(sk);
1058 if (sk->sk_state != st) {
1059 unix_state_wunlock(sk);
1060 unix_state_runlock(other);
1065 err = security_unix_stream_connect(sock, other->sk_socket, newsk);
1067 unix_state_wunlock(sk);
1071 /* The way is open! Fastly set all the necessary fields... */
1074 unix_peer(newsk) = sk;
1075 newsk->sk_state = TCP_ESTABLISHED;
1076 newsk->sk_type = sk->sk_type;
1077 newsk->sk_peercred.pid = current->tgid;
1078 newsk->sk_peercred.uid = current->euid;
1079 newsk->sk_peercred.gid = current->egid;
1080 newu = unix_sk(newsk);
1081 newsk->sk_sleep = &newu->peer_wait;
1082 otheru = unix_sk(other);
1084 /* copy address information from listening to new sock*/
1086 atomic_inc(&otheru->addr->refcnt);
1087 newu->addr = otheru->addr;
1089 if (otheru->dentry) {
1090 newu->dentry = dget(otheru->dentry);
1091 newu->mnt = mntget(otheru->mnt);
1094 /* Set credentials */
1095 sk->sk_peercred = other->sk_peercred;
1097 sock->state = SS_CONNECTED;
1098 sk->sk_state = TCP_ESTABLISHED;
1101 smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */
1102 unix_peer(sk) = newsk;
1104 unix_state_wunlock(sk);
1106 /* queue the skb and notify the listening sock */
1107 spin_lock(&other->sk_receive_queue.lock);
1108 __skb_queue_tail(&other->sk_receive_queue, skb);
1109 /* Undo artificially decreased inflight after embrion
1110 * is installed to listening socket. */
1111 atomic_inc(&newu->inflight);
1112 spin_unlock(&other->sk_receive_queue.lock);
1113 unix_state_runlock(other);
1114 other->sk_data_ready(other, 0);
1120 unix_state_runlock(other);
1126 unix_release_sock(newsk, 0);
/* socketpair(2): join two fresh socks back to back, set peer credentials
 * on both, and mark them connected unless they are plain dgram sockets
 * (the sock_hold/unix_peer assignment lines are elided in this listing). */
1132 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1134 struct sock *ska=socka->sk, *skb = sockb->sk;
1136 /* Join our sockets back to back */
1141 ska->sk_peercred.pid = skb->sk_peercred.pid = current->tgid;
1142 ska->sk_peercred.uid = skb->sk_peercred.uid = current->euid;
1143 ska->sk_peercred.gid = skb->sk_peercred.gid = current->egid;
1145 if (ska->sk_type != SOCK_DGRAM) {
1146 ska->sk_state = TCP_ESTABLISHED;
1147 skb->sk_state = TCP_ESTABLISHED;
1148 socka->state = SS_CONNECTED;
1149 sockb->state = SS_CONNECTED;
/* accept(2): dequeue one embryonic connection skb from the listener; the
 * new sock travels as skb->sk (extraction into tsk is elided in this
 * listing). Wakes writers blocked on the listener's backlog, then grafts
 * the accepted sock onto newsock. */
1154 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1156 struct sock *sk = sock->sk;
1158 struct sk_buff *skb;
1162 if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
1166 if (sk->sk_state != TCP_LISTEN)
1169 /* If socket state is TCP_LISTEN it cannot change (for now...),
1170 * so that no locks are necessary.
1173 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1175 /* This means receive shutdown. */
1182 skb_free_datagram(sk, skb);
/* Backlog shrank: let blocked connectors retry. */
1183 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1185 /* attach accepted sock to socket */
1186 unix_state_wlock(tsk);
1187 newsock->state = SS_CONNECTED;
1188 sock_graft(tsk, newsock);
1189 unix_state_wunlock(tsk);
/* getsockname/getpeername: for peer=1 switch to the peer sock first.
 * Unbound sockets report just the family (len = sizeof(short)); bound
 * ones copy the full stored unix_address. */
1197 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1199 struct sock *sk = sock->sk;
1200 struct unix_sock *u;
1201 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
1205 sk = unix_peer_get(sk);
1216 unix_state_rlock(sk);
1218 sunaddr->sun_family = AF_UNIX;
1219 sunaddr->sun_path[0] = 0;
1220 *uaddr_len = sizeof(short);
1222 struct unix_address *addr = u->addr;
1224 *uaddr_len = addr->len;
1225 memcpy(sunaddr, addr->name, *uaddr_len);
1227 unix_state_runlock(sk);
/* Move passed fds off an skb into an scm cookie and drop their in-flight
 * accounting (the GC tracks fds travelling inside AF_UNIX messages). */
1233 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1237 scm->fp = UNIXCB(skb).fp;
/* Restore the plain destructor once the fd payload is detached. */
1238 skb->destructor = sock_wfree;
1239 UNIXCB(skb).fp = NULL;
1241 for (i=scm->fp->count-1; i>=0; i--)
1242 unix_notinflight(scm->fp->fp[i]);
/* skb destructor for fd-carrying skbs: detach fds, then release them
 * via a throwaway scm cookie (scm_destroy call elided in this listing). */
1245 static void unix_destruct_fds(struct sk_buff *skb)
1247 struct scm_cookie scm;
1248 memset(&scm, 0, sizeof(scm));
1249 unix_detach_fds(&scm, skb);
1251 /* Alas, it calls VFS */
1252 /* So fscking what? fput() had been SMP-safe since the last Summer */
/* Inverse of detach: mark each passed fd in-flight and hang the fd list
 * on the skb with the fd-aware destructor. */
1257 static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1260 for (i=scm->fp->count-1; i>=0; i--)
1261 unix_inflight(scm->fp->fp[i]);
1262 UNIXCB(skb).fp = scm->fp;
1263 skb->destructor = unix_destruct_fds;
1268 * Send AF_UNIX data.
/* Datagram send: build one skb carrying data + creds + fds + secdata,
 * resolve the destination (explicit name or connected peer), enforce the
 * receiver's backlog (blocking via unix_wait_for_peer unless the receiver
 * is our connected peer), and queue it. Also used by seqpacket sends.
 * Several error/restart paths are elided in this listing. */
1271 static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1272 struct msghdr *msg, size_t len)
1274 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1275 struct sock *sk = sock->sk;
1276 struct unix_sock *u = unix_sk(sk);
1277 struct sockaddr_un *sunaddr=msg->msg_name;
1278 struct sock *other = NULL;
1279 int namelen = 0; /* fake GCC */
1282 struct sk_buff *skb;
1284 struct scm_cookie tmp_scm;
1286 if (NULL == siocb->scm)
1287 siocb->scm = &tmp_scm;
1288 err = scm_send(sock, msg, siocb->scm)
1293 if (msg->msg_flags&MSG_OOB)
1296 if (msg->msg_namelen) {
1297 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1304 other = unix_peer_get(sk);
1309 if (test_bit(SOCK_PASSCRED, &sock->flags)
1310 && !u->addr && (err = unix_autobind(sock)) != 0)
/* Reserve headroom so one datagram can't monopolize sndbuf. */
1314 if (len > sk->sk_sndbuf - 32)
1317 skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
1321 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1323 unix_attach_fds(siocb->scm, skb);
1324 unix_get_secdata(siocb->scm, skb);
1326 skb->h.raw = skb->data;
1327 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
1331 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1336 if (sunaddr == NULL)
1339 other = unix_find_other(sunaddr, namelen, sk->sk_type,
1345 unix_state_rlock(other);
1347 if (!unix_may_send(sk, other))
1350 if (sock_flag(other, SOCK_DEAD)) {
1352 * Check with 1003.1g - what should
1355 unix_state_runlock(other);
/* Our connected peer died: disconnect and report ECONNREFUSED. */
1359 unix_state_wlock(sk);
1360 if (unix_peer(sk) == other) {
1362 unix_state_wunlock(sk);
1364 unix_dgram_disconnected(sk, other);
1366 err = -ECONNREFUSED;
1368 unix_state_wunlock(sk);
1378 if (other->sk_shutdown & RCV_SHUTDOWN)
/* seqpacket sends were security-checked at connect time already. */
1381 if (sk->sk_type != SOCK_SEQPACKET) {
1382 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
/* Unconnected receiver over backlog: block or restart. */
1387 if (unix_peer(other) != sk &&
1388 (skb_queue_len(&other->sk_receive_queue) >
1389 other->sk_max_ack_backlog)) {
1395 timeo = unix_wait_for_peer(other, timeo);
1397 err = sock_intr_errno(timeo);
1398 if (signal_pending(current))
1404 skb_queue_tail(&other->sk_receive_queue, skb);
1405 unix_state_runlock(other);
1406 other->sk_data_ready(other, len);
1408 scm_destroy(siocb->scm);
1412 unix_state_runlock(other);
1418 scm_destroy(siocb->scm);
/* Stream send: split the payload into skbs (capped at half of sndbuf and
 * SKB_MAX_ALLOC) so the pipe schedules well, attaching creds/fds to each.
 * Raises SIGPIPE on a broken pipe with nothing yet sent, unless
 * MSG_NOSIGNAL. Loop framing and some error paths are elided here. */
1423 static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1424 struct msghdr *msg, size_t len)
1426 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1427 struct sock *sk = sock->sk;
1428 struct sock *other = NULL;
1429 struct sockaddr_un *sunaddr=msg->msg_name;
1431 struct sk_buff *skb;
1433 struct scm_cookie tmp_scm;
1435 if (NULL == siocb->scm)
1436 siocb->scm = &tmp_scm;
1437 err = scm_send(sock, msg, siocb->scm);
1442 if (msg->msg_flags&MSG_OOB)
/* Explicit destinations are meaningless on a stream socket. */
1445 if (msg->msg_namelen) {
1446 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1451 other = unix_peer(sk);
1456 if (sk->sk_shutdown & SEND_SHUTDOWN)
1462 * Optimisation for the fact that under 0.01% of X
1463 * messages typically need breaking up.
1468 /* Keep two messages in the pipe so it schedules better */
1469 if (size > ((sk->sk_sndbuf >> 1) - 64))
1470 size = (sk->sk_sndbuf >> 1) - 64;
1472 if (size > SKB_MAX_ALLOC)
1473 size = SKB_MAX_ALLOC;
1479 skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err);
1485 * If you pass two values to the sock_alloc_send_skb
1486 * it tries to grab the large buffer with GFP_NOFS
1487 * (which can fail easily), and if it fails grab the
1488 * fallback size buffer which is under a page and will
/* The skb may be smaller than requested; send only what fits. */
1491 size = min_t(int, size, skb_tailroom(skb));
1493 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1495 unix_attach_fds(siocb->scm, skb);
1497 if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
1502 unix_state_rlock(other);
1504 if (sock_flag(other, SOCK_DEAD) ||
1505 (other->sk_shutdown & RCV_SHUTDOWN))
1508 skb_queue_tail(&other->sk_receive_queue, skb);
1509 unix_state_runlock(other);
1510 other->sk_data_ready(other, size);
1514 scm_destroy(siocb->scm);
1520 unix_state_runlock(other);
1523 if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL))
1524 send_sig(SIGPIPE,current,0);
1527 scm_destroy(siocb->scm);
/* Partial progress wins over the error code. */
1529 return sent ? : err;
/* Seqpacket send: require an established connection, strip any explicit
 * destination, then reuse the datagram send path (which preserves
 * message boundaries). */
1532 static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1533 struct msghdr *msg, size_t len)
1536 struct sock *sk = sock->sk;
1538 err = sock_error(sk);
1542 if (sk->sk_state != TCP_ESTABLISHED)
1545 if (msg->msg_namelen)
1546 msg->msg_namelen = 0;
1548 return unix_dgram_sendmsg(kiocb, sock, msg, len);
/*
 * Copy the source socket's bound address (if any) into the msghdr so
 * recvmsg() can report it; with no bound address msg_namelen stays 0.
 */
1551 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1553 struct unix_sock *u = unix_sk(sk);
1555 msg->msg_namelen = 0;
/* u->addr is only set once the socket has a (possibly autobound)
 * address — presumably guarded by a NULL check in an elided line. */
1557 msg->msg_namelen = u->addr->len;
1558 memcpy(msg->msg_name, u->addr->name, u->addr->len);
/*
 * recvmsg() for datagram-style AF_UNIX sockets: dequeue one datagram,
 * copy out its payload, source address, credentials and any passed
 * file descriptors (SCM_RIGHTS).
 */
1562 static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1563 struct msghdr *msg, size_t size,
1566 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1567 struct scm_cookie tmp_scm;
1568 struct sock *sk = sock->sk;
1569 struct unix_sock *u = unix_sk(sk);
1570 int noblock = flags & MSG_DONTWAIT;
1571 struct sk_buff *skb;
1578 msg->msg_namelen = 0;
/* readlock serialises concurrent readers of this socket. */
1580 mutex_lock(&u->readlock);
1582 skb = skb_recv_datagram(sk, flags, noblock, &err);
/* A datagram was consumed: wake anyone sleeping on peer_wait
 * (e.g. a sender blocked on our full receive queue). */
1586 wake_up_interruptible(&u->peer_wait);
/* Report the sender's address, taken from the originating socket. */
1589 unix_copy_addr(msg, skb->sk);
1591 if (size > skb->len)
1593 else if (size < skb->len)
/* Datagram larger than the caller's buffer: flag the truncation. */
1594 msg->msg_flags |= MSG_TRUNC;
1596 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
/* No scm cookie supplied by the caller: use a zeroed one on stack. */
1601 siocb->scm = &tmp_scm;
1602 memset(&tmp_scm, 0, sizeof(tmp_scm));
/* Hand the sender's credentials and security data to the receiver. */
1604 siocb->scm->creds = *UNIXCREDS(skb);
1605 unix_set_secdata(siocb->scm, skb);
1607 if (!(flags & MSG_PEEK))
/* Normal read: transfer any in-flight fds out of the skb. */
1610 unix_detach_fds(siocb->scm, skb);
1614 /* It is questionable: on PEEK we could:
1615 - do not return fds - good, but too simple 8)
1616 - return fds, and do not return them on read (old strategy,
1618 - clone fds (I chose it for now, it is the most universal
1621 POSIX 1003.1g does not actually define this clearly
1622 at all. POSIX 1003.1g doesn't define a lot of things
/* MSG_PEEK path: duplicate the fd references, leaving the originals
 * attached to the queued skb for the eventual real read. */
1627 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
/* Deliver ancillary data (creds/fds/security) to user space. */
1631 scm_recv(sock, msg, siocb->scm, flags);
1634 skb_free_datagram(sk,skb);
1636 mutex_unlock(&u->readlock);
1642 * Sleep until data has arrived. But check for races..
/*
 * Block until the receive queue becomes non-empty, the timeout
 * expires, a signal is pending, or the socket is shut down for
 * reading.  Returns the remaining timeout from schedule_timeout().
 */
1645 static long unix_stream_data_wait(struct sock * sk, long timeo)
1649 unix_state_rlock(sk);
1652 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
/* Stop waiting as soon as there is data, a receive-side shutdown,
 * or a pending signal. */
1654 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1656 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1657 signal_pending(current) ||
/* Advertise that a reader is sleeping for data, and drop the state
 * lock around the actual sleep so writers can make progress. */
1661 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1662 unix_state_runlock(sk);
1663 timeo = schedule_timeout(timeo);
1664 unix_state_rlock(sk);
1665 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1668 finish_wait(sk->sk_sleep, &wait);
1669 unix_state_runlock(sk);
/*
 * recvmsg() for SOCK_STREAM AF_UNIX sockets.  Repeatedly dequeues
 * skbs and glues their payloads together until at least 'target'
 * bytes (sock_rcvlowat) have been copied, while never merging data
 * written under different credentials.
 */
1675 static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1676 struct msghdr *msg, size_t size,
1679 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1680 struct scm_cookie tmp_scm;
1681 struct sock *sk = sock->sk;
1682 struct unix_sock *u = unix_sk(sk);
1683 struct sockaddr_un *sunaddr=msg->msg_name;
/* Set once the first skb's credentials are latched; later skbs must
 * match them exactly or the read stops short (see memcmp below). */
1685 int check_creds = 0;
1691 if (sk->sk_state != TCP_ESTABLISHED)
/* Minimum byte count to return, and how long we may block for it. */
1698 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1699 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1701 msg->msg_namelen = 0;
1703 /* Lock the socket to prevent queue disordering
1704 * while sleeps in memcpy_tomsg
1708 siocb->scm = &tmp_scm;
1709 memset(&tmp_scm, 0, sizeof(tmp_scm));
1712 mutex_lock(&u->readlock);
1717 struct sk_buff *skb;
1719 skb = skb_dequeue(&sk->sk_receive_queue);
/* Queue empty: stop if the low-water mark is already satisfied. */
1722 if (copied >= target)
1726 * POSIX 1003.1g mandates this order.
1729 if ((err = sock_error(sk)) != 0)
1731 if (sk->sk_shutdown & RCV_SHUTDOWN)
/* Release readlock while sleeping so other readers/writers run. */
1736 mutex_unlock(&u->readlock);
1738 timeo = unix_stream_data_wait(sk, timeo);
1740 if (signal_pending(current)) {
1741 err = sock_intr_errno(timeo);
1744 mutex_lock(&u->readlock);
1749 /* Never glue messages from different writers */
1750 if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, sizeof(siocb->scm->creds)) != 0) {
/* Requeue at the head so the next read starts with this skb. */
1751 skb_queue_head(&sk->sk_receive_queue, skb);
1755 /* Copy credentials */
1756 siocb->scm->creds = *UNIXCREDS(skb);
1760 /* Copy address just once */
1763 unix_copy_addr(msg, skb->sk);
/* Copy as much of this skb as the caller's buffer still allows. */
1767 chunk = min_t(unsigned int, skb->len, size);
1768 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1769 skb_queue_head(&sk->sk_receive_queue, skb);
1777 /* Mark read part of skb as used */
1778 if (!(flags & MSG_PEEK))
1780 skb_pull(skb, chunk);
1783 unix_detach_fds(siocb->scm, skb);
1785 /* put the skb back if we didn't use it up.. */
1788 skb_queue_head(&sk->sk_receive_queue, skb);
1799 /* It is questionable, see note in unix_dgram_recvmsg.
1802 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1804 /* put message back and return */
1805 skb_queue_head(&sk->sk_receive_queue, skb);
1810 mutex_unlock(&u->readlock);
1811 scm_recv(sock, msg, siocb->scm, flags);
/* Return bytes copied, or the error only if nothing was copied. */
1813 return copied ? : err;
/*
 * shutdown() for AF_UNIX sockets: record the shutdown mode on this
 * socket and mirror the complementary mode onto a connected
 * stream/seqpacket peer, waking waiters on both sides.
 */
1816 static int unix_shutdown(struct socket *sock, int mode)
1818 struct sock *sk = sock->sk;
/* Map SHUT_RD/SHUT_WR/SHUT_RDWR (0/1/2) onto the shutdown bitmask. */
1821 mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
1824 unix_state_wlock(sk);
1825 sk->sk_shutdown |= mode;
1826 other=unix_peer(sk);
1829 unix_state_wunlock(sk);
1830 sk->sk_state_change(sk);
/* Only connection-oriented types propagate shutdown to the peer. */
1833 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
/* Our receive shutdown is the peer's send shutdown and vice versa. */
1837 if (mode&RCV_SHUTDOWN)
1838 peer_mode |= SEND_SHUTDOWN;
1839 if (mode&SEND_SHUTDOWN)
1840 peer_mode |= RCV_SHUTDOWN;
1841 unix_state_wlock(other);
1842 other->sk_shutdown |= peer_mode;
1843 unix_state_wunlock(other);
1844 other->sk_state_change(other);
/* Notify the peer's async watchers: HUP on a full shutdown, or
 * readable (EOF) when only its receive side was shut down. */
1845 read_lock(&other->sk_callback_lock);
1846 if (peer_mode == SHUTDOWN_MASK)
1847 sk_wake_async(other,1,POLL_HUP);
1848 else if (peer_mode & RCV_SHUTDOWN)
1849 sk_wake_async(other,1,POLL_IN);
1850 read_unlock(&other->sk_callback_lock);
/*
 * ioctl() for AF_UNIX sockets.  The visible cases report queue sizes
 * to user space: bytes committed to send buffers, and the amount of
 * readable data in the receive queue.
 */
1858 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1860 struct sock *sk = sock->sk;
/* Bytes currently allocated to this socket's write (send) buffers. */
1867 amount = atomic_read(&sk->sk_wmem_alloc);
1868 err = put_user(amount, (int __user *)arg);
1872 struct sk_buff *skb;
/* A listening socket carries connections, not readable data. */
1874 if (sk->sk_state == TCP_LISTEN) {
1879 spin_lock(&sk->sk_receive_queue.lock);
1880 if (sk->sk_type == SOCK_STREAM ||
1881 sk->sk_type == SOCK_SEQPACKET) {
/* Stream-like sockets: walk and sum every queued skb. */
1882 skb_queue_walk(&sk->sk_receive_queue, skb)
/* Datagram socket: report only the first queued datagram. */
1885 skb = skb_peek(&sk->sk_receive_queue);
1889 spin_unlock(&sk->sk_receive_queue.lock);
1890 err = put_user(amount, (int __user *)arg);
/*
 * poll() for AF_UNIX sockets: build the readiness mask from the
 * socket's shutdown state, receive queue and write-space check.
 */
1901 static unsigned int unix_poll(struct file * file, struct socket *sock, poll_table *wait)
1903 struct sock *sk = sock->sk;
1906 poll_wait(file, sk->sk_sleep, wait);
1909 /* exceptional events? */
1912 if (sk->sk_shutdown == SHUTDOWN_MASK)
1914 if (sk->sk_shutdown & RCV_SHUTDOWN)
/* readable: data queued, or EOF due to a receive-side shutdown */
1918 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1919 (sk->sk_shutdown & RCV_SHUTDOWN))
1920 mask |= POLLIN | POLLRDNORM;
1922 /* Connection-based need to check for termination and startup */
1923 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && sk->sk_state == TCP_CLOSE)
1927 * we set writable also when the other side has shut down the
1928 * connection. This prevents stuck sockets.
1930 if (unix_writable(sk))
1931 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1937 #ifdef CONFIG_PROC_FS
/* Walk the global unix socket table to the pos'th socket for
 * /proc/net/unix seq_file iteration; presumably called with
 * unix_table_lock held (taken in unix_seq_start below). */
1938 static struct sock *unix_seq_idx(int *iter, loff_t pos)
1943 for (s = first_unix_socket(iter); s; s = next_unix_socket(iter, s)) {
/* seq_file start: take the table lock for the whole iteration.
 * Position 0 yields the header token (void *)1; later positions
 * resolve to the (pos-1)'th socket in the table. */
1952 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
1954 spin_lock(&unix_table_lock);
1955 return *pos ? unix_seq_idx(seq->private, *pos - 1) : ((void *) 1);
/* seq_file next: after the header token start at the first socket;
 * otherwise continue the table walk from the current entry. */
1958 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1963 return first_unix_socket(seq->private);
1964 return next_unix_socket(seq->private, v);
/* seq_file stop: release the lock taken in unix_seq_start(). */
1967 static void unix_seq_stop(struct seq_file *seq, void *v)
1969 spin_unlock(&unix_table_lock);
/*
 * seq_file show: emit either the /proc/net/unix column header or one
 * line describing a socket (refcount, flags, type, state and, when
 * bound, its pathname or abstract name).
 */
1972 static int unix_seq_show(struct seq_file *seq, void *v)
1976 seq_puts(seq, "Num RefCount Protocol Flags Type St "
1980 struct unix_sock *u = unix_sk(s);
1981 unix_state_rlock(s);
1983 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
1985 atomic_read(&s->sk_refcnt),
1987 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
/* Map the TCP-style internal state onto socket-layer SS_* values;
 * the interpretation differs for stream vs. datagram sockets. */
1990 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
1991 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
/* Name length excluding the leading sun_family field. */
1999 len = u->addr->len - sizeof(short);
2000 if (!UNIX_ABSTRACT(s))
2006 for ( ; i < len; i++)
2007 seq_putc(seq, u->addr->name->sun_path[i]);
2009 unix_state_runlock(s);
2010 seq_putc(seq, '\n');
/* seq_file iterator callbacks backing /proc/net/unix. */
2016 static struct seq_operations unix_seq_ops = {
2017 .start = unix_seq_start,
2018 .next = unix_seq_next,
2019 .stop = unix_seq_stop,
2020 .show = unix_seq_show,
/*
 * open() for /proc/net/unix: allocate the per-reader iteration
 * cursor, open the seq_file, then attach the cursor as its private
 * data (freed later by seq_release_private in unix_seq_fops).
 */
2024 static int unix_seq_open(struct inode *inode, struct file *file)
2026 struct seq_file *seq;
2028 int *iter = kmalloc(sizeof(int), GFP_KERNEL);
2033 rc = seq_open(file, &unix_seq_ops);
2037 seq = file->private_data;
2038 seq->private = iter;
/* File operations for /proc/net/unix (read via the seq_file core;
 * seq_release_private also frees the iterator from unix_seq_open). */
2047 static struct file_operations unix_seq_fops = {
2048 .owner = THIS_MODULE,
2049 .open = unix_seq_open,
2051 .llseek = seq_lseek,
2052 .release = seq_release_private,
/* PF_UNIX address-family registration; unix_create() builds new
 * sockets for socket(2) calls in this family. */
2057 static struct net_proto_family unix_family_ops = {
2059 .create = unix_create,
2060 .owner = THIS_MODULE,
/*
 * Module init: sanity-check that unix_skb_parms fits into skb->cb,
 * register the protocol and the PF_UNIX family, then create the
 * /proc/net/unix entry and the sysctl knobs.
 */
2063 static int __init af_unix_init(void)
2066 struct sk_buff *dummy_skb;
/* unix_skb_parms is stored in skb->cb; refuse to load if it would
 * overflow that control-buffer area. */
2068 if (sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb)) {
2069 printk(KERN_CRIT "%s: panic\n", __FUNCTION__);
2073 rc = proto_register(&unix_proto, 1);
2075 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
2080 sock_register(&unix_family_ops);
2081 #ifdef CONFIG_PROC_FS
2082 proc_net_fops_create("unix", 0, &unix_seq_fops);
2084 unix_sysctl_register();
/* Module exit: undo the af_unix_init() registrations. */
2089 static void __exit af_unix_exit(void)
2091 sock_unregister(PF_UNIX);
2092 unix_sysctl_unregister();
2093 proc_net_remove("unix");
2094 proto_unregister(&unix_proto);
/* Module entry/exit points. */
2097 module_init(af_unix_init);
2098 module_exit(af_unix_exit);
/* GPL-licensed; auto-load this module when PF_UNIX sockets are
 * requested. */
2100 MODULE_LICENSE("GPL");
2101 MODULE_ALIAS_NETPROTO(PF_UNIX);