/*
 * NETLINK	Kernel-user communication protocol.
 *
 * Authors:	Alan Cox <alan@redhat.com>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001	Herbert "herp" Rosmanith
 *					added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002	Arnaldo C. de Melo <acme@conectiva.com.br>
 *					use nlk_sk, as sk->protinfo is on a diet 8)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/security.h>
#if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE)
#define NL_EMULATE_DEV
#endif

	int (*handler)(int unit, struct sk_buff *skb);
	wait_queue_head_t wait;
	struct netlink_callback *cb;
	void (*data_ready)(struct sock *sk, int bytes);

#define nlk_sk(__sk) ((struct netlink_opt *)(__sk)->sk_protinfo)

static struct hlist_head nl_table[MAX_LINKS];
static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
static unsigned nl_nonroot[MAX_LINKS];
static struct socket *netlink_kernel[MAX_LINKS];

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

atomic_t netlink_sock_nr;

static rwlock_t nl_table_lock = RW_LOCK_UNLOCKED;
static atomic_t nl_table_users = ATOMIC_INIT(0);

static struct notifier_block *netlink_chain;
static void netlink_sock_destruct(struct sock *sk)
	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Freeing alive netlink socket %p\n", sk);

	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(!nlk_sk(sk)->cb);

	atomic_dec(&netlink_sock_nr);
#ifdef NETLINK_REFCNT_DEBUG
	printk(KERN_DEBUG "NETLINK %p released, %d are still alive\n",
	       sk, atomic_read(&netlink_sock_nr));
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look: when several writers sleep and a reader wakes them up, all but
 * one of them immediately hit the write lock and grab all the CPUs. Exclusive
 * sleep solves this, _but_ remember that it adds useless work on UP machines.
 */
static void netlink_table_grab(void)
	write_lock_bh(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
			write_unlock_bh(&nl_table_lock);
			write_lock_bh(&nl_table_lock);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);

static __inline__ void netlink_table_ungrab(void)
	write_unlock_bh(&nl_table_lock);
	wake_up(&nl_table_wait);

static __inline__ void
netlink_lock_table(void)
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);

static __inline__ void
netlink_unlock_table(void)
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
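/*
 * Illustrative sketch of how the two pairs of primitives above are combined
 * (the helper names example_add_entry and example_walk_table are placeholders,
 * not functions from this file).  A writer that must modify nl_table brackets
 * the update with netlink_table_grab()/netlink_table_ungrab(); a path that
 * only needs the table to stay stable across a sleeping operation uses
 * netlink_lock_table()/netlink_unlock_table():
 *
 *	static void example_add_entry(struct sock *sk, u32 pid)
 *	{
 *		netlink_table_grab();
 *		nlk_sk(sk)->pid = pid;
 *		sk_add_node(sk, &nl_table[sk->sk_protocol]);
 *		netlink_table_ungrab();
 *	}
 *
 *	static void example_walk_table(int protocol)
 *	{
 *		struct sock *s;
 *		struct hlist_node *node;
 *
 *		netlink_lock_table();
 *		sk_for_each(s, node, &nl_table[protocol]) {
 *			// may sleep here, e.g. in skb_clone(skb, GFP_KERNEL),
 *			// as netlink_broadcast() does below
 *		}
 *		netlink_unlock_table();
 *	}
 */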
static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	sk_for_each(sk, node, &nl_table[protocol]) {
		if (nlk_sk(sk)->pid == pid) {
	read_unlock(&nl_table_lock);

extern struct proto_ops netlink_ops;

static int netlink_insert(struct sock *sk, u32 pid)
	int err = -EADDRINUSE;
	struct hlist_node *node;

	netlink_table_grab();
	sk_for_each(osk, node, &nl_table[sk->sk_protocol]) {
		if (nlk_sk(osk)->pid == pid)

	if (nlk_sk(sk)->pid == 0) {
		nlk_sk(sk)->pid = pid;
		sk_add_node(sk, &nl_table[sk->sk_protocol]);

	netlink_table_ungrab();

static void netlink_remove(struct sock *sk)
	netlink_table_grab();
	sk_del_node_init(sk);
	netlink_table_ungrab();

static int netlink_create(struct socket *sock, int protocol)
	struct netlink_opt *nlk;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	sock->ops = &netlink_ops;

	sk = sk_alloc(PF_NETLINK, GFP_KERNEL, 1, NULL);

	sock_init_data(sock, sk);
	sk_set_owner(sk, THIS_MODULE);

	nlk = sk->sk_protinfo = kmalloc(sizeof(*nlk), GFP_KERNEL);
	memset(nlk, 0, sizeof(*nlk));

	spin_lock_init(&nlk->cb_lock);
	init_waitqueue_head(&nlk->wait);
	sk->sk_destruct = netlink_sock_destruct;
	atomic_inc(&netlink_sock_nr);

	sk->sk_protocol = protocol;

static int netlink_release(struct socket *sock)
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk;

	spin_lock(&nlk->cb_lock);
	nlk->cb->done(nlk->cb);
	netlink_destroy_callback(nlk->cb);
	spin_unlock(&nlk->cb_lock);

	/* OK. Socket is unlinked, and, therefore,
	   no new packets will arrive */

	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid && !nlk->groups) {
		struct netlink_notify n = {
			.protocol = sk->sk_protocol,
		notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);

static int netlink_autobind(struct socket *sock)
	struct sock *sk = sock->sk;
	struct hlist_node *node;
	s32 pid = current->pid;

	netlink_table_grab();
	sk_for_each(osk, node, &nl_table[sk->sk_protocol]) {
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			netlink_table_ungrab();
	netlink_table_ungrab();

	err = netlink_insert(sk, pid);
	if (err == -EADDRINUSE)
	nlk_sk(sk)->groups = 0;

static inline int netlink_capable(struct socket *sock, unsigned flag)
	return (nl_nonroot[sock->sk->sk_protocol] & flag) ||
	       capable(CAP_NET_ADMIN);
static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (nladdr->nl_family != AF_NETLINK)

	/* Only the superuser is allowed to listen to multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))

	if (nladdr->nl_pid != nlk->pid)
	nlk->groups = nladdr->nl_groups;

	if (nladdr->nl_pid == 0) {
		err = netlink_autobind(sock);
		nlk->groups = nladdr->nl_groups;

	err = netlink_insert(sk, nladdr->nl_pid);
	nlk->groups = nladdr->nl_groups;
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (addr->sa_family == AF_UNSPEC) {

	if (addr->sa_family != AF_NETLINK)

	/* Only the superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))

	err = netlink_autobind(sock);

	nlk->dst_pid = nladdr->nl_pid;
	nlk->dst_groups = nladdr->nl_groups;

static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	*addr_len = sizeof(*nladdr);

	nladdr->nl_pid = nlk->dst_pid;
	nladdr->nl_groups = nlk->dst_groups;

	nladdr->nl_pid = nlk->pid;
	nladdr->nl_groups = nlk->groups;
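/*
 * For reference, a minimal user-space counterpart of the bind/getname path
 * above might look like the sketch below.  It assumes only the standard
 * AF_NETLINK socket API (<sys/socket.h>, <linux/netlink.h>); NETLINK_ROUTE
 * and the group bitmask 1 are just example values:
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl snl;
 *
 *	memset(&snl, 0, sizeof(snl));
 *	snl.nl_family = AF_NETLINK;
 *	snl.nl_pid    = getpid();	// 0 would request autobind
 *	snl.nl_groups = 1;		// multicast groups (needs CAP_NET_ADMIN
 *					// unless the protocol is marked nonroot)
 *	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
 */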
static void netlink_overrun(struct sock *sk)
	if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);

struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
	int protocol = ssk->sk_protocol;
	struct netlink_opt *nlk;

	sock = netlink_lookup(protocol, pid);
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	if (nlk->pid == 0 && !nlk->data_ready) {
		return ERR_PTR(-ECONNREFUSED);

struct sock *netlink_getsockbyfilp(struct file *filp)
	struct inode *inode = filp->f_dentry->d_inode;
	struct socket *socket;

	if (!inode->i_sock || !(socket = SOCKET_I(inode)))
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * < 0: error. skb freed, reference to sock dropped.
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo)
	struct netlink_opt *nlk;

#ifdef NL_EMULATE_DEV

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);

		if (signal_pending(current)) {
			return sock_intr_errno(timeo);

	skb_set_owner_r(skb, sk);

int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
	struct netlink_opt *nlk;

#ifdef NL_EMULATE_DEV
	len = nlk->handler(protocol, skb);

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)

int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
	timeo = sock_sndtimeo(ssk, nonblock);

	sk = netlink_getsockbypid(ssk, pid);

	err = netlink_attachskb(sk, skb, nonblock, timeo);

	return netlink_sendskb(sk, skb, ssk->sk_protocol);
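/*
 * Sketch of the caller-side contract of netlink_attachskb() described in the
 * comment above it: a return of 1 means the socket reference was dropped and
 * the lookup must be repeated, a negative value means the skb has already
 * been freed, and 0 means the skb may now be handed to netlink_sendskb().
 * The retry label is only illustrative of how a unicast sender applies that
 * contract:
 *
 *	retry:
 *		sk = netlink_getsockbypid(ssk, pid);
 *		if (IS_ERR(sk)) {
 *			kfree_skb(skb);
 *			return PTR_ERR(sk);
 *		}
 *		err = netlink_attachskb(sk, skb, nonblock, timeo);
 *		if (err == 1)
 *			goto retry;
 *		if (err)
 *			return err;
 *		return netlink_sendskb(sk, skb, ssk->sk_protocol);
 */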
static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
	struct netlink_opt *nlk = nlk_sk(sk);
#ifdef NL_EMULATE_DEV
	nlk->handler(sk->sk_protocol, skb);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, int allocation)
	struct hlist_node *node;
	struct sk_buff *skb2 = NULL;
	int protocol = ssk->sk_protocol;
	int failure = 0, delivered = 0;

	/* While we sleep in clone, do not allow the socket list to change */
	netlink_lock_table();

	sk_for_each(sk, node, &nl_table[protocol]) {
		struct netlink_opt *nlk = nlk_sk(sk);

		if (nlk->pid == pid || !(nlk->groups & group))

		if (atomic_read(&skb->users) != 1) {
			skb2 = skb_clone(skb, allocation);
			atomic_inc(&skb->users);

			/* Clone failed. Notify ALL listeners. */
		} else if (netlink_broadcast_deliver(sk, skb2)) {

	netlink_unlock_table();
void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
	struct hlist_node *node;
	int protocol = ssk->sk_protocol;

	read_lock(&nl_table_lock);
	sk_for_each(sk, node, &nl_table[protocol]) {
		struct netlink_opt *nlk = nlk_sk(sk);

		if (nlk->pid == pid || !(nlk->groups & group))

		sk->sk_error_report(sk);
	read_unlock(&nl_table_lock);

static inline void netlink_rcv_wake(struct sock *sk)
	struct netlink_opt *nlk = nlk_sk(sk);

	if (!skb_queue_len(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	struct scm_cookie scm;

	if (msg->msg_flags & MSG_OOB)

	if (NULL == siocb->scm)
	err = scm_send(sock, msg, siocb->scm);

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
		dst_pid = addr->nl_pid;
		dst_groups = addr->nl_groups;
		if (dst_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		dst_pid = nlk->dst_pid;
		dst_groups = nlk->dst_groups;

	err = netlink_autobind(sock);

	if (len > sk->sk_sndbuf - 32)
	skb = alloc_skb(len, GFP_KERNEL);

	NETLINK_CB(skb).pid = nlk->pid;
	NETLINK_CB(skb).groups = nlk->groups;
	NETLINK_CB(skb).dst_pid = dst_pid;
	NETLINK_CB(skb).dst_groups = dst_groups;
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so we have to save the
	   current capabilities here and check them when the message is
	   delivered to the corresponding kernel module. --ANK (980802)
	 */
	err = security_netlink_send(skb);

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {

	atomic_inc(&skb->users);
	netlink_broadcast(sk, skb, dst_pid, dst_groups, GFP_KERNEL);
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags & MSG_DONTWAIT);
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	int noblock = flags & MSG_DONTWAIT;

	skb = skb_recv_datagram(sk, flags, noblock, &err);

	msg->msg_namelen = 0;

	msg->msg_flags |= MSG_TRUNC;

	skb->h.raw = skb->data;
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pid = NETLINK_CB(skb).pid;
		addr->nl_groups = NETLINK_CB(skb).dst_groups;
		msg->msg_namelen = sizeof(*addr);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
	siocb->scm->creds = *NETLINK_CREDS(skb);
	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)

	scm_recv(sock, msg, siocb->scm, flags);

	netlink_rcv_wake(sk);
	return err ? : copied;

static void netlink_data_ready(struct sock *sk, int len)
	struct netlink_opt *nlk = nlk_sk(sk);

	nlk->data_ready(sk, len);
	netlink_rcv_wake(sk);
/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */

struct sock *
netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len))
	if (unit < 0 || unit >= MAX_LINKS)

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))

	if (netlink_create(sock, unit) < 0) {

	sk->sk_data_ready = netlink_data_ready;
	nlk_sk(sk)->data_ready = input;

	netlink_insert(sk, 0);
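/*
 * Sketch of how a kernel-side user might create such a socket and consume
 * requests from it.  NETLINK_ROUTE, example_rcv and example_sk are only
 * illustrative names; in-tree users such as rtnetlink follow a similar
 * pattern of draining sk_receive_queue from the input callback:
 *
 *	static struct sock *example_sk;
 *
 *	static void example_rcv(struct sock *sk, int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 *			// parse the nlmsghdr(s) in skb->data here
 *			kfree_skb(skb);
 *		}
 *	}
 *
 *	static int __init example_init(void)
 *	{
 *		example_sk = netlink_kernel_create(NETLINK_ROUTE, example_rcv);
 *		return example_sk ? 0 : -ENODEV;
 *	}
 */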
void netlink_set_nonroot(int protocol, unsigned flags)
	if ((unsigned)protocol < MAX_LINKS)
		nl_nonroot[protocol] = flags;

static void netlink_destroy_callback(struct netlink_callback *cb)

/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */
static int netlink_dump(struct sock *sk)
	struct netlink_opt *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct nlmsghdr *nlh;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);

	spin_lock(&nlk->cb_lock);
	spin_unlock(&nlk->cb_lock);

	len = cb->dump(skb, cb);

	spin_unlock(&nlk->cb_lock);
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);

	nlh = __nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
			  NLMSG_DONE, sizeof(int));
	nlh->nlmsg_flags |= NLM_F_MULTI;
	memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	spin_unlock(&nlk->cb_lock);

	netlink_destroy_callback(cb);

int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb, struct netlink_callback *),
		       int (*done)(struct netlink_callback *))
	struct netlink_callback *cb;
	struct netlink_opt *nlk;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	memset(cb, 0, sizeof(*cb));

	atomic_inc(&skb->users);

	sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;

	/* A dump is in progress... */
	spin_lock(&nlk->cb_lock);
		spin_unlock(&nlk->cb_lock);
		netlink_destroy_callback(cb);

	spin_unlock(&nlk->cb_lock);
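/*
 * Sketch of how a protocol handler might hand a dump request off to
 * netlink_dump_start().  example_dump, example_done and example_sk are
 * placeholders (example_sk being the kernel socket that received the
 * request); rtnetlink uses this interface in essentially the same way:
 *
 *	static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
 *	{
 *		// append NLM_F_MULTI messages to skb; return skb->len while
 *		// there is more to send, 0 once the dump is complete
 *		return 0;
 *	}
 *
 *	static int example_done(struct netlink_callback *cb)
 *	{
 *		return 0;
 *	}
 *
 *	// inside a request handler, for a GET request with NLM_F_DUMP set:
 *	err = netlink_dump_start(example_sk, skb, nlh, example_dump, example_done);
 */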
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;

	size = NLMSG_SPACE(sizeof(struct nlmsgerr));
	size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));

	skb = alloc_skb(size, GFP_KERNEL);

		sk = netlink_lookup(in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, sizeof(struct nlmsgerr));
	errmsg = NLMSG_DATA(rep);
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);

#ifdef NL_EMULATE_DEV

static rwlock_t nl_emu_lock = RW_LOCK_UNLOCKED;

/*
 * Backward compatibility.
 */

int netlink_attach(int unit, int (*function)(int, struct sk_buff *skb))
	struct sock *sk = netlink_kernel_create(unit, NULL);

	nlk_sk(sk)->handler = function;
	write_lock_bh(&nl_emu_lock);
	netlink_kernel[unit] = sk->sk_socket;
	write_unlock_bh(&nl_emu_lock);

void netlink_detach(int unit)
	struct socket *sock;

	write_lock_bh(&nl_emu_lock);
	sock = netlink_kernel[unit];
	netlink_kernel[unit] = NULL;
	write_unlock_bh(&nl_emu_lock);

int netlink_post(int unit, struct sk_buff *skb)
	struct socket *sock;

	read_lock(&nl_emu_lock);
	sock = netlink_kernel[unit];
		struct sock *sk = sock->sk;
		memset(skb->cb, 0, sizeof(skb->cb));
		read_unlock(&nl_emu_lock);
		netlink_broadcast(sk, skb, 0, ~0, GFP_ATOMIC);

	read_unlock(&nl_emu_lock);
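/*
 * Sketch of the old "netlink device" style interface emulated above: a unit
 * registers a synchronous handler instead of creating its own kernel socket,
 * and posts skbs that are broadcast to listeners of that unit.  The unit
 * number and handler name below are placeholders:
 *
 *	static int example_handler(int unit, struct sk_buff *skb)
 *	{
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	netlink_attach(NETLINK_USERSOCK, example_handler);
 *	// later, broadcast an skb to listeners of that unit:
 *	netlink_post(NETLINK_USERSOCK, skb);
 *	// and on teardown:
 *	netlink_detach(NETLINK_USERSOCK);
 */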
#ifdef CONFIG_PROC_FS
static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
	struct hlist_node *node;

	for (i = 0; i < MAX_LINKS; i++) {
		sk_for_each(s, node, &nl_table[i]) {
			seq->private = (void *) i;

static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	long i = (long)seq->private;

	while (++i < MAX_LINKS) {
		s = sk_head(&nl_table[i]);
		seq->private = (void *) i;

static void netlink_seq_stop(struct seq_file *seq, void *v)
	read_unlock(&nl_table_lock);

static int netlink_seq_show(struct seq_file *seq, void *v)
	if (v == SEQ_START_TOKEN)
			 "sk Eth Pid Groups "
			 "Rmem Wmem Dump Locks\n");

	struct netlink_opt *nlk = nlk_sk(s);

	seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
		   atomic_read(&s->sk_rmem_alloc),
		   atomic_read(&s->sk_wmem_alloc),
		   atomic_read(&s->sk_refcnt)

static struct seq_operations netlink_seq_ops = {
	.start = netlink_seq_start,
	.next  = netlink_seq_next,
	.stop  = netlink_seq_stop,
	.show  = netlink_seq_show,

static int netlink_seq_open(struct inode *inode, struct file *file)
	return seq_open(file, &netlink_seq_ops);

static struct file_operations netlink_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = netlink_seq_open,
	.llseek  = seq_lseek,
	.release = seq_release,
int netlink_register_notifier(struct notifier_block *nb)
	return notifier_chain_register(&netlink_chain, nb);

int netlink_unregister_notifier(struct notifier_block *nb)
	return notifier_chain_unregister(&netlink_chain, nb);

static struct proto_ops netlink_ops = {
	.family     = PF_NETLINK,
	.owner      = THIS_MODULE,
	.release    = netlink_release,
	.bind       = netlink_bind,
	.connect    = netlink_connect,
	.socketpair = sock_no_socketpair,
	.accept     = sock_no_accept,
	.getname    = netlink_getname,
	.poll       = datagram_poll,
	.ioctl      = sock_no_ioctl,
	.listen     = sock_no_listen,
	.shutdown   = sock_no_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg    = netlink_sendmsg,
	.recvmsg    = netlink_recvmsg,
	.mmap       = sock_no_mmap,
	.sendpage   = sock_no_sendpage,

static struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner  = THIS_MODULE,	/* for consistency 8) */
static int __init netlink_proto_init(void)
	struct sk_buff *dummy_skb;

	if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb)) {
		printk(KERN_CRIT "netlink_init: panic\n");

	sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("netlink", 0, &netlink_seq_fops);
	/* The netlink device handler may be needed early. */

static void __exit netlink_proto_exit(void)
	sock_unregister(PF_NETLINK);
	proc_net_remove("netlink");

core_initcall(netlink_proto_init);
module_exit(netlink_proto_exit);

MODULE_LICENSE("GPL");

MODULE_ALIAS_NETPROTO(PF_NETLINK);

EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_broadcast_deliver);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_err);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);

#if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE)
EXPORT_SYMBOL(netlink_attach);
EXPORT_SYMBOL(netlink_detach);
EXPORT_SYMBOL(netlink_post);
#endif