/*
 * NETLINK	Kernel-user communication protocol.
 *
 *		Authors:	Alan Cox <alan@redhat.com>
 *				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 */
#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/vs_base.h>
#include <linux/vs_context.h>
#include <linux/vs_network.h>
#include <linux/vs_limit.h>
#include <net/sock.h>
#include <net/scm.h>

#if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE)
#define NL_EMULATE_DEV
#endif
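
/*
 * Per-socket netlink state, hung off sk->sk_protinfo and reached via
 * nlk_sk().  pid is the socket's (per-protocol) unicast address and
 * groups its multicast subscriptions; bit 0 of state is the "receive
 * queue full" flag used for flow control; cb and cb_lock track an
 * in-progress dump; dst_pid/dst_groups hold the default destination
 * set by connect().
 */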
struct netlink_opt
{
        u32                     pid;
        unsigned                groups;
        u32                     dst_pid;
        unsigned                dst_groups;
        unsigned long           state;
        int                     (*handler)(int unit, struct sk_buff *skb);
        wait_queue_head_t       wait;
        struct netlink_callback *cb;
        spinlock_t              cb_lock;
        void                    (*data_ready)(struct sock *sk, int bytes);
};

#define nlk_sk(__sk) ((struct netlink_opt *)(__sk)->sk_protinfo)
static struct hlist_head nl_table[MAX_LINKS];
static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
static unsigned nl_nonroot[MAX_LINKS];

static struct socket *netlink_kernel[MAX_LINKS];

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

atomic_t netlink_sock_nr;

static rwlock_t nl_table_lock = RW_LOCK_UNLOCKED;
static atomic_t nl_table_users = ATOMIC_INIT(0);

static struct notifier_block *netlink_chain;

static void netlink_sock_destruct(struct sock *sk)
        skb_queue_purge(&sk->sk_receive_queue);

        if (!sock_flag(sk, SOCK_DEAD)) {
                printk("Freeing alive netlink socket %p\n", sk);

        BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
        BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
        BUG_TRAP(!nlk_sk(sk)->cb);

        atomic_dec(&netlink_sock_nr);
#ifdef NETLINK_REFCNT_DEBUG
        printk(KERN_DEBUG "NETLINK %p released, %d are still alive\n", sk, atomic_read(&netlink_sock_nr));

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on SMP.
 * Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

static void netlink_table_grab(void)
        write_lock_bh(&nl_table_lock);

        if (atomic_read(&nl_table_users)) {
                DECLARE_WAITQUEUE(wait, current);

                add_wait_queue_exclusive(&nl_table_wait, &wait);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&nl_table_users) == 0)
                        write_unlock_bh(&nl_table_lock);
                        write_lock_bh(&nl_table_lock);

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nl_table_wait, &wait);

static __inline__ void netlink_table_ungrab(void)
        write_unlock_bh(&nl_table_lock);
        wake_up(&nl_table_wait);

static __inline__ void
netlink_lock_table(void)
        /* read_lock() synchronizes us to netlink_table_grab */

        read_lock(&nl_table_lock);
        atomic_inc(&nl_table_users);
        read_unlock(&nl_table_lock);

static __inline__ void
netlink_unlock_table(void)
        if (atomic_dec_and_test(&nl_table_users))
                wake_up(&nl_table_wait);
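
/*
 * Two-level protection of nl_table: paths that may sleep while walking
 * the table take a reference via netlink_lock_table()/netlink_unlock_table()
 * (bump nl_table_users under a brief read_lock), while modifiers use
 * netlink_table_grab()/netlink_table_ungrab(), which hold the write lock
 * and wait on an exclusive queue until the user count drains to zero.
 * The exclusive sleep avoids the thundering herd described in the comment
 * above when several writers are queued.
 */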
static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
        struct hlist_node *node;

        read_lock(&nl_table_lock);
        sk_for_each(sk, node, &nl_table[protocol]) {
                if (nlk_sk(sk)->pid == pid) {
        read_unlock(&nl_table_lock);

static struct proto_ops netlink_ops;

static int netlink_insert(struct sock *sk, u32 pid)
        int err = -EADDRINUSE;
        struct hlist_node *node;

        netlink_table_grab();
        sk_for_each(osk, node, &nl_table[sk->sk_protocol]) {
                if (nlk_sk(osk)->pid == pid)
        if (nlk_sk(sk)->pid == 0) {
                nlk_sk(sk)->pid = pid;
                sk_add_node(sk, &nl_table[sk->sk_protocol]);
        netlink_table_ungrab();

static void netlink_remove(struct sock *sk)
        netlink_table_grab();
        sk_del_node_init(sk);
        netlink_table_ungrab();

static int netlink_create(struct socket *sock, int protocol)
        struct netlink_opt *nlk;

        sock->state = SS_UNCONNECTED;

        if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        if (protocol < 0 || protocol >= MAX_LINKS)
                return -EPROTONOSUPPORT;

        sock->ops = &netlink_ops;

        sk = sk_alloc(PF_NETLINK, GFP_KERNEL, 1, NULL);

        sock_init_data(sock, sk);
        sk_set_owner(sk, THIS_MODULE);

        nlk = sk->sk_protinfo = kmalloc(sizeof(*nlk), GFP_KERNEL);
        memset(nlk, 0, sizeof(*nlk));

        spin_lock_init(&nlk->cb_lock);
        init_waitqueue_head(&nlk->wait);
        sk->sk_destruct = netlink_sock_destruct;
        atomic_inc(&netlink_sock_nr);

        set_vx_info(&sk->sk_vx_info, current->vx_info);
        sk->sk_xid = vx_current_xid();
        set_nx_info(&sk->sk_nx_info, current->nx_info);
        sk->sk_nid = nx_current_nid();

        sk->sk_protocol = protocol;

static int netlink_release(struct socket *sock)
        struct sock *sk = sock->sk;
        struct netlink_opt *nlk;

        spin_lock(&nlk->cb_lock);
        nlk->cb->done(nlk->cb);
        netlink_destroy_callback(nlk->cb);
        spin_unlock(&nlk->cb_lock);

        /* OK. Socket is unlinked, and, therefore,
           no new packets will arrive */

        wake_up_interruptible_all(&nlk->wait);

        skb_queue_purge(&sk->sk_write_queue);

        if (nlk->pid && !nlk->groups) {
                struct netlink_notify n = {
                        .protocol = sk->sk_protocol,
                notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);

        clr_vx_info(&sk->sk_vx_info);
        clr_nx_info(&sk->sk_nx_info);

static int netlink_autobind(struct socket *sock)
        struct sock *sk = sock->sk;
        struct hlist_node *node;
        s32 pid = current->pid;

        netlink_table_grab();
        sk_for_each(osk, node, &nl_table[sk->sk_protocol]) {
                if (nlk_sk(osk)->pid == pid) {
                        /* Bind collision, search negative pid values. */
                        netlink_table_ungrab();
        netlink_table_ungrab();

        err = netlink_insert(sk, pid);
        if (err == -EADDRINUSE)
        nlk_sk(sk)->groups = 0;

static inline int netlink_capable(struct socket *sock, unsigned flag)
        return (nl_nonroot[sock->sk->sk_protocol] & flag) ||
               capable(CAP_NET_ADMIN);
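
/*
 * nl_nonroot[] relaxes the CAP_NET_ADMIN requirement per protocol: a
 * subsystem that wants unprivileged processes to listen to (or send to)
 * its multicast groups sets the corresponding flag, e.g. (illustrative
 * only, not a call made in this file):
 *
 *      netlink_set_nonroot(NETLINK_USERSOCK, NL_NONROOT_RECV);
 *
 * Everyone else falls back to the capable(CAP_NET_ADMIN) check above.
 */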
static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
        struct sock *sk = sock->sk;
        struct netlink_opt *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

        if (nladdr->nl_family != AF_NETLINK)

        /* Only the superuser may listen to multicasts */
        if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))

        if (nladdr->nl_pid != nlk->pid)
        nlk->groups = nladdr->nl_groups;

        if (nladdr->nl_pid == 0) {
                err = netlink_autobind(sock);
                nlk->groups = nladdr->nl_groups;

        err = netlink_insert(sk, nladdr->nl_pid);
        nlk->groups = nladdr->nl_groups;

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
        struct sock *sk = sock->sk;
        struct netlink_opt *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

        if (addr->sa_family == AF_UNSPEC) {
                sk->sk_state = NETLINK_UNCONNECTED;

        if (addr->sa_family != AF_NETLINK)

        /* Only the superuser may send to multicasts */
        if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))

        err = netlink_autobind(sock);

        sk->sk_state = NETLINK_CONNECTED;
        nlk->dst_pid = nladdr->nl_pid;
        nlk->dst_groups = nladdr->nl_groups;

static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
        struct sock *sk = sock->sk;
        struct netlink_opt *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

        nladdr->nl_family = AF_NETLINK;
        *addr_len = sizeof(*nladdr);

        nladdr->nl_pid = nlk->dst_pid;
        nladdr->nl_groups = nlk->dst_groups;

        nladdr->nl_pid = nlk->pid;
        nladdr->nl_groups = nlk->groups;

static void netlink_overrun(struct sock *sk)
        if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
                sk->sk_err = ENOBUFS;
                sk->sk_error_report(sk);

struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
        int protocol = ssk->sk_protocol;
        struct netlink_opt *nlk;

        sock = netlink_lookup(protocol, pid);
                return ERR_PTR(-ECONNREFUSED);

        /* Don't bother queueing the skb if the kernel socket has no input function */
        if ((nlk->pid == 0 && !nlk->data_ready) ||
            (sock->sk_state == NETLINK_CONNECTED &&
             nlk->dst_pid != nlk_sk(ssk)->pid)) {
                return ERR_PTR(-ECONNREFUSED);

struct sock *netlink_getsockbyfilp(struct file *filp)
        struct inode *inode = filp->f_dentry->d_inode;
        struct socket *socket;

        if (!inode->i_sock || !(socket = SOCKET_I(inode)))
                return ERR_PTR(-ENOTSOCK);

        if (sock->sk_family != AF_NETLINK)
                return ERR_PTR(-EINVAL);
/*
 * Attach an skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
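
/*
 * Flow control: a sender that finds the receiver's sk_rcvbuf exhausted
 * (or its congested bit set) sleeps on the receiver's wait queue for at
 * most the send timeout.  netlink_rcv_wake(), run from recvmsg(), clears
 * the congested bit once the receive queue drains and wakes the sleepers;
 * netlink_overrun() instead marks the socket and reports ENOBUFS when
 * blocking is not an option.
 */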
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo)
        struct netlink_opt *nlk;

#ifdef NL_EMULATE_DEV

        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
            test_bit(0, &nlk->state)) {
                DECLARE_WAITQUEUE(wait, current);

                __set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&nlk->wait, &wait);

                if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
                     test_bit(0, &nlk->state)) &&
                    !sock_flag(sk, SOCK_DEAD))
                        timeo = schedule_timeout(timeo);

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nlk->wait, &wait);

                if (signal_pending(current)) {
                        return sock_intr_errno(timeo);

        skb_set_owner_r(skb, sk);

int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
        struct netlink_opt *nlk;

#ifdef NL_EMULATE_DEV
        len = nlk->handler(protocol, skb);

        skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk, len);

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)

static inline void netlink_trim(struct sk_buff *skb, int allocation)
        int delta = skb->end - skb->tail;

        /* If the packet is charged to a socket, the modification
         * of truesize below is illegal and will corrupt socket
         * buffer accounting state.
         */
        BUG_ON(skb->list != NULL);

        if (delta * 2 < skb->truesize)

        if (pskb_expand_head(skb, 0, -delta, allocation))

        skb->truesize -= delta;

int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
        netlink_trim(skb, gfp_any());

        timeo = sock_sndtimeo(ssk, nonblock);
        sk = netlink_getsockbypid(ssk, pid);

        err = netlink_attachskb(sk, skb, nonblock, timeo);

        return netlink_sendskb(sk, skb, ssk->sk_protocol);
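
/*
 * Typical kernel-side use of netlink_unicast(): build an skb holding a
 * netlink message and hand it to the socket bound to the destination
 * pid.  A minimal sketch; my_netlink_sk, DOC_MSG_TYPE, payload, size,
 * seq and dst_pid are illustrative names, not part of this file:
 *
 *      struct sk_buff *skb = alloc_skb(NLMSG_SPACE(size), GFP_KERNEL);
 *      struct nlmsghdr *nlh;
 *
 *      if (skb == NULL)
 *              return -ENOBUFS;
 *      nlh = __nlmsg_put(skb, 0, seq, DOC_MSG_TYPE, size);
 *      memcpy(NLMSG_DATA(nlh), payload, size);
 *      netlink_unicast(my_netlink_sk, skb, dst_pid, 0);
 *
 * where my_netlink_sk came from netlink_kernel_create() below.
 */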
static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
        struct netlink_opt *nlk = nlk_sk(sk);
#ifdef NL_EMULATE_DEV
        nlk->handler(sk->sk_protocol, skb);

        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
            !test_bit(0, &nlk->state)) {
                skb_set_owner_r(skb, sk);
                skb_queue_tail(&sk->sk_receive_queue, skb);
                sk->sk_data_ready(sk, skb->len);
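
/*
 * netlink_broadcast() walks every socket of the protocol under
 * netlink_lock_table() and delivers to those whose groups mask matches,
 * skipping the original sender's pid.  The skb is cloned at most once
 * per recipient; if a clone fails, delivery degrades to marking the
 * remaining listeners as overrun so they at least see ENOBUFS.
 */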
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
                      u32 group, int allocation)
        struct hlist_node *node;
        struct sk_buff *skb2 = NULL;
        int protocol = ssk->sk_protocol;
        int failure = 0, delivered = 0;

        netlink_trim(skb, allocation);

        /* While we sleep in clone, do not allow the socket list to change */

        netlink_lock_table();

        sk_for_each(sk, node, &nl_table[protocol]) {
                struct netlink_opt *nlk = nlk_sk(sk);

                if (nlk->pid == pid || !(nlk->groups & group))

                if (atomic_read(&skb->users) != 1) {
                        skb2 = skb_clone(skb, allocation);
                        atomic_inc(&skb->users);

                        /* Clone failed. Notify ALL listeners. */
                } else if (netlink_broadcast_deliver(sk, skb2)) {

        netlink_unlock_table();

void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
        struct hlist_node *node;
        int protocol = ssk->sk_protocol;

        read_lock(&nl_table_lock);
        sk_for_each(sk, node, &nl_table[protocol]) {
                struct netlink_opt *nlk = nlk_sk(sk);

                if (nlk->pid == pid || !(nlk->groups & group))

                sk->sk_error_report(sk);
        read_unlock(&nl_table_lock);

static inline void netlink_rcv_wake(struct sock *sk)
        struct netlink_opt *nlk = nlk_sk(sk);

        if (!skb_queue_len(&sk->sk_receive_queue))
                clear_bit(0, &nlk->state);
        if (!test_bit(0, &nlk->state))
                wake_up_interruptible(&nlk->wait);
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                           struct msghdr *msg, size_t len)
        struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct sock *sk = sock->sk;
        struct netlink_opt *nlk = nlk_sk(sk);
        struct sockaddr_nl *addr = msg->msg_name;
        struct scm_cookie scm;

        if (msg->msg_flags & MSG_OOB)

        if (NULL == siocb->scm)
        err = scm_send(sock, msg, siocb->scm);

        if (msg->msg_namelen) {
                if (addr->nl_family != AF_NETLINK)
                dst_pid = addr->nl_pid;
                dst_groups = addr->nl_groups;
                if (dst_groups && !netlink_capable(sock, NL_NONROOT_SEND))
                dst_pid = nlk->dst_pid;
                dst_groups = nlk->dst_groups;

        err = netlink_autobind(sock);

        if (len > sk->sk_sndbuf - 32)
        skb = alloc_skb(len, GFP_KERNEL);

        NETLINK_CB(skb).pid = nlk->pid;
        NETLINK_CB(skb).groups = nlk->groups;
        NETLINK_CB(skb).dst_pid = dst_pid;
        NETLINK_CB(skb).dst_groups = dst_groups;
        memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

        /* What can I do? Netlink is asynchronous, so we have to save the
           current capabilities and check them when this message is
           delivered to the corresponding kernel module. --ANK (980802)
         */

        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {

        err = security_netlink_send(sk, skb);

        atomic_inc(&skb->users);
        netlink_broadcast(sk, skb, dst_pid, dst_groups, GFP_KERNEL);

        err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags & MSG_DONTWAIT);

static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
                           struct msghdr *msg, size_t len,
        struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct scm_cookie scm;
        struct sock *sk = sock->sk;
        struct netlink_opt *nlk = nlk_sk(sk);
        int noblock = flags & MSG_DONTWAIT;

        skb = skb_recv_datagram(sk, flags, noblock, &err);

        msg->msg_namelen = 0;

        msg->msg_flags |= MSG_TRUNC;

        skb->h.raw = skb->data;
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

                struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
                addr->nl_family = AF_NETLINK;
                addr->nl_pid = NETLINK_CB(skb).pid;
                addr->nl_groups = NETLINK_CB(skb).dst_groups;
                msg->msg_namelen = sizeof(*addr);

        if (NULL == siocb->scm) {
                memset(&scm, 0, sizeof(scm));
        siocb->scm->creds = *NETLINK_CREDS(skb);
        skb_free_datagram(sk, skb);

        if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)

        scm_recv(sock, msg, siocb->scm, flags);

        netlink_rcv_wake(sk);
        return err ? : copied;

static void netlink_data_ready(struct sock *sk, int len)
        struct netlink_opt *nlk = nlk_sk(sk);

        nlk->data_ready(sk, len);
        netlink_rcv_wake(sk);
/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

struct sock *
netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len))
        if (unit < 0 || unit >= MAX_LINKS)

        if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))

        if (netlink_create(sock, unit) < 0) {

        sk->sk_data_ready = netlink_data_ready;
        nlk_sk(sk)->data_ready = input;

        netlink_insert(sk, 0);
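
/*
 * A kernel subsystem typically creates its protocol socket once at init
 * time and reads requests from the input callback.  Hypothetical sketch;
 * nl_sk, NETLINK_TEST, doc_input and doc_rcv_skb are illustrative names
 * only:
 *
 *      static void doc_input(struct sock *sk, int len)
 *      {
 *              struct sk_buff *skb;
 *
 *              while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 *                      doc_rcv_skb(skb);       parse nlmsghdr(s), reply or ack
 *                      kfree_skb(skb);
 *              }
 *      }
 *
 *      nl_sk = netlink_kernel_create(NETLINK_TEST, doc_input);
 *
 * netlink_attach() below passes a NULL input and instead installs a
 * synchronous handler for the old device-style interface.
 */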
void netlink_set_nonroot(int protocol, unsigned flags)
        if ((unsigned)protocol < MAX_LINKS)
                nl_nonroot[protocol] = flags;

static void netlink_destroy_callback(struct netlink_callback *cb)

/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */
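
/*
 * Dumping works by repeatedly calling cb->dump() to fill one
 * NLMSG_GOODSIZE skb at a time and queueing it on the requester's
 * socket; netlink_recvmsg() re-runs netlink_dump() whenever the receive
 * queue drops below half of sk_rcvbuf, so the dump proceeds at the pace
 * the reader drains it.  Once cb->dump() has nothing more to emit, an
 * NLMSG_DONE message is appended and the callback is released.
 */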
static int netlink_dump(struct sock *sk)
        struct netlink_opt *nlk = nlk_sk(sk);
        struct netlink_callback *cb;
        struct nlmsghdr *nlh;

        skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);

        spin_lock(&nlk->cb_lock);
        spin_unlock(&nlk->cb_lock);

        len = cb->dump(skb, cb);

        spin_unlock(&nlk->cb_lock);
        skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk, len);

        nlh = __nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLMSG_DONE, sizeof(int));
        nlh->nlmsg_flags |= NLM_F_MULTI;
        memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
        skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk, skb->len);

        spin_unlock(&nlk->cb_lock);

        netlink_destroy_callback(cb);

int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
                       struct nlmsghdr *nlh,
                       int (*dump)(struct sk_buff *skb, struct netlink_callback *),
                       int (*done)(struct netlink_callback *))
        struct netlink_callback *cb;
        struct netlink_opt *nlk;

        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
        memset(cb, 0, sizeof(*cb));

        atomic_inc(&skb->users);

        sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
                netlink_destroy_callback(cb);
                return -ECONNREFUSED;

        /* A dump is in progress... */
        spin_lock(&nlk->cb_lock);
                spin_unlock(&nlk->cb_lock);
                netlink_destroy_callback(cb);

        spin_unlock(&nlk->cb_lock);
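
/*
 * netlink_dump_start() is what a request handler calls when it sees a
 * table-dump request (rtnetlink, for example, uses it for dump-style GET
 * requests): it records the dump/done callbacks against the requesting
 * socket and kicks off the first netlink_dump() pass, refusing the
 * request if a dump is already in flight on that socket (the
 * "A dump is in progress" path above).
 */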
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
        struct nlmsghdr *rep;
        struct nlmsgerr *errmsg;

        size = NLMSG_SPACE(sizeof(struct nlmsgerr));
        size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));

        skb = alloc_skb(size, GFP_KERNEL);
                sk = netlink_lookup(in_skb->sk->sk_protocol,
                                    NETLINK_CB(in_skb).pid);
                sk->sk_err = ENOBUFS;
                sk->sk_error_report(sk);

        rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
                          NLMSG_ERROR, sizeof(struct nlmsgerr));
        errmsg = NLMSG_DATA(rep);
        errmsg->error = err;
        memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
        netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
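
/*
 * The acknowledgement is an NLMSG_ERROR message echoed back to the
 * sender's pid: error holds 0 (plain ACK) or the negative errno, and the
 * offending request follows it - the full original message when err is
 * non-zero, just its nlmsghdr otherwise, which is why the two size
 * computations above differ.
 */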
#ifdef NL_EMULATE_DEV

static rwlock_t nl_emu_lock = RW_LOCK_UNLOCKED;

/*
 *	Backward compatibility.
 */

int netlink_attach(int unit, int (*function)(int, struct sk_buff *skb))
        struct sock *sk = netlink_kernel_create(unit, NULL);

        nlk_sk(sk)->handler = function;
        write_lock_bh(&nl_emu_lock);
        netlink_kernel[unit] = sk->sk_socket;
        write_unlock_bh(&nl_emu_lock);

void netlink_detach(int unit)
        struct socket *sock;

        write_lock_bh(&nl_emu_lock);
        sock = netlink_kernel[unit];
        netlink_kernel[unit] = NULL;
        write_unlock_bh(&nl_emu_lock);

int netlink_post(int unit, struct sk_buff *skb)
        struct socket *sock;

        read_lock(&nl_emu_lock);
        sock = netlink_kernel[unit];
                struct sock *sk = sock->sk;
                memset(skb->cb, 0, sizeof(skb->cb));
                read_unlock(&nl_emu_lock);

                netlink_broadcast(sk, skb, 0, ~0, GFP_ATOMIC);

        read_unlock(&nl_emu_lock);
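
/*
 * These three entry points emulate the old NETLINK_DEV-style interface
 * on top of in-kernel sockets: netlink_attach() creates the kernel
 * socket for a unit and installs a synchronous handler that
 * netlink_sendskb()/netlink_broadcast_deliver() invoke directly,
 * netlink_detach() drops it again, and netlink_post() broadcasts a
 * message to every listener of the unit.
 */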
#ifdef CONFIG_PROC_FS

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
        struct hlist_node *node;

        for (i = 0; i < MAX_LINKS; i++) {
                sk_for_each(s, node, &nl_table[i]) {
                        seq->private = (void *) i;

static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
        read_lock(&nl_table_lock);
        return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        if (v == SEQ_START_TOKEN)
                return netlink_seq_socket_idx(seq, 0);

        long i = (long)seq->private;

        while (++i < MAX_LINKS) {
                s = sk_head(&nl_table[i]);
                seq->private = (void *) i;

static void netlink_seq_stop(struct seq_file *seq, void *v)
        read_unlock(&nl_table_lock);

static int netlink_seq_show(struct seq_file *seq, void *v)
        if (v == SEQ_START_TOKEN)
                         "sk       Eth Pid    Groups   "
                         "Rmem     Wmem     Dump     Locks\n");

                struct netlink_opt *nlk = nlk_sk(s);

                seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
                           atomic_read(&s->sk_rmem_alloc),
                           atomic_read(&s->sk_wmem_alloc),
                           atomic_read(&s->sk_refcnt)

static struct seq_operations netlink_seq_ops = {
        .start  = netlink_seq_start,
        .next   = netlink_seq_next,
        .stop   = netlink_seq_stop,
        .show   = netlink_seq_show,

static int netlink_seq_open(struct inode *inode, struct file *file)
        return seq_open(file, &netlink_seq_ops);

static struct file_operations netlink_seq_fops = {
        .owner          = THIS_MODULE,
        .open           = netlink_seq_open,
        .llseek         = seq_lseek,
        .release        = seq_release,

int netlink_register_notifier(struct notifier_block *nb)
        return notifier_chain_register(&netlink_chain, nb);

int netlink_unregister_notifier(struct notifier_block *nb)
        return notifier_chain_unregister(&netlink_chain, nb);

static struct proto_ops netlink_ops = {
        .family =       PF_NETLINK,
        .owner =        THIS_MODULE,
        .release =      netlink_release,
        .bind =         netlink_bind,
        .connect =      netlink_connect,
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      netlink_getname,
        .poll =         datagram_poll,
        .ioctl =        sock_no_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
        .setsockopt =   sock_no_setsockopt,
        .getsockopt =   sock_no_getsockopt,
        .sendmsg =      netlink_sendmsg,
        .recvmsg =      netlink_recvmsg,
        .mmap =         sock_no_mmap,
        .sendpage =     sock_no_sendpage,

static struct net_proto_family netlink_family_ops = {
        .family = PF_NETLINK,
        .create = netlink_create,
        .owner  = THIS_MODULE,  /* for consistency 8) */
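
/*
 * netlink_proto_init() runs as a core_initcall so the protocol family is
 * registered before the subsystems that create kernel netlink sockets.
 * The size check below guards the NETLINK_CB()/NETLINK_CREDS() trick:
 * per-skb netlink metadata lives in skb->cb[], so struct
 * netlink_skb_parms must never outgrow that array.
 */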
static int __init netlink_proto_init(void)
        struct sk_buff *dummy_skb;

        if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb)) {
                printk(KERN_CRIT "netlink_init: panic\n");

        sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
        proc_net_fops_create("netlink", 0, &netlink_seq_fops);
#endif
        /* The netlink device handler may be needed early. */

static void __exit netlink_proto_exit(void)
        sock_unregister(PF_NETLINK);
        proc_net_remove("netlink");

core_initcall(netlink_proto_init);
module_exit(netlink_proto_exit);

MODULE_LICENSE("GPL");

MODULE_ALIAS_NETPROTO(PF_NETLINK);

EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_err);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);

#if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE)
EXPORT_SYMBOL(netlink_attach);
EXPORT_SYMBOL(netlink_detach);
EXPORT_SYMBOL(netlink_post);
#endif