/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@redhat.com>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 * 				 use nlk_sk, as sk->protinfo is on a diet 8)
 */
#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <net/sock.h>
#include <net/scm.h>
#if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE)
#define NL_EMULATE_DEV
#endif
struct netlink_opt {
	u32			pid;
	unsigned int		groups;
	u32			dst_pid;
	unsigned int		dst_groups;
	unsigned long		state;
	int			(*handler)(int unit, struct sk_buff *skb);
	wait_queue_head_t	wait;
	struct netlink_callback	*cb;
	spinlock_t		cb_lock;
	void			(*data_ready)(struct sock *sk, int bytes);
};

#define nlk_sk(__sk) ((struct netlink_opt *)(__sk)->sk_protinfo)
struct nl_pid_hash {
	struct hlist_head	*table;
	unsigned long		rehash_time;

	unsigned int		mask;
	unsigned int		shift;

	unsigned int		entries;
	unsigned int		max_shift;

	u32			rnd;
};
struct netlink_table {
	struct nl_pid_hash hash;
	struct hlist_head mc_list;
};

static struct netlink_table *nl_table;
static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
static unsigned int nl_nonroot[MAX_LINKS];

#ifdef NL_EMULATE_DEV
static struct socket *netlink_kernel[MAX_LINKS];
#endif
static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

static rwlock_t nl_table_lock = RW_LOCK_UNLOCKED;
static atomic_t nl_table_users = ATOMIC_INIT(0);

static struct notifier_block *netlink_chain;
static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}
static void netlink_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Freeing alive netlink socket %p\n", sk);
		return;
	}
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(!nlk_sk(sk)->cb);
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look: when several writers sleep and a reader wakes them up, all but
 * one immediately hit the write lock and grab all the cpus. Exclusive sleep
 * solves this, _but_ remember, it adds useless work on UP machines.
 */
static void netlink_table_grab(void)
{
	write_lock_bh(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_bh(&nl_table_lock);
			schedule();
			write_lock_bh(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}
static __inline__ void netlink_table_ungrab(void)
{
	write_unlock_bh(&nl_table_lock);
	wake_up(&nl_table_wait);
}
static __inline__ void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}
static __inline__ void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(sk, node, head) {
		if (nlk_sk(sk)->pid == pid) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}
static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC, get_order(size));
}
static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}
static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_pid_hash_alloc(size);
	if (!table)
		return 0;

	memset(table, 0, size);
	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *node, *tmp;

		sk_for_each_safe(sk, node, tmp, &otable[i])
			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
	}

	nl_pid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}
static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_pid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}
static struct proto_ops netlink_ops;
static int netlink_insert(struct sock *sk, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid)
			break;
		len++;
	}
	if (node)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->pid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}
static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	nl_table[sk->sk_protocol].hash.entries--;
	sk_del_node_init(sk);
	if (nlk_sk(sk)->groups)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}
static int netlink_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct netlink_opt *nlk;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	sock->ops = &netlink_ops;

	sk = sk_alloc(PF_NETLINK, GFP_KERNEL, 1, NULL);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sk_set_owner(sk, THIS_MODULE);

	nlk = sk->sk_protinfo = kmalloc(sizeof(*nlk), GFP_KERNEL);
	if (!nlk) {
		sk_free(sk);
		return -ENOMEM;
	}
	memset(nlk, 0, sizeof(*nlk));

	spin_lock_init(&nlk->cb_lock);
	init_waitqueue_head(&nlk->wait);
	sk->sk_destruct = netlink_sock_destruct;

	sk->sk_protocol = protocol;
	return 0;
}
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	nlk = nlk_sk(sk);

	spin_lock(&nlk->cb_lock);
	if (nlk->cb) {
		nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
		nlk->cb = NULL;
		sock_put(sk);
	}
	spin_unlock(&nlk->cb_lock);

	/* OK. Socket is unlinked, and, therefore,
	   no new packets will arrive */

	sock_orphan(sk);
	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid && !nlk->groups) {
		struct netlink_notify n = {
			.protocol = sk->sk_protocol,
			.pid = nlk->pid,
		};
		notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
	}

	sock_put(sk);
	return 0;
}
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = current->pid;
	int err;
	static s32 rover = -4097;

retry:
	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, pid);
	if (err == -EADDRINUSE)
		goto retry;
	nlk_sk(sk)->groups = 0;
	return err;
}
static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
	return (nl_nonroot[sock->sk->sk_protocol] & flag) ||
	       capable(CAP_NET_ADMIN);
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser is allowed to listen to multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))
		return -EPERM;

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && !nlk->groups)
		return 0;

	netlink_table_grab();
	if (nlk->groups && !nladdr->nl_groups)
		__sk_del_bind_node(sk);
	else if (!nlk->groups && nladdr->nl_groups)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->groups = nladdr->nl_groups;
	netlink_table_ungrab();

	return 0;
}
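/*
 * Editor's sketch (not part of the original file): the user-space view of
 * netlink_bind() above.  NETLINK_ROUTE and RTMGRP_LINK are only convenient
 * values to illustrate with; any protocol/group pair behaves the same way.
 */
#if 0	/* illustration only -- user-space C */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int open_rtnl(void)
{
	struct sockaddr_nl sa;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;

	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;
	sa.nl_pid = 0;			/* 0: netlink_autobind() picks one */
	sa.nl_groups = RTMGRP_LINK;	/* needs CAP_NET_ADMIN unless
					 * netlink_set_nonroot() allows it */
	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif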
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_pid	= 0;
		nlk->dst_groups = 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_pid	= nladdr->nl_pid;
		nlk->dst_groups = nladdr->nl_groups;
	}

	return err;
}
static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = nlk->dst_groups;
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups;
	}
	return 0;
}
static void netlink_overrun(struct sock *sk)
{
	if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);
	}
}
struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	int protocol = ssk->sk_protocol;
	struct sock *sock;
	struct netlink_opt *nlk;

	sock = netlink_lookup(protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if ((nlk->pid == 0 && !nlk->data_ready) ||
	    (sock->sk_state == NETLINK_CONNECTED &&
	     nlk->dst_pid != nlk_sk(ssk)->pid)) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}
struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct socket *socket;
	struct sock *sock;

	if (!inode->i_sock || !(socket = SOCKET_I(inode)))
		return ERR_PTR(-ENOTSOCK);

	sock = socket->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue to netlink_sendskb().
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo)
{
	struct netlink_opt *nlk;

	nlk = nlk_sk(sk);

#ifdef NL_EMULATE_DEV
	if (nlk->handler)
		return 0;
#endif
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);

		if (!timeo) {
			if (!nlk->pid)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}
int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
	struct netlink_opt *nlk;
	int len = skb->len;

	nlk = nlk_sk(sk);
#ifdef NL_EMULATE_DEV
	if (nlk->handler) {
		skb_orphan(skb);
		len = nlk->handler(protocol, skb);
		sock_put(sk);
		return len;
	}
#endif

	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}
void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}
static inline void netlink_trim(struct sk_buff *skb, int allocation)
{
	int delta = skb->end - skb->tail;

	/* If the packet is charged to a socket, the modification
	 * of truesize below is illegal and will corrupt socket
	 * buffer accounting state.
	 */
	BUG_ON(skb->list != NULL);

	if (delta * 2 < skb->truesize)
		return;
	if (pskb_expand_head(skb, 0, -delta, allocation))
		return;
	skb->truesize -= delta;
}
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	err = netlink_attachskb(sk, skb, nonblock, timeo);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb, ssk->sk_protocol);
}
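/*
 * Editor's sketch (not in the original file): a kernel-side caller replying
 * to a request with netlink_unicast().  "my_sock" stands in for a socket
 * made by netlink_kernel_create() below; the __nlmsg_put() pattern mirrors
 * netlink_ack() in this file, and NLMSG_DONE is used purely for illustration.
 */
#if 0	/* illustration only */
static void my_reply(struct sock *my_sock, u32 pid, u32 seq)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	skb = alloc_skb(NLMSG_SPACE(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;

	nlh = __nlmsg_put(skb, pid, seq, NLMSG_DONE, sizeof(u32));
	*(u32 *)NLMSG_DATA(nlh) = 42;	/* payload */

	/* May sleep while the peer's rcvbuf is full (last arg: nonblock). */
	netlink_unicast(my_sock, skb, pid, 0);
}
#endif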
static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_opt *nlk = nlk_sk(sk);
#ifdef NL_EMULATE_DEV
	if (nlk->handler) {
		skb_orphan(skb);
		nlk->handler(sk->sk_protocol, skb);
		return 0;
	}
#endif
	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_orphan(skb);
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}
struct netlink_broadcast_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int failure;
	int congested;
	int delivered;
	int allocation;
	struct sk_buff *skb, *skb2;
};
static inline int do_one_broadcast(struct sock *sk,
				   struct netlink_broadcast_data *p)
{
	struct netlink_opt *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->pid == p->pid || !(nlk->groups & p->group))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (atomic_read(&p->skb->users) != 1) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = p->skb;
			atomic_inc(&p->skb->users);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, int allocation)
{
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;

	netlink_trim(skb, allocation);

	/* While we sleep in clone, do not allow the socket list to change */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	netlink_unlock_table();

	if (info.skb2)
		kfree_skb(info.skb2);
	kfree_skb(skb);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	if (info.failure)
		return -ENOBUFS;
	return -ESRCH;
}
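/*
 * Editor's sketch (not in the original file): multicasting an event skb to
 * listeners of group 1 on a kernel socket.  GFP_KERNEL assumes process
 * context; from softirq context GFP_ATOMIC would be required instead.
 */
#if 0	/* illustration only */
static void my_notify(struct sock *my_sock)
{
	struct sk_buff *skb = alloc_skb(NLMSG_SPACE(0), GFP_KERNEL);

	if (!skb)
		return;
	__nlmsg_put(skb, 0, 0, NLMSG_DONE, 0);

	/* pid 0: exclude no listener by pid; group 1: first multicast group.
	 * Slow listeners get ENOBUFS through netlink_overrun(). */
	netlink_broadcast(my_sock, skb, 0, 1, GFP_KERNEL);
}
#endif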
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;
};
static inline int do_one_set_err(struct sock *sk,
				 struct netlink_set_err_data *p)
{
	struct netlink_opt *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)
		goto out;

	if (nlk->pid == p->pid || !(nlk->groups & p->group))
		goto out;

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return 0;
}
void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.code = code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
}
static inline void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_opt *nlk = nlk_sk(sk);

	if (!skb_queue_len(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_groups;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_groups = addr->nl_groups;
		if (dst_groups && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = nlk->dst_pid;
		dst_groups = nlk->dst_groups;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid	= nlk->pid;
	NETLINK_CB(skb).groups	= nlk->groups;
	NETLINK_CB(skb).dst_pid = dst_pid;
	NETLINK_CB(skb).dst_groups = dst_groups;
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so we have to save the
	   current capabilities and check them when this message is delivered
	   to the corresponding kernel module.   --ANK (980802)
	 */

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_groups) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_groups, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);

out:
	return err;
}
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb;
	int err;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb->h.raw = skb->data;
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad    = 0;
		addr->nl_pid	= NETLINK_CB(skb).pid;
		addr->nl_groups	= NETLINK_CB(skb).dst_groups;
		msg->msg_namelen = sizeof(*addr);
	}

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
		netlink_dump(sk);

	scm_recv(sock, msg, siocb->scm, flags);

out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}
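/*
 * Editor's sketch (not in the original file): the user-space counterpart of
 * netlink_recvmsg(), draining one datagram and walking the messages in it
 * with the standard NLMSG_OK()/NLMSG_NEXT() iteration.
 */
#if 0	/* illustration only -- user-space C */
#include <sys/socket.h>
#include <linux/netlink.h>

static void read_event(int fd)
{
	char buf[8192];
	struct sockaddr_nl sa;
	struct iovec iov = { buf, sizeof(buf) };
	struct msghdr msg = { &sa, sizeof(sa), &iov, 1, NULL, 0, 0 };
	struct nlmsghdr *nh;
	int len = recvmsg(fd, &msg, 0);

	if (len <= 0)
		return;
	for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
	     nh = NLMSG_NEXT(nh, len)) {
		if (nh->nlmsg_type == NLMSG_DONE)
			break;			/* end of a multipart dump */
		if (nh->nlmsg_type == NLMSG_ERROR)
			break;			/* see netlink_ack() below */
		/* ... handle nh ... */
	}
}
#endif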
static void netlink_data_ready(struct sock *sk, int len)
{
	struct netlink_opt *nlk = nlk_sk(sk);

	if (nlk->data_ready)
		nlk->data_ready(sk, len);
	netlink_rcv_wake(sk);
}
/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

struct sock *
netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len))
{
	struct socket *sock;
	struct sock *sk;

	if (!nl_table)
		return NULL;

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	if (netlink_create(sock, unit) < 0) {
		sock_release(sock);
		return NULL;
	}
	sk = sock->sk;
	sk->sk_data_ready = netlink_data_ready;
	if (input)
		nlk_sk(sk)->data_ready = input;

	if (netlink_insert(sk, 0)) {
		sock_release(sock);
		return NULL;
	}
	return sk;
}
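/*
 * Editor's sketch (not in the original file): a module claiming a protocol
 * number and draining its receive queue from the input callback.
 * NETLINK_USERSOCK is just a convenient existing unit for illustration.
 */
#if 0	/* illustration only */
static struct sock *my_sock;

static void my_input(struct sock *sk, int len)
{
	struct sk_buff *skb;

	/* Runs from sk_data_ready, i.e. in the sender's context; keep short. */
	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
		/* ... validate nlh, act on it, possibly netlink_ack() ... */
		kfree_skb(skb);
	}
}

static int __init my_init(void)
{
	my_sock = netlink_kernel_create(NETLINK_USERSOCK, my_input);
	return my_sock ? 0 : -ENOMEM;
}
#endif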
void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
		nl_nonroot[protocol] = flags;
}
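/*
 * Editor's sketch (not in the original file): a protocol opting its users
 * out of the CAP_NET_ADMIN requirement for multicast reception; compare the
 * netlink_capable() checks in netlink_bind()/netlink_connect() above.
 */
#if 0	/* illustration only -- would run from a module init path */
	netlink_set_nonroot(NETLINK_USERSOCK, NL_NONROOT_RECV);
#endif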
static void netlink_destroy_callback(struct netlink_callback *cb)
{
	if (cb->skb)
		kfree_skb(cb->skb);
	kfree(cb);
}
/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */
static int netlink_dump(struct sock *sk)
{
	struct netlink_opt *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	spin_lock(&nlk->cb_lock);

	cb = nlk->cb;
	if (cb == NULL) {
		spin_unlock(&nlk->cb_lock);
		kfree_skb(skb);
		return -EINVAL;
	}

	len = cb->dump(skb, cb);

	if (len > 0) {
		spin_unlock(&nlk->cb_lock);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, len);
		return 0;
	}

	nlh = __nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLMSG_DONE, sizeof(int));
	nlh->nlmsg_flags |= NLM_F_MULTI;
	memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	cb->done(cb);
	nlk->cb = NULL;
	spin_unlock(&nlk->cb_lock);

	netlink_destroy_callback(cb);
	sock_put(sk);
	return 0;
}
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb, struct netlink_callback*),
		       int (*done)(struct netlink_callback*))
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_opt *nlk;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	memset(cb, 0, sizeof(*cb));
	cb->dump = dump;
	cb->done = done;
	cb->nlh = nlh;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);
	/* A dump is in progress... */
	spin_lock(&nlk->cb_lock);
	if (nlk->cb) {
		spin_unlock(&nlk->cb_lock);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	nlk->cb = cb;
	spin_unlock(&nlk->cb_lock);

	netlink_dump(sk);
	return 0;
}
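/*
 * Editor's sketch (not in the original file): how a message handler kicks
 * off a dump when it sees NLM_F_DUMP, in the style of rtnetlink.  my_dump()
 * is called repeatedly, once per skb; it returns skb->len while there is
 * more data and 0 when done, after which netlink_dump() appends NLMSG_DONE.
 */
#if 0	/* illustration only */
static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* cb->args[] persists between calls and can hold the cursor. */
	return 0;	/* nothing (more) to dump */
}

static int my_done(struct netlink_callback *cb)
{
	return 0;
}

static void my_rcv_msg(struct sock *sk, struct sk_buff *skb,
		       struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		int err = netlink_dump_start(sk, skb, nlh, my_dump, my_done);
		if (err)
			netlink_ack(skb, nlh, err);
		return;
	}
	/* ... handle non-dump requests ... */
}
#endif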
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	int size;

	if (err == 0)
		size = NLMSG_SPACE(sizeof(struct nlmsgerr));
	else
		size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));

	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, sizeof(struct nlmsgerr));
	errmsg = NLMSG_DATA(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
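/*
 * Editor's sketch (not in the original file): what the ack above looks like
 * from user space, e.g. inside the read_event() loop sketched earlier.
 * error == 0 is a positive acknowledgement; otherwise it is a negative
 * errno, and errmsg->msg echoes the offending request header.
 */
#if 0	/* illustration only -- user-space C, inside the NLMSG_OK() loop */
	if (nh->nlmsg_type == NLMSG_ERROR) {
		struct nlmsgerr *e = (struct nlmsgerr *)NLMSG_DATA(nh);

		if (e->error)
			fprintf(stderr, "request %u failed: %d\n",
				e->msg.nlmsg_seq, e->error);
	}
#endif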
#ifdef NL_EMULATE_DEV

static rwlock_t nl_emu_lock = RW_LOCK_UNLOCKED;

/*
 *	Backward compatibility.
 */

int netlink_attach(int unit, int (*function)(int, struct sk_buff *skb))
{
	struct sock *sk = netlink_kernel_create(unit, NULL);
	if (sk == NULL)
		return -ENOBUFS;
	nlk_sk(sk)->handler = function;
	write_lock_bh(&nl_emu_lock);
	netlink_kernel[unit] = sk->sk_socket;
	write_unlock_bh(&nl_emu_lock);
	return 0;
}
void netlink_detach(int unit)
{
	struct socket *sock;

	write_lock_bh(&nl_emu_lock);
	sock = netlink_kernel[unit];
	netlink_kernel[unit] = NULL;
	write_unlock_bh(&nl_emu_lock);

	sock_release(sock);
}
int netlink_post(int unit, struct sk_buff *skb)
{
	struct socket *sock;

	read_lock(&nl_emu_lock);
	sock = netlink_kernel[unit];
	if (sock) {
		struct sock *sk = sock->sk;
		memset(skb->cb, 0, sizeof(skb->cb));
		sock_hold(sk);
		read_unlock(&nl_emu_lock);

		netlink_broadcast(sk, skb, 0, ~0, GFP_ATOMIC);

		sock_put(sk);
		return 0;
	}
	read_unlock(&nl_emu_lock);
	return -EUNATCH;
}

#endif
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	struct hlist_node *node;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, node, &hash->table[j]) {
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	s = sk_next(v);
	if (s)
		return s;

	iter = seq->private;
	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&nl_table_lock);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks\n");
	else {
		struct sock *s = v;
		struct netlink_opt *nlk = nlk_sk(s);

		seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
			   s,
			   s->sk_protocol,
			   nlk->pid,
			   nlk->groups,
			   atomic_read(&s->sk_rmem_alloc),
			   atomic_read(&s->sk_wmem_alloc),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt)
			);
	}
	return 0;
}
static struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};
static int netlink_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nl_seq_iter *iter;
	int err;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &netlink_seq_ops);
	if (err) {
		kfree(iter);
		return err;
	}

	memset(iter, 0, sizeof(*iter));
	seq = file->private_data;
	seq->private = iter;
	return 0;
}
static struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

#endif
int netlink_register_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&netlink_chain, nb);
}

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netlink_chain, nb);
}
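/*
 * Editor's sketch (not in the original file): watching for sockets going
 * away.  The chain currently fires NETLINK_URELEASE (see netlink_release()
 * above) with a struct netlink_notify describing the released pid.
 */
#if 0	/* illustration only */
static int my_netlink_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE)
		printk(KERN_DEBUG "netlink pid %d (proto %d) released\n",
		       n->pid, n->protocol);
	return NOTIFY_DONE;
}

static struct notifier_block my_nb = {
	.notifier_call = my_netlink_event,
};

/* ... netlink_register_notifier(&my_nb); ... */
#endif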
static struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
static struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
/* Deliberately unresolved: referencing this breaks the link if
 * netlink_skb_parms ever outgrows skb->cb. */
extern void netlink_skb_parms_too_large(void);

static int __init netlink_proto_init(void)
{
	struct sk_buff *dummy_skb;
	int i;
	unsigned long max;
	unsigned int order;

	if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
		netlink_skb_parms_too_large();

	nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL);
	if (!nl_table) {
enomem:
		printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
		return -ENOMEM;
	}

	memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);

	if (num_physpages >= (128 * 1024))
		max = num_physpages >> (21 - PAGE_SHIFT);
	else
		max = num_physpages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
	max = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;
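	/*
	 * Editor's note, a worked example of the sizing above (assuming
	 * 4 KiB pages, i.e. PAGE_SHIFT == 12, and 4-byte hlist_heads):
	 * with 1 GiB of RAM, num_physpages = 2^18 >= 128 * 1024, so
	 * max = 2^18 >> 9 = 512 pages; the first get_bitmask_order()
	 * gives order = 10 - 1 + 12 = 21, i.e. a 2 MiB ceiling per table;
	 * 2^21 / 4 = 2^19 buckets, so the final order (and hash->max_shift)
	 * is 19.  Smaller machines take the ">> (23 - PAGE_SHIFT)" branch
	 * and end up with a proportionally smaller cap.
	 */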
	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_pid_hash_free(nl_table[i].hash.table,
						 1 * sizeof(*hash->table));
			kfree(nl_table);
			goto enomem;
		}
		memset(hash->table, 0, 1 * sizeof(*hash->table));
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}

	sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("netlink", 0, &netlink_seq_fops);
#endif
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
	return 0;
}
static void __exit netlink_proto_exit(void)
{
	sock_unregister(PF_NETLINK);
	proc_net_remove("netlink");
	kfree(nl_table);
	nl_table = NULL;
}

core_initcall(netlink_proto_init);
module_exit(netlink_proto_exit);

MODULE_LICENSE("GPL");

MODULE_ALIAS_NETPROTO(PF_NETLINK);
EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_err);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);

#if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE)
EXPORT_SYMBOL(netlink_attach);
EXPORT_SYMBOL(netlink_detach);
EXPORT_SYMBOL(netlink_post);
#endif