/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@redhat.com>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 */
#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/vs_context.h>
#include <linux/vs_network.h>
#include <linux/vs_limit.h>

#include <net/sock.h>
#include <net/scm.h>
#if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE)
#define NL_EMULATE_DEV
#endif

struct netlink_opt {
	u32			pid;
	unsigned int		groups;
	u32			dst_pid;
	unsigned int		dst_groups;
	unsigned long		state;
	int			(*handler)(int unit, struct sk_buff *skb);
	wait_queue_head_t	wait;
	struct netlink_callback	*cb;
	spinlock_t		cb_lock;
	void			(*data_ready)(struct sock *sk, int bytes);
};

#define nlk_sk(__sk) ((struct netlink_opt *)(__sk)->sk_protinfo)
struct nl_pid_hash {
	struct hlist_head *table;
	unsigned long rehash_time;

	unsigned int mask;
	unsigned int shift;

	unsigned int entries;
	unsigned int max_shift;

	u32 rnd;
};

struct netlink_table {
	struct nl_pid_hash hash;
	struct hlist_head mc_list;
	unsigned int nl_nonroot;
};
static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static struct notifier_block *netlink_chain;
static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}
static void netlink_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Freeing alive netlink socket %p\n", sk);
		return;
	}
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(!nlk_sk(sk)->cb);
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on SMP.
 * Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */
static void netlink_table_grab(void)
{
	write_lock_bh(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_bh(&nl_table_lock);
			schedule();
			write_lock_bh(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}
static __inline__ void netlink_table_ungrab(void)
{
	write_unlock_bh(&nl_table_lock);
	wake_up(&nl_table_wait);
}
static __inline__ void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}
static __inline__ void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
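/*
 * Usage sketch (illustrative comment, not compiled code): paths that mutate
 * the pid hash pair netlink_table_grab()/netlink_table_ungrab() and may
 * sleep waiting for readers to drain, while paths that only need the table
 * to stay put across a blocking operation pair
 * netlink_lock_table()/netlink_unlock_table(), as netlink_broadcast() does:
 *
 *	netlink_table_grab();
 *	sk_add_node(sk, head);			// writer side
 *	netlink_table_ungrab();
 *
 *	netlink_lock_table();
 *	// ... clone skbs, walk mc_list, may sleep ...
 *	netlink_unlock_table();
 */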
static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(sk, node, head) {
		if (nlk_sk(sk)->pid == pid) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}
static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC, get_order(size));
}
static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}
static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_pid_hash_alloc(size);
	if (!table)
		return 0;

	memset(table, 0, size);
	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *node, *tmp;

		sk_for_each_safe(sk, node, tmp, &otable[i])
			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
	}

	nl_pid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}
static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_pid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}
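/*
 * Worked example of the policy above (numbers illustrative only): with
 * shift = 10 (1024 buckets) and entries = 3000, avg = 3000 >> 10 = 2, so
 * avg > 1 and the table doubles on the next insert. If instead a chain of
 * len = 5 is walked while avg = 0, the table is rehashed at the same size
 * (grow == 0) with a fresh random seed, at most once per rehash_time
 * interval, to break up pathological chains.
 */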
static struct proto_ops netlink_ops;
static int netlink_insert(struct sock *sk, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid)
			break;
		len++;
	}
	if (node)
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->pid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}
static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	nl_table[sk->sk_protocol].hash.entries--;
	sk_del_node_init(sk);
	if (nlk_sk(sk)->groups)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}
static int netlink_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct netlink_opt *nlk;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	sock->ops = &netlink_ops;

	sk = sk_alloc(PF_NETLINK, GFP_KERNEL, 1, NULL);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sk_set_owner(sk, THIS_MODULE);

	nlk = sk->sk_protinfo = kmalloc(sizeof(*nlk), GFP_KERNEL);
	if (!nlk) {
		sk_free(sk);
		return -ENOMEM;
	}
	memset(nlk, 0, sizeof(*nlk));

	spin_lock_init(&nlk->cb_lock);
	init_waitqueue_head(&nlk->wait);
	sk->sk_destruct = netlink_sock_destruct;

	sk->sk_protocol = protocol;
	return 0;
}
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	nlk = nlk_sk(sk);

	spin_lock(&nlk->cb_lock);
	if (nlk->cb) {
		nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
		nlk->cb = NULL;
		__sock_put(sk);
	}
	spin_unlock(&nlk->cb_lock);

	/* OK. Socket is unlinked, and, therefore,
	   no new packets will arrive */

	sock_orphan(sk);
	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid && !nlk->groups) {
		struct netlink_notify n = {
						.protocol = sk->sk_protocol,
						.pid = nlk->pid,
					  };
		notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
	}

	sock_put(sk);
	return 0;
}
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = current->pid;
	int err;
	static s32 rover = -4097;

retry:
	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, pid);
	if (err == -EADDRINUSE)
		goto retry;
	nlk_sk(sk)->groups = 0;
	return 0;
}
static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
	       capable(CAP_NET_ADMIN);
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser is allowed to listen to multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))
		return -EPERM;

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && !nlk->groups)
		return 0;

	netlink_table_grab();
	if (nlk->groups && !nladdr->nl_groups)
		__sk_del_bind_node(sk);
	else if (!nlk->groups && nladdr->nl_groups)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->groups = nladdr->nl_groups;
	netlink_table_ungrab();

	return 0;
}
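/*
 * For reference, the matching userspace bind (an illustrative sketch, not
 * part of this file; RTMGRP_LINK is just one example subscription):
 *
 *	struct sockaddr_nl snl = {
 *		.nl_family = AF_NETLINK,
 *		.nl_pid    = getpid(),		// or 0 to take the autobind path
 *		.nl_groups = RTMGRP_LINK,	// multicast subscriptions
 *	};
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
 *
 * A nonzero nl_groups requires CAP_NET_ADMIN unless the protocol was
 * marked NL_NONROOT_RECV via netlink_set_nonroot().
 */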
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state = NETLINK_UNCONNECTED;
		nlk->dst_pid = 0;
		nlk->dst_groups = 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only the superuser is allowed to send to multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state = NETLINK_CONNECTED;
		nlk->dst_pid = nladdr->nl_pid;
		nlk->dst_groups = nladdr->nl_groups;
	}

	return err;
}
static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_pid;
		nladdr->nl_groups = nlk->dst_groups;
	} else {
		nladdr->nl_pid = nlk->pid;
		nladdr->nl_groups = nlk->groups;
	}
	return 0;
}
static void netlink_overrun(struct sock *sk)
{
	if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);
	}
}
static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
	int protocol = ssk->sk_protocol;
	struct sock *sock;
	struct netlink_opt *nlk;

	sock = netlink_lookup(protocol, pid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if ((nlk->pid == 0 && !nlk->data_ready) ||
	    (sock->sk_state == NETLINK_CONNECTED &&
	     nlk->dst_pid != nlk_sk(ssk)->pid)) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}
struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct socket *socket;
	struct sock *sock;

	if (!inode->i_sock || !(socket = SOCKET_I(inode)))
		return ERR_PTR(-ENOTSOCK);

	sock = socket->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo)
{
	struct netlink_opt *nlk;

	nlk = nlk_sk(sk);

#ifdef NL_EMULATE_DEV
	if (nlk->handler)
		return 0;
#endif
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!timeo) {
			if (!nlk->pid)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		return 1;
	}
	skb_set_owner_r(skb, sk);
	return 0;
}
int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
	struct netlink_opt *nlk;
	int len = skb->len;

	nlk = nlk_sk(sk);
#ifdef NL_EMULATE_DEV
	if (nlk->handler) {
		skb_orphan(skb);
		len = nlk->handler(protocol, skb);
		sock_put(sk);
		return len;
	}
#endif
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	sock_put(sk);
	return len;
}
void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}
static inline struct sk_buff *netlink_trim(struct sk_buff *skb, int allocation)
{
	int delta;

	skb_orphan(skb);

	delta = skb->end - skb->tail;
	if (delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		kfree_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbypid(ssk, pid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	err = netlink_attachskb(sk, skb, nonblock, timeo);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb, ssk->sk_protocol);
}
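/*
 * Minimal kernel-side usage sketch (illustrative only; nlsk, pid, seq,
 * type, payload and size are assumed to come from the caller, with nlsk
 * obtained from netlink_kernel_create() below):
 *
 *	struct sk_buff *skb = alloc_skb(NLMSG_SPACE(size), GFP_KERNEL);
 *	struct nlmsghdr *nlh;
 *	if (!skb)
 *		return -ENOBUFS;
 *	nlh = __nlmsg_put(skb, 0, seq, type, size);	// pid 0 == kernel
 *	memcpy(NLMSG_DATA(nlh), payload, size);
 *	netlink_unicast(nlsk, skb, pid, MSG_DONTWAIT);
 *
 * netlink_unicast() consumes the skb on both success and failure.
 */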
static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_opt *nlk = nlk_sk(sk);
#ifdef NL_EMULATE_DEV
	if (nlk->handler) {
		nlk->handler(sk->sk_protocol, skb);
		return 0;
	}
#endif
	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}
struct netlink_broadcast_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int failure;
	int congested;
	int delivered;
	int allocation;
	struct sk_buff *skb, *skb2;
};
static inline int do_one_broadcast(struct sock *sk,
				   struct netlink_broadcast_data *p)
{
	struct netlink_opt *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		goto out;

	if (nlk->pid == p->pid || !(nlk->groups & p->group))
		goto out;

	if (p->failure) {
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (atomic_read(&p->skb->users) != 1) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = p->skb;
			atomic_inc(&p->skb->users);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, int allocation)
{
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;

	/* While we sleep in clone, do not allow the socket list to change */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	netlink_unlock_table();

	if (info.skb2)
		kfree_skb(info.skb2);
	kfree_skb(skb);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	if (info.failure)
		return -ENOBUFS;
	return -ESRCH;
}
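/*
 * Typical caller sketch (illustrative; nlsk and group are assumed):
 *
 *	netlink_broadcast(nlsk, skb, 0, group, GFP_KERNEL);
 *
 * The pid argument names one bound pid to skip (usually the original
 * sender); the skb is always consumed, so callers that also unicast it
 * take an extra reference first, as netlink_sendmsg() does below.
 */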
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;
};

static inline int do_one_set_err(struct sock *sk,
				 struct netlink_set_err_data *p)
{
	struct netlink_opt *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)
		goto out;

	if (nlk->pid == p->pid || !(nlk->groups & p->group))
		goto out;

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return 0;
}
void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	info.code = code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
}
static inline void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_opt *nlk = nlk_sk(sk);

	if (!skb_queue_len(&sk->sk_receive_queue))
		clear_bit(0, &nlk->state);
	if (!test_bit(0, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *addr = msg->msg_name;
	u32 dst_pid;
	u32 dst_groups;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (NULL == siocb->scm)
		siocb->scm = &scm;
	err = scm_send(sock, msg, siocb->scm);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_groups = addr->nl_groups;
		if (dst_groups && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = nlk->dst_pid;
		dst_groups = nlk->dst_groups;
	}

	if (!nlk->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).pid = nlk->pid;
	NETLINK_CB(skb).groups = nlk->groups;
	NETLINK_CB(skb).dst_pid = dst_pid;
	NETLINK_CB(skb).dst_groups = dst_groups;
	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so that
	   we will have to save current capabilities to
	   check them, when this message will be delivered
	   to corresponding kernel module.   --ANK (980802)
	 */

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_groups) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_groups, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags & MSG_DONTWAIT);

out:
	return err;
}
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
			   struct msghdr *msg, size_t len,
			   int flags)
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb;
	int err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb->h.raw = skb->data;
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad = 0;
		addr->nl_pid = NETLINK_CB(skb).pid;
		addr->nl_groups = NETLINK_CB(skb).dst_groups;
		msg->msg_namelen = sizeof(*addr);
	}

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	skb_free_datagram(sk, skb);

	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
		netlink_dump(sk);

	scm_recv(sock, msg, siocb->scm, flags);

out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}
static void netlink_data_ready(struct sock *sk, int len)
{
	struct netlink_opt *nlk = nlk_sk(sk);

	if (nlk->data_ready)
		nlk->data_ready(sk, len);
	netlink_rcv_wake(sk);
}
/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

struct sock *
netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len))
{
	struct socket *sock;
	struct sock *sk;

	if (!nl_table)
		return NULL;

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	if (netlink_create(sock, unit) < 0) {
		sock_release(sock);
		return NULL;
	}
	sk = sock->sk;
	sk->sk_data_ready = netlink_data_ready;
	if (input)
		nlk_sk(sk)->data_ready = input;

	if (netlink_insert(sk, 0)) {
		sock_release(sock);
		return NULL;
	}
	return sk;
}
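/*
 * Example (a minimal sketch, not part of this file): a module creating a
 * kernel netlink socket that acks every request. The protocol number and
 * the names my_sk/my_input are placeholders, not an established user.
 *
 *	static struct sock *my_sk;
 *
 *	static void my_input(struct sock *sk, int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
 *			struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
 *
 *			if (skb->len >= NLMSG_SPACE(0) &&
 *			    (nlh->nlmsg_flags & NLM_F_ACK))
 *				netlink_ack(skb, nlh, 0);
 *			kfree_skb(skb);
 *		}
 *	}
 *
 *	my_sk = netlink_kernel_create(NETLINK_USERSOCK, my_input);
 *
 * The input callback runs from the sender's sk_data_ready context, so it
 * should not block for long.
 */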
void netlink_set_nonroot(int protocol, unsigned int flags)
{
	if ((unsigned int)protocol < MAX_LINKS)
		nl_table[protocol].nl_nonroot = flags;
}
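/*
 * E.g. a protocol that wants unprivileged multicast listeners would call
 * (illustrative, at its own init time):
 *
 *	netlink_set_nonroot(NETLINK_USERSOCK, NL_NONROOT_RECV);
 *
 * after which binding with nonzero nl_groups on that protocol no longer
 * requires CAP_NET_ADMIN.
 */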
static void netlink_destroy_callback(struct netlink_callback *cb)
{
	if (cb->skb)
		kfree_skb(cb->skb);
	kfree(cb);
}

/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */
static int netlink_dump(struct sock *sk)
{
	struct netlink_opt *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	spin_lock(&nlk->cb_lock);

	cb = nlk->cb;
	if (cb == NULL) {
		spin_unlock(&nlk->cb_lock);
		kfree_skb(skb);
		return -EINVAL;
	}

	len = cb->dump(skb, cb);

	if (len > 0) {
		spin_unlock(&nlk->cb_lock);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, len);
		return 0;
	}

	nlh = __nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLMSG_DONE, sizeof(int));
	nlh->nlmsg_flags |= NLM_F_MULTI;
	memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);

	cb->done(cb);
	nlk->cb = NULL;
	spin_unlock(&nlk->cb_lock);

	netlink_destroy_callback(cb);
	__sock_put(sk);
	return 0;
}
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb, struct netlink_callback *),
		       int (*done)(struct netlink_callback *))
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_opt *nlk;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	memset(cb, 0, sizeof(*cb));
	cb->dump = dump;
	cb->done = done;
	cb->nlh = nlh;
	atomic_inc(&skb->users);
	cb->skb = skb;

	sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	nlk = nlk_sk(sk);
	/* A dump is in progress... */
	spin_lock(&nlk->cb_lock);
	if (nlk->cb) {
		spin_unlock(&nlk->cb_lock);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	nlk->cb = cb;
	spin_unlock(&nlk->cb_lock);

	netlink_dump(sk);
	return 0;
}
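/*
 * Caller sketch (illustrative; my_dump/my_done are placeholder names): a
 * request handler starts a dump and lets netlink_recvmsg() drive the
 * continuation as the reader drains its receive queue:
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP)
 *		return netlink_dump_start(nlsk, skb, nlh, my_dump, my_done);
 *
 * my_dump() fills one skb per call and returns the length written, or
 * <= 0 when finished, at which point NLMSG_DONE is queued and my_done()
 * runs.
 */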
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	int size;

	if (err == 0)
		size = NLMSG_SPACE(sizeof(struct nlmsgerr));
	else
		size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));

	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).pid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			  NLMSG_ERROR, sizeof(struct nlmsgerr));
	errmsg = NLMSG_DATA(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	int link;
	int hash_idx;
};

static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct sock *s;
	struct hlist_node *node;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (j = 0; j <= hash->mask; j++) {
			sk_for_each(s, node, &hash->table[j]) {
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&nl_table_lock);
	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *s;
	struct nl_seq_iter *iter;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	s = sk_next(v);
	if (s)
		return s;

	iter = seq->private;
	i = iter->link;
	j = iter->hash_idx + 1;

	do {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		for (; j <= hash->mask; j++) {
			s = sk_head(&hash->table[j]);
			if (s) {
				iter->link = i;
				iter->hash_idx = j;
				return s;
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&nl_table_lock);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks\n");
	else {
		struct sock *s = v;
		struct netlink_opt *nlk = nlk_sk(s);

		seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
			   s,
			   s->sk_protocol,
			   nlk->pid,
			   nlk->groups,
			   atomic_read(&s->sk_rmem_alloc),
			   atomic_read(&s->sk_wmem_alloc),
			   nlk->cb,
			   atomic_read(&s->sk_refcnt)
			);
	}
	return 0;
}
static struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};
static int netlink_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nl_seq_iter *iter;
	int err;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	err = seq_open(file, &netlink_seq_ops);
	if (err) {
		kfree(iter);
		return err;
	}

	memset(iter, 0, sizeof(*iter));
	seq = file->private_data;
	seq->private = iter;
	return 0;
}
static struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
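/*
 * The resulting /proc/net/netlink lines look like this (values are
 * illustrative, one row per socket, per the format string above):
 *
 *	sk       Eth Pid    Groups   Rmem     Wmem     Dump     Locks
 *	c6b4ddc0 0   4711   00000001 0        0        00000000 2
 */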
int netlink_register_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&netlink_chain, nb);
}

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netlink_chain, nb);
}
static struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
static struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
extern void netlink_skb_parms_too_large(void);

static int __init netlink_proto_init(void)
{
	struct sk_buff *dummy_skb;
	int i;
	unsigned long max;
	unsigned int order;

	if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
		netlink_skb_parms_too_large();

	nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL);
	if (!nl_table) {
		printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
		return -ENOMEM;
	}

	memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);

	if (num_physpages >= (128 * 1024))
		max = num_physpages >> (21 - PAGE_SHIFT);
	else
		max = num_physpages >> (23 - PAGE_SHIFT);

	order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
	max = (1UL << order) / sizeof(struct hlist_head);
	order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;
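	/*
	 * Worked example of the sizing above (illustrative; 32-bit box,
	 * PAGE_SHIFT = 12): with num_physpages = 128 * 1024 (512 MB),
	 * max = 128K >> 9 = 256, so order = get_bitmask_order(256) - 1 + 12
	 * = 20, i.e. 1 MB. Then max = (1 << 20) / sizeof(struct hlist_head)
	 * = 256K buckets, and the final order = get_bitmask_order(256K) - 1
	 * = 18, capping each pid hash at 2^18 buckets via max_shift below.
	 */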
	for (i = 0; i < MAX_LINKS; i++) {
		struct nl_pid_hash *hash = &nl_table[i].hash;

		hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
		if (!hash->table) {
			while (i-- > 0)
				nl_pid_hash_free(nl_table[i].hash.table,
						 1 * sizeof(*hash->table));
			kfree(nl_table);
			nl_table = NULL;
			return -ENOMEM;
		}
		memset(hash->table, 0, 1 * sizeof(*hash->table));
		hash->max_shift = order;
		hash->shift = 0;
		hash->mask = 0;
		hash->rehash_time = jiffies;
	}

	sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create("netlink", 0, &netlink_seq_fops);
#endif
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
	return 0;
}
static void __exit netlink_proto_exit(void)
{
	sock_unregister(PF_NETLINK);
	proc_net_remove("netlink");
	kfree(nl_table);
	nl_table = NULL;
}
core_initcall(netlink_proto_init);
module_exit(netlink_proto_exit);

MODULE_LICENSE("GPL");

MODULE_ALIAS_NETPROTO(PF_NETLINK);

EXPORT_SYMBOL(netlink_ack);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
EXPORT_SYMBOL(netlink_register_notifier);
EXPORT_SYMBOL(netlink_set_err);
EXPORT_SYMBOL(netlink_set_nonroot);
EXPORT_SYMBOL(netlink_unicast);
EXPORT_SYMBOL(netlink_unregister_notifier);