/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "netlink-socket.h"
#include <sys/types.h>
#include "dynamic-string.h"
#include "netlink-protocol.h"
#include "poll-loop.h"
#include "socket-util.h"
VLOG_DEFINE_THIS_MODULE(netlink_socket);

COVERAGE_DEFINE(netlink_overflow);
COVERAGE_DEFINE(netlink_received);
COVERAGE_DEFINE(netlink_recv_jumbo);
COVERAGE_DEFINE(netlink_send);
COVERAGE_DEFINE(netlink_sent);
/* Linux header file confusion causes this to be undefined. */
#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif
/* A single (bad) Netlink message can in theory dump out many, many log
 * messages, so the burst size is set quite high here to avoid missing useful
 * information. Also, at high logging levels we log *all* Netlink messages. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 600);

static uint32_t nl_sock_allocate_seq(struct nl_sock *, unsigned int n);
static void log_nlmsg(const char *function, int error,
                      const void *message, size_t size, int protocol);
/* Netlink sockets. */

    unsigned int rcvbuf;          /* Receive buffer size (SO_RCVBUF). */
/* Compile-time limit on iovecs, so that we can allocate a maximum-size array
 * of iovecs on the stack. */

/* Maximum number of iovecs that may be passed to sendmsg: at least
 * _XOPEN_IOV_MAX (16) and at most MAX_IOVS.
 *
 * Initialized by nl_sock_create(). */
static int nl_sock_cow__(struct nl_sock *);

/* Creates a new netlink socket for the given netlink 'protocol'
 * (NETLINK_ROUTE, NETLINK_GENERIC, ...). Returns 0 and sets '*sockp' to the
 * new socket if successful, otherwise returns a positive errno value. */
nl_sock_create(int protocol, struct nl_sock **sockp)
    struct sockaddr_nl local, remote;

    int save_errno = errno;
    max_iovs = sysconf(_SC_UIO_MAXIOV);
    if (max_iovs < _XOPEN_IOV_MAX) {
        if (max_iovs == -1 && errno) {
            VLOG_WARN("sysconf(_SC_UIO_MAXIOV): %s", strerror(errno));
        max_iovs = _XOPEN_IOV_MAX;
    } else if (max_iovs > MAX_IOVS) {

    sock = malloc(sizeof *sock);

    sock->fd = socket(AF_NETLINK, SOCK_RAW, protocol);
        VLOG_ERR("fcntl: %s", strerror(errno));
    sock->protocol = protocol;

    rcvbuf = 1024 * 1024;
    if (setsockopt(sock->fd, SOL_SOCKET, SO_RCVBUFFORCE,
                   &rcvbuf, sizeof rcvbuf)) {
        VLOG_WARN_RL(&rl, "setting %d-byte socket receive buffer failed (%s)",
                     rcvbuf, strerror(errno));

    retval = get_socket_rcvbuf(sock->fd);
    sock->rcvbuf = retval;

    /* Connect to kernel (pid 0) as remote address. */
    memset(&remote, 0, sizeof remote);
    remote.nl_family = AF_NETLINK;
    if (connect(sock->fd, (struct sockaddr *) &remote, sizeof remote) < 0) {
        VLOG_ERR("connect(0): %s", strerror(errno));

    /* Obtain pid assigned by kernel. */
    local_size = sizeof local;
    if (getsockname(sock->fd, (struct sockaddr *) &local, &local_size) < 0) {
        VLOG_ERR("getsockname: %s", strerror(errno));
    if (local_size < sizeof local || local.nl_family != AF_NETLINK) {
        VLOG_ERR("getsockname returned bad Netlink name");

    sock->pid = local.nl_pid;
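
/* Example (illustrative sketch; error handling abbreviated): creating a
 * Generic Netlink socket with the function above and destroying it again.
 *
 *     struct nl_sock *sock;
 *     int error = nl_sock_create(NETLINK_GENERIC, &sock);
 *     if (error) {
 *         VLOG_ERR("could not create netlink socket (%s)", strerror(error));
 *     } else {
 *         ...use 'sock' with nl_sock_send(), nl_sock_recv(), etc....
 *         nl_sock_destroy(sock);
 *     }
 */
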
/* Creates a new netlink socket for the same protocol as 'src'. Returns 0 and
 * sets '*sockp' to the new socket if successful, otherwise returns a positive
 * errno value. */
nl_sock_clone(const struct nl_sock *src, struct nl_sock **sockp)
    return nl_sock_create(src->protocol, sockp);

/* Destroys netlink socket 'sock'. */
nl_sock_destroy(struct nl_sock *sock)

/* Tries to add 'sock' as a listener for 'multicast_group'. Returns 0 if
 * successful, otherwise a positive errno value.
 *
 * A socket that is subscribed to a multicast group that receives asynchronous
 * notifications must not be used for Netlink transactions or dumps, because
 * transactions and dumps can cause notifications to be lost.
 *
 * Multicast group numbers are always positive.
 *
 * It is not an error to attempt to join a multicast group to which a socket
 * already belongs. */
nl_sock_join_mcgroup(struct nl_sock *sock, unsigned int multicast_group)
    int error = nl_sock_cow__(sock);

    if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                   &multicast_group, sizeof multicast_group) < 0) {
        VLOG_WARN("could not join multicast group %u (%s)",
                  multicast_group, strerror(errno));
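
/* Example (illustrative sketch): a socket created with protocol NETLINK_ROUTE
 * could subscribe to the standard rtnetlink link group this way; RTNLGRP_LINK
 * comes from <linux/rtnetlink.h>.
 *
 *     error = nl_sock_join_mcgroup(route_sock, RTNLGRP_LINK);
 *     ...read notifications with nl_sock_recv(route_sock, ...)...
 *     nl_sock_leave_mcgroup(route_sock, RTNLGRP_LINK);
 */
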
/* Tries to make 'sock' stop listening to 'multicast_group'. Returns 0 if
 * successful, otherwise a positive errno value.
 *
 * Multicast group numbers are always positive.
 *
 * It is not an error to attempt to leave a multicast group to which a socket
 * does not belong.
 *
 * On success, reading from 'sock' will still return any messages that were
 * received on 'multicast_group' before the group was left. */
nl_sock_leave_mcgroup(struct nl_sock *sock, unsigned int multicast_group)
    if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_DROP_MEMBERSHIP,
                   &multicast_group, sizeof multicast_group) < 0) {
        VLOG_WARN("could not leave multicast group %u (%s)",
                  multicast_group, strerror(errno));

nl_sock_send__(struct nl_sock *sock, const struct ofpbuf *msg,
               uint32_t nlmsg_seq, bool wait)
    struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(msg);

    nlmsg->nlmsg_len = msg->size;
    nlmsg->nlmsg_seq = nlmsg_seq;
    nlmsg->nlmsg_pid = sock->pid;

        retval = send(sock->fd, msg->data, msg->size, wait ? 0 : MSG_DONTWAIT);
        error = retval < 0 ? errno : 0;
    } while (error == EINTR);
    log_nlmsg(__func__, error, msg->data, msg->size, sock->protocol);

    COVERAGE_INC(netlink_sent);

/* Tries to send 'msg', which must contain a Netlink message, to the kernel on
 * 'sock'. nlmsg_len in 'msg' will be finalized to match msg->size, nlmsg_pid
 * will be set to 'sock''s pid, and nlmsg_seq will be initialized to a fresh
 * sequence number, before the message is sent.
 *
 * Returns 0 if successful, otherwise a positive errno value. If
 * 'wait' is true, then the send will wait until buffer space is ready;
 * otherwise, returns EAGAIN if the 'sock' send buffer is full. */
nl_sock_send(struct nl_sock *sock, const struct ofpbuf *msg, bool wait)
    return nl_sock_send_seq(sock, msg, nl_sock_allocate_seq(sock, 1), wait);
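
/* Example (illustrative sketch): building a request with the nl_msg helpers
 * and sending it without blocking; 'family' and EXAMPLE_CMD are placeholders
 * for a real Generic Netlink family number and command.
 *
 *     struct ofpbuf request;
 *
 *     ofpbuf_init(&request, 0);
 *     nl_msg_put_genlmsghdr(&request, 0, family, NLM_F_REQUEST,
 *                           EXAMPLE_CMD, 1);
 *     error = nl_sock_send(sock, &request, false);
 *     ofpbuf_uninit(&request);
 */
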
/* Tries to send 'msg', which must contain a Netlink message, to the kernel on
 * 'sock'. nlmsg_len in 'msg' will be finalized to match msg->size, nlmsg_pid
 * will be set to 'sock''s pid, and nlmsg_seq will be initialized to
 * 'nlmsg_seq', before the message is sent.
 *
 * Returns 0 if successful, otherwise a positive errno value. If
 * 'wait' is true, then the send will wait until buffer space is ready;
 * otherwise, returns EAGAIN if the 'sock' send buffer is full.
 *
 * This function is suitable for sending a reply to a request that was received
 * with sequence number 'nlmsg_seq'. Otherwise, use nl_sock_send() instead. */
nl_sock_send_seq(struct nl_sock *sock, const struct ofpbuf *msg,
                 uint32_t nlmsg_seq, bool wait)
    int error = nl_sock_cow__(sock);
    return nl_sock_send__(sock, msg, nlmsg_seq, wait);
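
/* Example (illustrative sketch): replying to a request previously received
 * into 'request_buf', reusing its sequence number so the receiver can match
 * the reply up; 'reply_buf' is assumed to hold a finished Netlink message.
 *
 *     uint32_t seq = nl_msg_nlmsghdr(request_buf)->nlmsg_seq;
 *     error = nl_sock_send_seq(sock, reply_buf, seq, true);
 */
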
/* This stress option is useful for testing that OVS properly tolerates
 * -ENOBUFS on Netlink sockets. Such errors are unavoidable because they can
 * occur if the kernel cannot temporarily allocate enough GFP_ATOMIC memory to
 * reply to a request. They can also occur if messages arrive on a multicast
 * channel faster than OVS can process them. */
STRESS_OPTION(
    netlink_overflow, "simulate netlink socket receive buffer overflow",
nl_sock_recv__(struct nl_sock *sock, struct ofpbuf *buf, bool wait)
    /* We can't accurately predict the size of the data to be received. The
     * caller is supposed to have allocated enough space in 'buf' to handle the
     * "typical" case. To handle exceptions, we make available enough space in
     * 'tail' to allow Netlink messages to be up to 64 kB long (a reasonable
     * figure since that's the maximum length of a Netlink attribute). */
    struct nlmsghdr *nlmsghdr;

    assert(buf->allocated >= sizeof *nlmsghdr);

    iov[0].iov_base = buf->base;
    iov[0].iov_len = buf->allocated;
    iov[1].iov_base = tail;
    iov[1].iov_len = sizeof tail;

    memset(&msg, 0, sizeof msg);

        retval = recvmsg(sock->fd, &msg, wait ? 0 : MSG_DONTWAIT);
    } while (retval < 0 && errno == EINTR);

    if (error == ENOBUFS) {
        /* Socket receive buffer overflow dropped one or more messages that
         * the kernel tried to send to us. */
        COVERAGE_INC(netlink_overflow);

    if (msg.msg_flags & MSG_TRUNC) {
        VLOG_ERR_RL(&rl, "truncated message (longer than %zu bytes)",

    nlmsghdr = buf->data;
    if (retval < sizeof *nlmsghdr
        || nlmsghdr->nlmsg_len < sizeof *nlmsghdr
        || nlmsghdr->nlmsg_len > retval) {
        VLOG_ERR_RL(&rl, "received invalid nlmsg (%zd bytes < %zu)",
                    retval, sizeof *nlmsghdr);

    if (STRESS(netlink_overflow)) {

    buf->size = MIN(retval, buf->allocated);
    if (retval > buf->allocated) {
        COVERAGE_INC(netlink_recv_jumbo);
        ofpbuf_put(buf, tail, retval - buf->allocated);

    log_nlmsg(__func__, 0, buf->data, buf->size, sock->protocol);
    COVERAGE_INC(netlink_received);
/* Tries to receive a Netlink message from the kernel on 'sock' into 'buf'. If
 * 'wait' is true, waits for a message to be ready. Otherwise, fails with
 * EAGAIN if the 'sock' receive buffer is empty.
 *
 * The caller must have initialized 'buf' with an allocation of at least
 * NLMSG_HDRLEN bytes. For best performance, the caller should allocate enough
 * space for a "typical" message.
 *
 * On success, returns 0 and replaces 'buf''s previous content by the received
 * message. This function expands 'buf''s allocated memory, as necessary, to
 * hold the actual size of the received message.
 *
 * On failure, returns a positive errno value and clears 'buf' to zero length.
 * 'buf' retains its previous memory allocation.
 *
 * Regardless of success or failure, this function resets 'buf''s headroom to
 * 0. */
nl_sock_recv(struct nl_sock *sock, struct ofpbuf *buf, bool wait)
    int error = nl_sock_cow__(sock);
    return nl_sock_recv__(sock, buf, wait);
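
/* Example (illustrative sketch): draining whatever notifications are
 * currently queued on a socket without blocking; 2048 bytes is an arbitrary
 * "typical" allocation for this example.
 *
 *     struct ofpbuf buf;
 *     int error;
 *
 *     ofpbuf_init(&buf, 2048);
 *     while (!(error = nl_sock_recv(notify_sock, &buf, false))) {
 *         ...process the message now in 'buf'...
 *     }
 *     if (error != EAGAIN) {
 *         VLOG_WARN_RL(&rl, "receive failed (%s)", strerror(error));
 *     }
 *     ofpbuf_uninit(&buf);
 */
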
nl_sock_record_errors__(struct nl_transaction **transactions, size_t n,
    for (i = 0; i < n; i++) {
        struct nl_transaction *txn = transactions[i];

            ofpbuf_clear(txn->reply);

nl_sock_transact_multiple__(struct nl_sock *sock,
                            struct nl_transaction **transactions, size_t n,
    uint64_t tmp_reply_stub[1024 / 8];
    struct nl_transaction tmp_txn;
    struct ofpbuf tmp_reply;
    struct iovec iovs[MAX_IOVS];

    base_seq = nl_sock_allocate_seq(sock, n);
    for (i = 0; i < n; i++) {
        struct nl_transaction *txn = transactions[i];
        struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(txn->request);

        nlmsg->nlmsg_len = txn->request->size;
        nlmsg->nlmsg_seq = base_seq + i;
        nlmsg->nlmsg_pid = sock->pid;

        iovs[i].iov_base = txn->request->data;
        iovs[i].iov_len = txn->request->size;

    memset(&msg, 0, sizeof msg);
        error = sendmsg(sock->fd, &msg, 0) < 0 ? errno : 0;
    } while (error == EINTR);

    for (i = 0; i < n; i++) {
        struct nl_transaction *txn = transactions[i];

        log_nlmsg(__func__, error, txn->request->data, txn->request->size,

    COVERAGE_ADD(netlink_sent, n);

    ofpbuf_use_stub(&tmp_reply, tmp_reply_stub, sizeof tmp_reply_stub);
    tmp_txn.request = NULL;
    tmp_txn.reply = &tmp_reply;

        struct nl_transaction *buf_txn, *txn;

        /* Find a transaction whose buffer we can use for receiving a reply.
         * If no such transaction is left, use tmp_txn. */
        for (i = 0; i < n; i++) {
            if (transactions[i]->reply) {
                buf_txn = transactions[i];

        /* Receive a reply. */
        error = nl_sock_recv__(sock, buf_txn->reply, false);
            if (error == EAGAIN) {
                nl_sock_record_errors__(transactions, n, 0);

        /* Match the reply up with a transaction. */
        seq = nl_msg_nlmsghdr(buf_txn->reply)->nlmsg_seq;
        if (seq < base_seq || seq >= base_seq + n) {
            VLOG_DBG_RL(&rl, "ignoring unexpected seq %#"PRIx32, seq);

        txn = transactions[i];

        /* Fill in the results for 'txn'. */
        if (nl_msg_nlmsgerr(buf_txn->reply, &txn->error)) {
                ofpbuf_clear(txn->reply);
                VLOG_DBG_RL(&rl, "received NAK error=%d (%s)",
                            error, strerror(txn->error));

        if (txn->reply && txn != buf_txn) {
            struct ofpbuf *reply = buf_txn->reply;
            buf_txn->reply = txn->reply;
        /* Fill in the results for transactions before 'txn'. (We have to do
         * this after the results for 'txn' itself because of the buffer swap
         * above.) */
        nl_sock_record_errors__(transactions, i, 0);

        transactions += i + 1;

    ofpbuf_uninit(&tmp_reply);
/* Sends the 'request' member of the 'n' transactions in 'transactions' on
 * 'sock', in order, and receives responses to all of them. Fills in the
 * 'error' member of each transaction with 0 if it was successful, otherwise
 * with a positive errno value. If 'reply' is nonnull, then it will be filled
 * with the reply if the message receives a detailed reply. In other cases,
 * i.e. where the request failed or had no reply beyond an indication of
 * success, 'reply' will be cleared if it is nonnull.
 *
 * The caller is responsible for destroying each request and reply, and the
 * transactions array itself.
 *
 * Before sending each message, this function will finalize nlmsg_len in each
 * 'request' to match the ofpbuf's size, set nlmsg_pid to 'sock''s pid, and
 * initialize nlmsg_seq.
 *
 * Bare Netlink is an unreliable transport protocol. This function layers
 * reliable delivery and reply semantics on top of bare Netlink. See
 * nl_sock_transact() for some caveats. */
nl_sock_transact_multiple(struct nl_sock *sock,
                          struct nl_transaction **transactions, size_t n)
    error = nl_sock_cow__(sock);
        nl_sock_record_errors__(transactions, n, error);

    /* In theory, every request could have a 64 kB reply. But the default and
     * maximum socket rcvbuf size with typical Dom0 memory sizes both tend to
     * be a bit below 128 kB, so that would only allow a single message in a
     * "batch". So we assume that replies average (at most) 4 kB, which allows
     * a good deal of batching.
     *
     * In practice, most of the requests that we batch either have no reply at
     * all or a brief reply. */
    max_batch_count = MAX(sock->rcvbuf / 4096, 1);
    max_batch_count = MIN(max_batch_count, max_iovs);
        /* Batch up to 'max_batch_count' transactions. But cap it at about a
         * page of requests total because big skbuffs are expensive to
         * allocate in the kernel. */
#if defined(PAGESIZE)
        enum { MAX_BATCH_BYTES = MAX(1, PAGESIZE - 512) };
#else
        enum { MAX_BATCH_BYTES = 4096 - 512 };
#endif
        bytes = transactions[0]->request->size;
        for (count = 1; count < n && count < max_batch_count; count++) {
            if (bytes + transactions[count]->request->size > MAX_BATCH_BYTES) {
            bytes += transactions[count]->request->size;

        error = nl_sock_transact_multiple__(sock, transactions, count, &done);
        transactions += done;

        if (error == ENOBUFS) {
            VLOG_DBG_RL(&rl, "receive buffer overflow, resending request");
            VLOG_ERR_RL(&rl, "transaction error (%s)", strerror(error));
            nl_sock_record_errors__(transactions, n, error);
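
/* Example (illustrative sketch): issuing two requests as one batch.  The
 * request buffers 'req0' and 'req1' are assumed to have been built elsewhere
 * with the nl_msg helpers; only the first transaction asks for a reply.
 *
 *     struct nl_transaction txn0 = { .request = req0,
 *                                    .reply = ofpbuf_new(1024) };
 *     struct nl_transaction txn1 = { .request = req1, .reply = NULL };
 *     struct nl_transaction *txns[] = { &txn0, &txn1 };
 *
 *     nl_sock_transact_multiple(sock, txns, ARRAY_SIZE(txns));
 *     if (!txn0.error) {
 *         ...examine txn0.reply...
 *     }
 *     ofpbuf_delete(txn0.reply);
 */
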
/* Sends 'request' to the kernel via 'sock' and waits for a response. If
 * successful, returns 0. On failure, returns a positive errno value.
 *
 * If 'replyp' is nonnull, then on success '*replyp' is set to the kernel's
 * reply, which the caller is responsible for freeing with ofpbuf_delete(), and
 * on failure '*replyp' is set to NULL. If 'replyp' is null, then the kernel's
 * reply, if any, is discarded.
 *
 * Before the message is sent, nlmsg_len in 'request' will be finalized to
 * match the size of 'request', nlmsg_pid will be set to 'sock''s pid, nlmsg_seq
 * will be initialized, and NLM_F_ACK will be set in nlmsg_flags.
 *
 * The caller is responsible for destroying 'request'.
 *
 * Bare Netlink is an unreliable transport protocol. This function layers
 * reliable delivery and reply semantics on top of bare Netlink.
 *
 * In Netlink, sending a request to the kernel is reliable enough, because the
 * kernel will tell us if the message cannot be queued (and we will in that
 * case put it on the transmit queue and wait until it can be delivered).
 *
 * Receiving the reply is the real problem: if the socket buffer is full when
 * the kernel tries to send the reply, the reply will be dropped. However, the
 * kernel sets a flag that a reply has been dropped. The next call to recv
 * then returns ENOBUFS. We can then re-send the request.
 *
 * Caveats:
 *
 *   1. Netlink depends on sequence numbers to match up requests and
 *      replies. The sender of a request supplies a sequence number, and
 *      the reply echoes back that sequence number.
 *
 *      This is fine, but (1) some kernel netlink implementations are
 *      broken, in that they fail to echo sequence numbers and (2) this
 *      function will drop packets with non-matching sequence numbers, so
 *      that only a single request can be usefully transacted at a time.
 *
 *   2. Resending the request causes it to be re-executed, so the request
 *      needs to be idempotent. */
nl_sock_transact(struct nl_sock *sock, const struct ofpbuf *request,
                 struct ofpbuf **replyp)
    struct nl_transaction *transactionp;
    struct nl_transaction transaction;

    transaction.request = CONST_CAST(struct ofpbuf *, request);
    transaction.reply = replyp ? ofpbuf_new(1024) : NULL;
    transactionp = &transaction;

    nl_sock_transact_multiple(sock, &transactionp, 1);

        if (transaction.error) {
            ofpbuf_delete(transaction.reply);
        *replyp = transaction.reply;

    return transaction.error;
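
/* Example (illustrative sketch): a complete request/reply round trip, along
 * the same lines as do_lookup_genl_family() below; the family name used here
 * is only a placeholder.
 *
 *     struct ofpbuf request, *reply;
 *     int error;
 *
 *     ofpbuf_init(&request, 0);
 *     nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
 *                           CTRL_CMD_GETFAMILY, 1);
 *     nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, "example_family");
 *     error = nl_sock_transact(sock, &request, &reply);
 *     ofpbuf_uninit(&request);
 *     if (!error) {
 *         ...parse 'reply', e.g. with nl_policy_parse()...
 *         ofpbuf_delete(reply);
 *     }
 */
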
/* Drain all the messages currently in 'sock''s receive queue. */
nl_sock_drain(struct nl_sock *sock)
    int error = nl_sock_cow__(sock);
    return drain_rcvbuf(sock->fd);

/* The client is attempting some operation on 'sock'. If 'sock' has an ongoing
 * dump operation, then replace 'sock''s fd with a new socket and hand 'sock''s
 * old fd over to the dump. */
nl_sock_cow__(struct nl_sock *sock)
    struct nl_sock *copy;

    error = nl_sock_clone(sock, &copy);

    sock->pid = copy->pid;
    sock->dump->sock = copy;
/* Starts a Netlink "dump" operation, by sending 'request' to the kernel via
 * 'sock', and initializes 'dump' to reflect the state of the operation.
 *
 * nlmsg_len in 'request' will be finalized to match the size of 'request', and
 * nlmsg_pid will be set to 'sock''s pid, before the message is sent. NLM_F_DUMP
 * and NLM_F_ACK will be set in nlmsg_flags.
 *
 * This Netlink socket library is designed to ensure that the dump is reliable
 * and that it will not interfere with other operations on 'sock', including
 * destroying or sending and receiving messages on 'sock'. One corner case is
 * not handled:
 *
 *   - If 'sock' has been used to send a request (e.g. with nl_sock_send())
 *     whose response has not yet been received (e.g. with nl_sock_recv()).
 *     This is unusual: usually nl_sock_transact() is used to send a message
 *     and receive its reply all in one go.
 *
 * This function provides no status indication. An error status for the entire
 * dump operation is provided when it is completed by calling nl_dump_done().
 *
 * The caller is responsible for destroying 'request'.
 *
 * The new 'dump' is independent of 'sock'. 'sock' and 'dump' may be destroyed
 * in either order. */
nl_dump_start(struct nl_dump *dump,
              struct nl_sock *sock, const struct ofpbuf *request)
    ofpbuf_init(&dump->buffer, 4096);

        /* 'sock' already has an ongoing dump. Clone the socket because
         * Netlink only allows one dump at a time. */
        dump->status = nl_sock_clone(sock, &dump->sock);

    nl_msg_nlmsghdr(request)->nlmsg_flags |= NLM_F_DUMP | NLM_F_ACK;
    dump->status = nl_sock_send__(sock, request, nl_sock_allocate_seq(sock, 1),
    dump->seq = nl_msg_nlmsghdr(request)->nlmsg_seq;
/* Helper function for nl_dump_next(). */
nl_dump_recv(struct nl_dump *dump)
    struct nlmsghdr *nlmsghdr;

    retval = nl_sock_recv__(dump->sock, &dump->buffer, true);
        return retval == EINTR ? EAGAIN : retval;

    nlmsghdr = nl_msg_nlmsghdr(&dump->buffer);
    if (dump->seq != nlmsghdr->nlmsg_seq) {
        VLOG_DBG_RL(&rl, "ignoring seq %#"PRIx32" != expected %#"PRIx32,
                    nlmsghdr->nlmsg_seq, dump->seq);

    if (nl_msg_nlmsgerr(&dump->buffer, &retval)) {
        VLOG_INFO_RL(&rl, "netlink dump request error (%s)",
        return retval && retval != EAGAIN ? retval : EPROTO;

/* Attempts to retrieve another reply from 'dump', which must have been
 * initialized with nl_dump_start().
 *
 * If successful, returns true and points 'reply->data' and 'reply->size' to
 * the message that was retrieved. The caller must not modify 'reply' (because
 * it points into the middle of a larger buffer).
 *
 * On failure, returns false and sets 'reply->data' to NULL and 'reply->size'
 * to 0. Failure might indicate an actual error or merely the end of replies.
 * An error status for the entire dump operation is provided when it is
 * completed by calling nl_dump_done(). */
nl_dump_next(struct nl_dump *dump, struct ofpbuf *reply)
    struct nlmsghdr *nlmsghdr;

    while (!dump->buffer.size) {
        int retval = nl_dump_recv(dump);
            ofpbuf_clear(&dump->buffer);
            if (retval != EAGAIN) {
                dump->status = retval;

    nlmsghdr = nl_msg_next(&dump->buffer, reply);
        VLOG_WARN_RL(&rl, "netlink dump reply contains message fragment");
        dump->status = EPROTO;
    } else if (nlmsghdr->nlmsg_type == NLMSG_DONE) {

/* Completes Netlink dump operation 'dump', which must have been initialized
 * with nl_dump_start(). Returns 0 if the dump operation was error-free,
 * otherwise a positive errno value describing the problem. */
nl_dump_done(struct nl_dump *dump)
    /* Drain any remaining messages that the client didn't read. Otherwise the
     * kernel will continue to queue them up and waste buffer space. */
    while (!dump->status) {
        if (!nl_dump_next(dump, &reply)) {
            assert(dump->status);

    if (dump->sock->dump) {
        dump->sock->dump = NULL;
        nl_sock_destroy(dump->sock);

    ofpbuf_uninit(&dump->buffer);
    return dump->status == EOF ? 0 : dump->status;
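
/* Example (illustrative sketch): the usual dump loop built from the three
 * functions above.  'request' is assumed to be a Netlink message built with
 * the nl_msg helpers; nl_dump_start() itself adds NLM_F_DUMP and NLM_F_ACK.
 *
 *     struct nl_dump dump;
 *     struct ofpbuf reply;
 *     int error;
 *
 *     nl_dump_start(&dump, sock, &request);
 *     ofpbuf_uninit(&request);
 *     while (nl_dump_next(&dump, &reply)) {
 *         ...inspect 'reply', but do not modify or free it...
 *     }
 *     error = nl_dump_done(&dump);
 */
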
/* Causes poll_block() to wake up when any of the specified 'events' (which is
 * an OR'd combination of POLLIN, POLLOUT, etc.) occur on 'sock'. */
nl_sock_wait(const struct nl_sock *sock, short int events)
    poll_fd_wait(sock->fd, events);
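
/* Example (illustrative sketch): the usual poll-loop pattern, with
 * poll_block() from poll-loop.h.
 *
 *     for (;;) {
 *         ...call nl_sock_recv(sock, &buf, false) until it returns EAGAIN...
 *         nl_sock_wait(sock, POLLIN);
 *         poll_block();
 *     }
 */
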
/* Returns the underlying fd for 'sock', for use in "poll()"-like operations
 * that can't use nl_sock_wait().
 *
 * It's a little tricky to use the returned fd correctly, because nl_sock does
 * "copy on write" to allow a single nl_sock to be used for notifications,
 * transactions, and dumps. If 'sock' is used only for notifications and
 * transactions (and never for dump) then the usage is safe. */
nl_sock_fd(const struct nl_sock *sock)

/* Returns the PID associated with this socket. */
nl_sock_pid(const struct nl_sock *sock)

    struct hmap_node hmap_node;

static struct hmap genl_families = HMAP_INITIALIZER(&genl_families);

static const struct nl_policy family_policy[CTRL_ATTR_MAX + 1] = {
    [CTRL_ATTR_FAMILY_ID] = {.type = NL_A_U16},
    [CTRL_ATTR_MCAST_GROUPS] = {.type = NL_A_NESTED, .optional = true},

static struct genl_family *
find_genl_family_by_id(uint16_t id)
    struct genl_family *family;

    HMAP_FOR_EACH_IN_BUCKET (family, hmap_node, hash_int(id, 0),
        if (family->id == id) {

define_genl_family(uint16_t id, const char *name)
    struct genl_family *family = find_genl_family_by_id(id);

        if (!strcmp(family->name, name)) {

    family = xmalloc(sizeof *family);
    hmap_insert(&genl_families, &family->hmap_node, hash_int(id, 0));
    family->name = xstrdup(name);

genl_family_to_name(uint16_t id)
    if (id == GENL_ID_CTRL) {
        struct genl_family *family = find_genl_family_by_id(id);
        return family ? family->name : "unknown";
do_lookup_genl_family(const char *name, struct nlattr **attrs,
                      struct ofpbuf **replyp)
    struct nl_sock *sock;
    struct ofpbuf request, *reply;

    error = nl_sock_create(NETLINK_GENERIC, &sock);

    ofpbuf_init(&request, 0);
    nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
                          CTRL_CMD_GETFAMILY, 1);
    nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, name);
    error = nl_sock_transact(sock, &request, &reply);
    ofpbuf_uninit(&request);
        nl_sock_destroy(sock);

    if (!nl_policy_parse(reply, NLMSG_HDRLEN + GENL_HDRLEN,
                         family_policy, attrs, ARRAY_SIZE(family_policy))
        || nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]) == 0) {
        nl_sock_destroy(sock);
        ofpbuf_delete(reply);

    nl_sock_destroy(sock);

/* Finds the multicast group called 'group_name' in genl family 'family_name'.
 * When successful, writes its result to 'multicast_group' and returns 0.
 * Otherwise, clears 'multicast_group' and returns a positive error code.
 *
 * Some kernels do not support looking up a multicast group with this function.
 * In this case, 'multicast_group' will be populated with 'fallback'. */
nl_lookup_genl_mcgroup(const char *family_name, const char *group_name,
                       unsigned int *multicast_group, unsigned int fallback)
    struct nlattr *family_attrs[ARRAY_SIZE(family_policy)];
    const struct nlattr *mc;
    struct ofpbuf *reply;

    *multicast_group = 0;
    error = do_lookup_genl_family(family_name, family_attrs, &reply);

    if (!family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
        *multicast_group = fallback;
        VLOG_WARN("%s-%s: has no multicast group, using fallback %d",
                  family_name, group_name, *multicast_group);

    NL_NESTED_FOR_EACH (mc, left, family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
        static const struct nl_policy mc_policy[] = {
            [CTRL_ATTR_MCAST_GRP_ID] = {.type = NL_A_U32},
            [CTRL_ATTR_MCAST_GRP_NAME] = {.type = NL_A_STRING},

        struct nlattr *mc_attrs[ARRAY_SIZE(mc_policy)];
        const char *mc_name;

        if (!nl_parse_nested(mc, mc_policy, mc_attrs, ARRAY_SIZE(mc_policy))) {

        mc_name = nl_attr_get_string(mc_attrs[CTRL_ATTR_MCAST_GRP_NAME]);
        if (!strcmp(group_name, mc_name)) {
                nl_attr_get_u32(mc_attrs[CTRL_ATTR_MCAST_GRP_ID]);

    ofpbuf_delete(reply);
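
/* Example (illustrative sketch): looking up a Generic Netlink multicast group
 * by name and subscribing to it; "example_family" and "example_group" are
 * placeholders, and 0 is passed as the fallback group number.
 *
 *     unsigned int mcgroup;
 *
 *     if (!nl_lookup_genl_mcgroup("example_family", "example_group",
 *                                 &mcgroup, 0)
 *         && !nl_sock_join_mcgroup(notify_sock, mcgroup)) {
 *         ...notifications now arrive via nl_sock_recv(notify_sock, ...)...
 *     }
 */
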
/* If '*number' is 0, translates the given Generic Netlink family 'name' to a
 * number and stores it in '*number'. If successful, returns 0 and the caller
 * may use '*number' as the family number. On failure, returns a positive
 * errno value and '*number' caches the errno value. */
nl_lookup_genl_family(const char *name, int *number)
        struct nlattr *attrs[ARRAY_SIZE(family_policy)];
        struct ofpbuf *reply;

        error = do_lookup_genl_family(name, attrs, &reply);
            *number = nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]);
            define_genl_family(*number, name);
        ofpbuf_delete(reply);

        assert(*number != 0);
    return *number > 0 ? 0 : -*number;
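
/* Example (illustrative sketch): resolving a family name once and caching the
 * number, which is how callers typically use this function; "example_family"
 * is a placeholder.
 *
 *     static int family_number = 0;
 *     int error = nl_lookup_genl_family("example_family", &family_number);
 *     if (!error) {
 *         ...pass 'family_number' to nl_msg_put_genlmsghdr()...
 *     }
 */
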
nl_sock_allocate_seq(struct nl_sock *sock, unsigned int n)
    uint32_t seq = sock->next_seq;

    sock->next_seq += n;

    /* Make it impossible for the next request for sequence numbers to wrap
     * around to 0. Start over with 1 to avoid ever using a sequence number of
     * 0, because the kernel uses sequence number 0 for notifications. */
    if (sock->next_seq >= UINT32_MAX / 2) {
nlmsghdr_to_string(const struct nlmsghdr *h, int protocol, struct ds *ds)
    static const struct nlmsg_flag flags[] = {
        { NLM_F_REQUEST, "REQUEST" },
        { NLM_F_MULTI, "MULTI" },
        { NLM_F_ACK, "ACK" },
        { NLM_F_ECHO, "ECHO" },
        { NLM_F_DUMP, "DUMP" },
        { NLM_F_ROOT, "ROOT" },
        { NLM_F_MATCH, "MATCH" },
        { NLM_F_ATOMIC, "ATOMIC" },
    const struct nlmsg_flag *flag;
    uint16_t flags_left;

    ds_put_format(ds, "nl(len:%"PRIu32", type=%"PRIu16,
                  h->nlmsg_len, h->nlmsg_type);
    if (h->nlmsg_type == NLMSG_NOOP) {
        ds_put_cstr(ds, "(no-op)");
    } else if (h->nlmsg_type == NLMSG_ERROR) {
        ds_put_cstr(ds, "(error)");
    } else if (h->nlmsg_type == NLMSG_DONE) {
        ds_put_cstr(ds, "(done)");
    } else if (h->nlmsg_type == NLMSG_OVERRUN) {
        ds_put_cstr(ds, "(overrun)");
    } else if (h->nlmsg_type < NLMSG_MIN_TYPE) {
        ds_put_cstr(ds, "(reserved)");
    } else if (protocol == NETLINK_GENERIC) {
        ds_put_format(ds, "(%s)", genl_family_to_name(h->nlmsg_type));
        ds_put_cstr(ds, "(family-defined)");

    ds_put_format(ds, ", flags=%"PRIx16, h->nlmsg_flags);
    flags_left = h->nlmsg_flags;
    for (flag = flags; flag < &flags[ARRAY_SIZE(flags)]; flag++) {
        if ((flags_left & flag->bits) == flag->bits) {
            ds_put_format(ds, "[%s]", flag->name);
            flags_left &= ~flag->bits;
        ds_put_format(ds, "[OTHER:%"PRIx16"]", flags_left);
    ds_put_format(ds, ", seq=%"PRIx32", pid=%"PRIu32,
                  h->nlmsg_seq, h->nlmsg_pid);
nlmsg_to_string(const struct ofpbuf *buffer, int protocol)
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct nlmsghdr *h = ofpbuf_at(buffer, 0, NLMSG_HDRLEN);

        nlmsghdr_to_string(h, protocol, &ds);
        if (h->nlmsg_type == NLMSG_ERROR) {
            const struct nlmsgerr *e;
            e = ofpbuf_at(buffer, NLMSG_HDRLEN,
                          NLMSG_ALIGN(sizeof(struct nlmsgerr)));
                ds_put_format(&ds, " error(%d", e->error);
                    ds_put_format(&ds, "(%s)", strerror(-e->error));
                ds_put_cstr(&ds, ", in-reply-to(");
                nlmsghdr_to_string(&e->msg, protocol, &ds);
                ds_put_cstr(&ds, "))");
                ds_put_cstr(&ds, " error(truncated)");
        } else if (h->nlmsg_type == NLMSG_DONE) {
            int *error = ofpbuf_at(buffer, NLMSG_HDRLEN, sizeof *error);
                ds_put_format(&ds, " done(%d", *error);
                    ds_put_format(&ds, "(%s)", strerror(-*error));
                ds_put_cstr(&ds, ")");
                ds_put_cstr(&ds, " done(truncated)");
        } else if (protocol == NETLINK_GENERIC) {
            struct genlmsghdr *genl = nl_msg_genlmsghdr(buffer);

                ds_put_format(&ds, ",genl(cmd=%"PRIu8",version=%"PRIu8")",
                              genl->cmd, genl->version);

        ds_put_cstr(&ds, "nl(truncated)");
log_nlmsg(const char *function, int error,
          const void *message, size_t size, int protocol)
    struct ofpbuf buffer;

    if (!VLOG_IS_DBG_ENABLED()) {

    ofpbuf_use_const(&buffer, message, size);
    nlmsg = nlmsg_to_string(&buffer, protocol);
    VLOG_DBG_RL(&rl, "%s (%s): %s", function, strerror(error), nlmsg);