/*
- * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
COVERAGE_DEFINE(netlink_overflow);
COVERAGE_DEFINE(netlink_received);
COVERAGE_DEFINE(netlink_recv_jumbo);
-COVERAGE_DEFINE(netlink_send);
COVERAGE_DEFINE(netlink_sent);
/* Linux header file confusion causes this to be undefined. */
}
if (msg.msg_flags & MSG_TRUNC) {
- VLOG_ERR_RL(&rl, "truncated message (longer than %zu bytes)",
+ VLOG_ERR_RL(&rl, "truncated message (longer than %"PRIuSIZE" bytes)",
sizeof tail);
return E2BIG;
}
if (retval < sizeof *nlmsghdr
|| nlmsghdr->nlmsg_len < sizeof *nlmsghdr
|| nlmsghdr->nlmsg_len > retval) {
- VLOG_ERR_RL(&rl, "received invalid nlmsg (%zd bytes < %zu)",
+ VLOG_ERR_RL(&rl, "received invalid nlmsg (%"PRIdSIZE" bytes < %"PRIuSIZE")",
retval, sizeof *nlmsghdr);
return EPROTO;
}
* Netlink socket created with the given 'protocol', and initializes 'dump' to
* reflect the state of the operation.
*
- * nlmsg_len in 'msg' will be finalized to match msg->size, and nlmsg_pid will
- * be set to the Netlink socket's pid, before the message is sent. NLM_F_DUMP
- * and NLM_F_ACK will be set in nlmsg_flags.
+ * 'request' must contain a Netlink message. Before sending the message,
+ * nlmsg_len will be finalized to match request->size, and nlmsg_pid will be
+ * set to the Netlink socket's pid. NLM_F_DUMP and NLM_F_ACK will be set in
+ * nlmsg_flags.
*
* The design of this Netlink socket library ensures that the dump is reliable.
*
- * This function provides no status indication. An error status for the entire
- * dump operation is provided when it is completed by calling nl_dump_done().
+ * This function provides no status indication. nl_dump_done() provides an
+ * error status for the entire dump operation.
*
- * The caller is responsible for destroying 'request'.
+ * The caller must eventually destroy 'request'.
*/
void
nl_dump_start(struct nl_dump *dump, int protocol, const struct ofpbuf *request)
nl_msg_nlmsghdr(request)->nlmsg_flags |= NLM_F_DUMP | NLM_F_ACK;
dump->status = nl_sock_send__(dump->sock, request,
nl_sock_allocate_seq(dump->sock, 1), true);
- dump->seq = nl_msg_nlmsghdr(request)->nlmsg_seq;
+ dump->nl_seq = nl_msg_nlmsghdr(request)->nlmsg_seq;
}
/* Helper function for nl_dump_next(). */
}
nlmsghdr = nl_msg_nlmsghdr(&dump->buffer);
- if (dump->seq != nlmsghdr->nlmsg_seq) {
+ if (dump->nl_seq != nlmsghdr->nlmsg_seq) {
VLOG_DBG_RL(&rl, "ignoring seq %#"PRIx32" != expected %#"PRIx32,
- nlmsghdr->nlmsg_seq, dump->seq);
+ nlmsghdr->nlmsg_seq, dump->nl_seq);
return EAGAIN;
}
/* Finds the multicast group called 'group_name' in genl family 'family_name'.
* When successful, writes its result to 'multicast_group' and returns 0.
* Otherwise, clears 'multicast_group' and returns a positive error code.
- *
- * Some kernels do not support looking up a multicast group with this function.
- * In this case, 'multicast_group' will be populated with 'fallback'. */
+ */
int
nl_lookup_genl_mcgroup(const char *family_name, const char *group_name,
- unsigned int *multicast_group, unsigned int fallback)
+ unsigned int *multicast_group)
{
struct nlattr *family_attrs[ARRAY_SIZE(family_policy)];
const struct nlattr *mc;
}
if (!family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
- *multicast_group = fallback;
- VLOG_WARN("%s-%s: has no multicast group, using fallback %d",
- family_name, group_name, *multicast_group);
- error = 0;
+ error = EPROTO;
goto exit;
}
int n;
};
-static struct nl_pool pools[MAX_LINKS];
-static pthread_mutex_t pool_mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
+static struct ovs_mutex pool_mutex = OVS_MUTEX_INITIALIZER;
+static struct nl_pool pools[MAX_LINKS] OVS_GUARDED_BY(pool_mutex);
static int
nl_pool_alloc(int protocol, struct nl_sock **sockp)
ovs_assert(protocol >= 0 && protocol < ARRAY_SIZE(pools));
- xpthread_mutex_lock(&pool_mutex);
+ ovs_mutex_lock(&pool_mutex);
pool = &pools[protocol];
if (pool->n > 0) {
sock = pool->socks[--pool->n];
}
- xpthread_mutex_unlock(&pool_mutex);
+ ovs_mutex_unlock(&pool_mutex);
if (sock) {
*sockp = sock;
if (sock) {
struct nl_pool *pool = &pools[sock->protocol];
- xpthread_mutex_lock(&pool_mutex);
+ ovs_mutex_lock(&pool_mutex);
if (pool->n < ARRAY_SIZE(pool->socks)) {
pool->socks[pool->n++] = sock;
sock = NULL;
}
- xpthread_mutex_unlock(&pool_mutex);
+ ovs_mutex_unlock(&pool_mutex);
nl_sock_destroy(sock);
}