libssl is installed, then Open vSwitch will automatically build
with support for it.
+ - clang, from LLVM, is optional. It provides a useful static semantic
+ analyzer and thread-safety checks. The clang version must be 3.4 or
+ later. For Ubuntu, there are nightly built packages available on
+ clang's website.
+
To compile the kernel module on Linux, you must also install the
following. If you cannot build or install the kernel module, you may
use the userspace-only implementation, at a cost in performance. The
- GNU make.
+ - clang, version 3.4 or later
+
Also, you may find the ovs-dev script found in utilities/ovs-dev.py useful.
Installation Requirements
% ./configure CC=gcc-4.2
+ To use 'clang' compiler:
+
+ % ./configure CC=clang
+
To build the Linux kernel module, so that you can run the
kernel-based switch, pass the location of the kernel build
directory on --with-linux. For example, to build for a running
OVS_ENABLE_OPTION([-Wold-style-definition])
OVS_ENABLE_OPTION([-Wmissing-prototypes])
OVS_ENABLE_OPTION([-Wmissing-field-initializers])
+OVS_ENABLE_OPTION([-Wthread-safety])
OVS_CONDITIONAL_CC_OPTION([-Wno-unused], [HAVE_WNO_UNUSED])
OVS_CONDITIONAL_CC_OPTION([-Wno-unused-parameter], [HAVE_WNO_UNUSED_PARAMETER])
OVS_ENABLE_WERROR
int pthread_mutex_lock(pthread_mutex_t *mutex) OVS_ACQUIRES(mutex);
int pthread_mutex_unlock(pthread_mutex_t *mutex) OVS_RELEASES(mutex);
-int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock) OVS_ACQUIRES(rwlock);
-int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock) OVS_ACQUIRES(rwlock);
+int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock) OVS_ACQ_RDLOCK(rwlock);
+int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock) OVS_ACQ_WRLOCK(rwlock);
int pthread_rwlock_unlock(pthread_rwlock_t *rwlock) OVS_RELEASES(rwlock);
int pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *mutex)
- OVS_MUST_HOLD(mutex);
+ OVS_REQUIRES(mutex);
/* Sparse complains about the proper PTHREAD_*_INITIALIZER definitions.
* Luckily, it's not a real compiler so we can overwrite it with something
({ \
int retval = pthread_mutex_trylock(mutex); \
if (!retval) { \
- OVS_ACQUIRE(MUTEX); \
+ OVS_MACRO_LOCK(MUTEX); \
} \
retval; \
})
({ \
int retval = pthread_rwlock_tryrdlock(rwlock); \
if (!retval) { \
- OVS_ACQUIRE(RWLOCK); \
+ OVS_MACRO_LOCK(RWLOCK); \
} \
retval; \
})
({ \
int retval = pthread_rwlock_trywrlock(rwlock); \
if (!retval) { \
- OVS_ACQUIRE(RWLOCK); \
+ OVS_MACRO_LOCK(RWLOCK); \
} \
retval; \
})
/* Process title. */
#ifdef LINUX_DATAPATH
-static char *argv_start; /* Start of command-line arguments in memory. */
-static size_t argv_size; /* Number of bytes of command-line arguments. */
-static char *saved_proctitle; /* Saved command-line arguments. */
-static pthread_mutex_t proctitle_mutex = PTHREAD_MUTEX_INITIALIZER;
+static struct ovs_mutex proctitle_mutex = OVS_MUTEX_INITIALIZER;
+
+/* Start of command-line arguments in memory. */
+static char *argv_start OVS_GUARDED_BY(proctitle_mutex);
+
+/* Number of bytes of command-line arguments. */
+static size_t argv_size OVS_GUARDED_BY(proctitle_mutex);
+
+/* Saved command-line arguments. */
+static char *saved_proctitle OVS_GUARDED_BY(proctitle_mutex);
/* Prepares the process so that proctitle_set() can later succeed.
*
return;
}
+ ovs_mutex_lock(&proctitle_mutex);
/* Specialized version of first loop iteration below. */
argv_start = argv[0];
argv_size = strlen(argv[0]) + 1;
/* Copy out the old argument so we can reuse the space. */
argv[i] = xstrdup(argv[i]);
}
+ ovs_mutex_unlock(&proctitle_mutex);
}
/* Changes the name of the process, as shown by "ps", to the program name
va_list args;
int n;
+ ovs_mutex_lock(&proctitle_mutex);
if (!argv_start || argv_size < 8) {
- return;
+ goto out;
}
- xpthread_mutex_lock(&proctitle_mutex);
if (!saved_proctitle) {
saved_proctitle = xmemdup(argv_start, argv_size);
}
memset(&argv_start[n], '\0', argv_size - n);
}
va_end(args);
- xpthread_mutex_unlock(&proctitle_mutex);
+
+out:
+ ovs_mutex_unlock(&proctitle_mutex);
}
/* Restores the process's original command line, as seen by "ps". */
void
proctitle_restore(void)
{
- xpthread_mutex_lock(&proctitle_mutex);
+ ovs_mutex_lock(&proctitle_mutex);
if (saved_proctitle) {
memcpy(argv_start, saved_proctitle, argv_size);
free(saved_proctitle);
saved_proctitle = NULL;
}
- xpthread_mutex_unlock(&proctitle_mutex);
+ ovs_mutex_unlock(&proctitle_mutex);
}
#else /* !LINUX_DATAPATH*/
/* Stubs that don't do anything on non-Linux systems. */
#ifndef COMPILER_H
#define COMPILER_H 1
+#ifndef __has_feature
+ #define __has_feature(x) 0
+#endif
+
#if __GNUC__ && !__CHECKER__
#define NO_RETURN __attribute__((__noreturn__))
#define OVS_UNUSED __attribute__((__unused__))
#define OVS_UNLIKELY(CONDITION) (!!(CONDITION))
#endif
-#ifdef __CHECKER__
-/* "sparse" annotations for mutexes and mutex-like constructs.
+#if __has_feature(c_thread_safety_attributes)
+/* "clang" annotations for thread safety check.
+ *
+ * OVS_LOCKABLE indicates that the struct contains a mutex element
+ * which can be locked by functions like ovs_mutex_lock().
+ *
+ * Below, the word MUTEX stands for the name of an object with an OVS_LOCKABLE
+ * struct type. It can also be a comma-separated list of multiple structs,
+ * e.g. to require a function to hold multiple locks while invoked.
+ *
+ *
+ * On a variable:
+ *
+ * - OVS_GUARDED indicates that the variable may only be accessed while some
+ * mutex is held.
+ *
+ * - OVS_GUARDED_BY(MUTEX) indicates that the variable may only be accessed
+ * while the specific MUTEX is held.
*
- * In a function prototype, OVS_ACQUIRES(MUTEX) indicates that the function
- * must be called without MUTEX acquired and that it returns with MUTEX
- * acquired. OVS_RELEASES(MUTEX) indicates the reverse. OVS_MUST_HOLD
- * indicates that the function must be called with MUTEX acquired by the
- * caller and that the function does not release MUTEX.
*
- * In practice, sparse ignores the MUTEX argument. It need not even be a
- * valid expression. It is meant to indicate to human readers what mutex is
- * being acquired.
+ * On a function, the following attributes apply to mutexes:
+ *
+ * - OVS_ACQUIRES(MUTEX) indicates that the function must be called without
+ * holding MUTEX and that it returns holding MUTEX.
+ *
+ * - OVS_RELEASES(MUTEX) indicates that the function may only be called with
+ * MUTEX held and that it returns with MUTEX released. It can be used for
+ * all types of MUTEX.
+ *
+ * - OVS_TRY_LOCK(RETVAL, MUTEX) indicates that the function will try to
+ * acquire MUTEX. RETVAL is an integer or boolean value specifying the
+ * return value of a successful lock acquisition.
+ *
+ * - OVS_REQUIRES(MUTEX) indicates that the function may only be called with
+ * MUTEX held and that the function does not release MUTEX.
+ *
+ * - OVS_LOCKS_EXCLUDED(MUTEX) indicates that the function may only be
+ * called when MUTEX is not held.
+ *
+ * The following variants, with the same syntax, apply to reader-writer locks:
+ *
+ * mutex rwlock, for reading rwlock, for writing
+ * ------------------- ------------------- -------------------
+ * OVS_ACQUIRES OVS_ACQ_RDLOCK OVS_ACQ_WRLOCK
+ * OVS_RELEASES OVS_RELEASES OVS_RELEASES
+ * OVS_TRY_LOCK OVS_TRY_RDLOCK OVS_TRY_WRLOCK
+ * OVS_REQUIRES OVS_REQ_RDLOCK OVS_REQ_WRLOCK
+ * OVS_LOCKS_EXCLUDED OVS_LOCKS_EXCLUDED OVS_LOCKS_EXCLUDED
+ */
+#define OVS_LOCKABLE __attribute__((lockable))
+#define OVS_REQ_RDLOCK(...) __attribute__((shared_locks_required(__VA_ARGS__)))
+#define OVS_ACQ_RDLOCK(...) __attribute__((shared_lock_function(__VA_ARGS__)))
+#define OVS_REQ_WRLOCK(...) \
+ __attribute__((exclusive_locks_required(__VA_ARGS__)))
+#define OVS_ACQ_WRLOCK(...) \
+ __attribute__((exclusive_lock_function(__VA_ARGS__)))
+#define OVS_REQUIRES(...) \
+ __attribute__((exclusive_locks_required(__VA_ARGS__)))
+#define OVS_ACQUIRES(...) \
+ __attribute__((exclusive_lock_function(__VA_ARGS__)))
+#define OVS_TRY_WRLOCK(RETVAL, ...) \
+ __attribute__((exclusive_trylock_function(RETVAL, __VA_ARGS__)))
+#define OVS_TRY_RDLOCK(RETVAL, ...) \
+ __attribute__((shared_trylock_function(RETVAL, __VA_ARGS__)))
+#define OVS_TRY_LOCK(RETVAL, ...) \
+ __attribute__((exclusive_trylock_function(RETVAL, __VA_ARGS__)))
+#define OVS_GUARDED __attribute__((guarded_var))
+#define OVS_GUARDED_BY(...) __attribute__((guarded_by(__VA_ARGS__)))
+#define OVS_RELEASES(...) __attribute__((unlock_function(__VA_ARGS__)))
+#define OVS_LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__)))
+#elif __CHECKER__
+/* "sparse" annotations for mutexes and mutex-like constructs.
*
- * Since sparse ignores MUTEX, it need not be an actual mutex. It can be
- * any construct on which paired acquire and release semantics make sense:
- * read/write locks, temporary memory allocations, whatever.
+ * Change the thread-safety check annotations to use the "context" attribute.
*
- * OVS_ACQUIRE, OVS_RELEASE, and OVS_HOLDS are suitable for use within macros,
+ * OVS_MACRO_LOCK and OVS_MACRO_RELEASE are suitable for use within macros,
* where there is no function prototype to annotate. */
-#define OVS_ACQUIRES(MUTEX) __attribute__((context(MUTEX, 0, 1)))
-#define OVS_RELEASES(MUTEX) __attribute__((context(MUTEX, 1, 0)))
-#define OVS_MUST_HOLD(MUTEX) __attribute__((context(MUTEX, 1, 1)))
-#define OVS_ACQUIRE(MUTEX) __context__(MUTEX, 0, 1)
-#define OVS_RELEASE(MUTEX) __context__(MUTEX, 1, 0)
-#define OVS_HOLDS(MUTEX) __context__(MUTEX, 1, 1)
+#define OVS_LOCKABLE
+#define OVS_REQ_RDLOCK(...) __attribute__((context(MUTEX, 1, 1)))
+#define OVS_ACQ_RDLOCK(...) __attribute__((context(MUTEX, 0, 1)))
+#define OVS_REQ_WRLOCK(...) __attribute__((context(MUTEX, 1, 1)))
+#define OVS_ACQ_WRLOCK(...) __attribute__((context(MUTEX, 0, 1)))
+#define OVS_REQUIRES(...) __attribute__((context(MUTEX, 1, 1)))
+#define OVS_ACQUIRES(...) __attribute__((context(MUTEX, 0, 1)))
+#define OVS_TRY_WRLOCK(RETVAL, ...)
+#define OVS_TRY_RDLOCK(RETVAL, ...)
+#define OVS_TRY_LOCK(RETVAL, ...)
+#define OVS_GUARDED
+#define OVS_GUARDED_BY(...)
+#define OVS_RELEASES(...) __attribute__((context(MUTEX, 1, 0)))
+#define OVS_MACRO_LOCK(...) __context__(MUTEX, 0, 1)
+#define OVS_MACRO_RELEASE(...) __context__(MUTEX, 1, 0)
#else
-#define OVS_ACQUIRES(MUTEX)
-#define OVS_RELEASES(MUTEX)
-#define OVS_MUST_HOLD(MUTEX)
-#define OVS_ACQUIRE(MUTEX)
-#define OVS_RELEASE(MUTEX)
-#define OVS_HOLDS(MUTEX)
+#define OVS_LOCKABLE
+#define OVS_REQ_RDLOCK(...)
+#define OVS_ACQ_RDLOCK(...)
+#define OVS_REQ_WRLOCK(...)
+#define OVS_ACQ_WRLOCK(...)
+#define OVS_REQUIRES(...)
+#define OVS_ACQUIRES(...)
+#define OVS_TRY_WRLOCK(...)
+#define OVS_TRY_RDLOCK(...)
+#define OVS_TRY_LOCK(...)
+#define OVS_GUARDED
+#define OVS_GUARDED_BY(...)
+#define OVS_RELEASES(...)
+#define OVS_MACRO_LOCK(...)
+#define OVS_MACRO_RELEASE(...)
#endif
/* ISO C says that a C implementation may choose any integer type for an enum
int dp_ifindex;
/* Upcall messages. */
- pthread_mutex_t upcall_lock;
+ struct ovs_mutex upcall_lock;
int uc_array_size; /* Size of 'channels' and 'epoll_events'. */
struct dpif_channel *channels;
struct epoll_event *epoll_events;
dpif = xzalloc(sizeof *dpif);
dpif->port_notifier = NULL;
- xpthread_mutex_init(&dpif->upcall_lock, NULL);
+ ovs_mutex_init(&dpif->upcall_lock, PTHREAD_MUTEX_DEFAULT);
dpif->epoll_fd = -1;
dpif_init(&dpif->dpif, &dpif_linux_class, dp->name,
if (dpif->epoll_fd >= 0) {
close(dpif->epoll_fd);
}
- xpthread_mutex_destroy(&dpif->upcall_lock);
+ ovs_mutex_destroy(&dpif->upcall_lock);
free(dpif);
}
struct dpif_linux *dpif = dpif_linux_cast(dpif_);
int error;
- xpthread_mutex_lock(&dpif->upcall_lock);
+ ovs_mutex_lock(&dpif->upcall_lock);
error = dpif_linux_port_add__(dpif_, netdev, port_nop);
- xpthread_mutex_unlock(&dpif->upcall_lock);
+ ovs_mutex_unlock(&dpif->upcall_lock);
return error;
}
struct dpif_linux *dpif = dpif_linux_cast(dpif_);
int error;
- xpthread_mutex_lock(&dpif->upcall_lock);
+ ovs_mutex_lock(&dpif->upcall_lock);
error = dpif_linux_port_del__(dpif_, port_no);
- xpthread_mutex_unlock(&dpif->upcall_lock);
+ ovs_mutex_unlock(&dpif->upcall_lock);
return error;
}
uint32_t port_idx = odp_to_u32(port_no);
uint32_t pid = 0;
- xpthread_mutex_lock(&dpif->upcall_lock);
+ ovs_mutex_lock(&dpif->upcall_lock);
if (dpif->epoll_fd >= 0) {
/* The ODPP_NONE "reserved" port number uses the "ovs-system"'s
* channel, since it is not heavily loaded. */
uint32_t idx = port_idx >= dpif->uc_array_size ? 0 : port_idx;
pid = nl_sock_pid(dpif->channels[idx].sock);
}
- xpthread_mutex_unlock(&dpif->upcall_lock);
+ ovs_mutex_unlock(&dpif->upcall_lock);
return pid;
}
struct dpif_linux *dpif = dpif_linux_cast(dpif_);
int error;
- xpthread_mutex_lock(&dpif->upcall_lock);
+ ovs_mutex_lock(&dpif->upcall_lock);
error = dpif_linux_recv_set__(dpif_, enable);
- xpthread_mutex_unlock(&dpif->upcall_lock);
+ ovs_mutex_unlock(&dpif->upcall_lock);
return error;
}
struct dpif_linux *dpif = dpif_linux_cast(dpif_);
int error;
- xpthread_mutex_lock(&dpif->upcall_lock);
+ ovs_mutex_lock(&dpif->upcall_lock);
error = dpif_linux_recv__(dpif_, upcall, buf);
- xpthread_mutex_unlock(&dpif->upcall_lock);
+ ovs_mutex_unlock(&dpif->upcall_lock);
return error;
}
{
struct dpif_linux *dpif = dpif_linux_cast(dpif_);
- xpthread_mutex_lock(&dpif->upcall_lock);
+ ovs_mutex_lock(&dpif->upcall_lock);
if (dpif->epoll_fd >= 0) {
poll_fd_wait(dpif->epoll_fd, POLLIN);
}
- xpthread_mutex_unlock(&dpif->upcall_lock);
+ ovs_mutex_unlock(&dpif->upcall_lock);
}
static void
{
struct dpif_linux *dpif = dpif_linux_cast(dpif_);
- xpthread_mutex_lock(&dpif->upcall_lock);
+ ovs_mutex_lock(&dpif->upcall_lock);
if (dpif->epoll_fd >= 0) {
struct dpif_channel *ch;
}
}
}
- xpthread_mutex_unlock(&dpif->upcall_lock);
+ ovs_mutex_unlock(&dpif->upcall_lock);
}
const struct dpif_class dpif_linux_class = {
static struct shash dp_netdevs = SHASH_INITIALIZER(&dp_netdevs);
/* Global lock for all data. */
-static pthread_mutex_t dp_netdev_mutex = PTHREAD_MUTEX_INITIALIZER;
+static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;
static int get_port_by_number(struct dp_netdev *, odp_port_t port_no,
struct dp_netdev_port **portp);
{
struct shash_node *node;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
SHASH_FOR_EACH(node, &dp_netdevs) {
sset_add(all_dps, node->name);
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return 0;
}
struct dp_netdev *dp;
int error;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
dp = shash_find_data(&dp_netdevs, name);
if (!dp) {
error = create ? create_dp_netdev(name, class, &dp) : ENODEV;
if (!error) {
*dpifp = create_dpif_netdev(dp);
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
{
struct dp_netdev *dp = get_dp_netdev(dpif);
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
ovs_assert(dp->open_cnt > 0);
if (--dp->open_cnt == 0 && dp->destroyed) {
}
free(dpif);
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
}
static int
{
struct dp_netdev *dp = get_dp_netdev(dpif);
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
dp->destroyed = true;
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return 0;
}
{
struct dp_netdev *dp = get_dp_netdev(dpif);
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
stats->n_flows = hmap_count(&dp->flow_table);
stats->n_hit = dp->n_hit;
stats->n_missed = dp->n_missed;
stats->n_lost = dp->n_lost;
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return 0;
}
odp_port_t port_no;
int error;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
if (*port_nop != ODPP_NONE) {
uint32_t port_idx = odp_to_u32(*port_nop);
*port_nop = port_no;
error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
struct dp_netdev *dp = get_dp_netdev(dpif);
int error;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
error = port_no == ODPP_LOCAL ? EINVAL : do_del_port(dp, port_no);
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
struct dp_netdev_port *port;
int error;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
error = get_port_by_number(dp, port_no, &port);
if (!error && dpif_port) {
answer_port_query(port, dpif_port);
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
struct dp_netdev_port *port;
int error;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
error = get_port_by_name(dp, devname, &port);
if (!error && dpif_port) {
answer_port_query(port, dpif_port);
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
{
struct dp_netdev *dp = get_dp_netdev(dpif);
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
dp_netdev_flow_flush(dp);
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return 0;
}
struct dp_netdev *dp = get_dp_netdev(dpif);
uint32_t port_idx;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
for (port_idx = odp_to_u32(state->port_no);
port_idx < MAX_PORTS; port_idx++) {
struct dp_netdev_port *port = dp->ports[port_idx];
dpif_port->type = port->type;
dpif_port->port_no = port->port_no;
state->port_no = u32_to_odp(port_idx + 1);
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return 0;
}
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return EOF;
}
struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
int error;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
if (dpif->dp_serial != dpif->dp->serial) {
dpif->dp_serial = dpif->dp->serial;
error = ENOBUFS;
} else {
error = EAGAIN;
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
* function and the poll_block() in one thread and a change in
* dpif->dp->serial in another thread. */
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
if (dpif->dp_serial != dpif->dp->serial) {
poll_immediate_wake();
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
}
static struct dp_netdev_flow *
return error;
}
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
flow = dp_netdev_lookup_flow(dp, &key);
if (flow) {
if (stats) {
} else {
error = ENOENT;
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
return error;
}
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
flow = dp_netdev_lookup_flow(dp, &key);
if (!flow) {
if (put->flags & DPIF_FP_CREATE) {
error = EEXIST;
}
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
return error;
}
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
flow = dp_netdev_lookup_flow(dp, &key);
if (flow) {
if (del->stats) {
} else {
error = ENOENT;
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
struct dp_netdev_flow *flow;
struct hmap_node *node;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
node = hmap_at_position(&dp->flow_table, &state->bucket, &state->offset);
if (!node) {
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return EOF;
}
*stats = &state->stats;
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return 0;
}
error = dpif_netdev_flow_from_nlattrs(execute->key, execute->key_len,
&key);
if (!error) {
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
dp_netdev_execute_actions(dp, ©, &key,
execute->actions, execute->actions_len);
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
}
ofpbuf_uninit(©);
struct dp_netdev_queue *q;
int error;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
q = find_nonempty_queue(dpif);
if (q) {
struct dp_netdev_upcall *u = &q->upcalls[q->tail++ & QUEUE_MASK];
} else {
error = EAGAIN;
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
* function and the poll_block() in one thread and a packet being queued in
* another thread. */
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
if (find_nonempty_queue(dpif)) {
poll_immediate_wake();
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
}
static void
dpif_netdev_recv_purge(struct dpif *dpif)
{
struct dpif_netdev *dpif_netdev = dpif_netdev_cast(dpif);
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
dp_netdev_purge_queues(dpif_netdev->dp);
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
}
\f
static void
struct dp_netdev *dp;
struct ofpbuf packet;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
dp = get_dp_netdev(dpif);
ofpbuf_init(&packet,
DP_NETDEV_HEADROOM + VLAN_ETH_HEADER_LEN + dp->max_mtu);
}
}
ofpbuf_uninit(&packet);
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
}
static void
* - In the dpif_port_remove() case, A might wake up spuriously, but
* that is harmless. */
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
LIST_FOR_EACH (port, node, &get_dp_netdev(dpif)->port_list) {
if (port->rx) {
netdev_rx_wait(port->rx);
}
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
}
static void
static struct sset dpif_blacklist = SSET_INITIALIZER(&dpif_blacklist);
/* Protects 'dpif_classes', including the refcount, and 'dpif_blacklist'. */
-static pthread_mutex_t dpif_mutex = PTHREAD_MUTEX_INITIALIZER;
+static struct ovs_mutex dpif_mutex = OVS_MUTEX_INITIALIZER;
/* Rate limit for individual messages going to or from the datapath, output at
* DBG level. This is very high because, if these are enabled, it is because
{
int error;
- xpthread_mutex_lock(&dpif_mutex);
+ ovs_mutex_lock(&dpif_mutex);
error = dp_register_provider__(new_class);
- xpthread_mutex_unlock(&dpif_mutex);
+ ovs_mutex_unlock(&dpif_mutex);
return error;
}
dp_initialize();
- xpthread_mutex_lock(&dpif_mutex);
+ ovs_mutex_lock(&dpif_mutex);
error = dp_unregister_provider__(type);
- xpthread_mutex_unlock(&dpif_mutex);
+ ovs_mutex_unlock(&dpif_mutex);
return error;
}
void
dp_blacklist_provider(const char *type)
{
- xpthread_mutex_lock(&dpif_mutex);
+ ovs_mutex_lock(&dpif_mutex);
sset_add(&dpif_blacklist, type);
- xpthread_mutex_unlock(&dpif_mutex);
+ ovs_mutex_unlock(&dpif_mutex);
}
/* Clears 'types' and enumerates the types of all currently registered datapath
dp_initialize();
sset_clear(types);
- xpthread_mutex_lock(&dpif_mutex);
+ ovs_mutex_lock(&dpif_mutex);
SHASH_FOR_EACH(node, &dpif_classes) {
const struct registered_dpif_class *registered_class = node->data;
sset_add(types, registered_class->dpif_class->type);
}
- xpthread_mutex_unlock(&dpif_mutex);
+ ovs_mutex_unlock(&dpif_mutex);
}
static void
dp_class_unref(struct registered_dpif_class *rc)
{
- xpthread_mutex_lock(&dpif_mutex);
+ ovs_mutex_lock(&dpif_mutex);
ovs_assert(rc->refcount);
rc->refcount--;
- xpthread_mutex_unlock(&dpif_mutex);
+ ovs_mutex_unlock(&dpif_mutex);
}
static struct registered_dpif_class *
{
struct registered_dpif_class *rc;
- xpthread_mutex_lock(&dpif_mutex);
+ ovs_mutex_lock(&dpif_mutex);
rc = shash_find_data(&dpif_classes, type);
if (rc) {
rc->refcount++;
}
- xpthread_mutex_unlock(&dpif_mutex);
+ ovs_mutex_unlock(&dpif_mutex);
return rc;
}
datapath_type = dpif_normalize_type(datapath_type);
- xpthread_mutex_lock(&dpif_mutex);
+ ovs_mutex_lock(&dpif_mutex);
rc = shash_find_data(&dpif_classes, datapath_type);
if (rc && rc->dpif_class->port_open_type) {
port_type = rc->dpif_class->port_open_type(rc->dpif_class, port_type);
}
- xpthread_mutex_unlock(&dpif_mutex);
+ ovs_mutex_unlock(&dpif_mutex);
return port_type;
}
static int signal_fds[2];
static volatile sig_atomic_t stored_sig_nr = SIG_ATOMIC_MAX;
-static pthread_mutex_t mutex;
+static struct ovs_mutex mutex;
static void atexit_handler(void);
static void call_hooks(int sig_nr);
static bool inited = false;
if (!inited) {
- pthread_mutexattr_t attr;
size_t i;
assert_single_threaded();
inited = true;
- xpthread_mutexattr_init(&attr);
- xpthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- xpthread_mutex_init(&mutex, &attr);
- xpthread_mutexattr_destroy(&attr);
-
+ ovs_mutex_init(&mutex, PTHREAD_MUTEX_RECURSIVE);
xpipe_nonblocking(signal_fds);
for (i = 0; i < ARRAY_SIZE(fatal_signals); i++) {
{
fatal_signal_init();
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
ovs_assert(n_hooks < MAX_HOOKS);
hooks[n_hooks].hook_cb = hook_cb;
hooks[n_hooks].cancel_cb = cancel_cb;
hooks[n_hooks].aux = aux;
hooks[n_hooks].run_at_exit = run_at_exit;
n_hooks++;
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
}
/* Handles fatal signal number 'sig_nr'.
if (sig_nr != SIG_ATOMIC_MAX) {
char namebuf[SIGNAL_NAME_BUFSIZE];
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
VLOG_WARN("terminating with signal %d (%s)",
(int)sig_nr, signal_name(sig_nr, namebuf, sizeof namebuf));
signal(sig_nr, SIG_DFL);
raise(sig_nr);
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
NOT_REACHED();
}
}
{
fatal_signal_init();
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
if (!added_hook) {
added_hook = true;
fatal_signal_add_hook(unlink_files, cancel_files, NULL, true);
}
sset_add(&files, file);
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
}
/* Unregisters 'file' from being unlinked when the program terminates via
{
fatal_signal_init();
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
sset_find_and_delete(&files, file);
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
}
/* Like fatal_signal_remove_file_to_unlink(), but also unlinks 'file'.
fatal_signal_init();
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
error = unlink(file) ? errno : 0;
if (error) {
fatal_signal_remove_file_to_unlink(file);
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
return error;
}
static struct json *
jsonrpc_create_id(void)
{
- static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
- static unsigned int next_id;
+ static atomic_uint next_id = ATOMIC_VAR_INIT(0);
unsigned int id;
- xpthread_mutex_lock(&mutex);
- id = next_id++;
- xpthread_mutex_unlock(&mutex);
-
+ atomic_add(&next_id, 1, &id);
return json_integer_create(id);
}
* descriptor for a file on which a process holds a lock drops *all* locks on
* that file. That means that we can't afford to open a lockfile more than
* once. */
-static struct hmap lock_table = HMAP_INITIALIZER(&lock_table);
-
-/* Protects 'lock_table'. */
-static pthread_mutex_t lock_table_mutex = PTHREAD_MUTEX_INITIALIZER;
+static struct ovs_mutex lock_table_mutex = OVS_MUTEX_INITIALIZER;
+static struct hmap lock_table__ = HMAP_INITIALIZER(&lock_table__);
+static struct hmap *const lock_table OVS_GUARDED_BY(lock_table_mutex)
+ = &lock_table__;
static void lockfile_unhash(struct lockfile *);
static int lockfile_try_lock(const char *name, pid_t *pidp,
lock_name = lockfile_name(file);
- xpthread_mutex_lock(&lock_table_mutex);
+ ovs_mutex_lock(&lock_table_mutex);
error = lockfile_try_lock(lock_name, &pid, lockfilep);
- xpthread_mutex_unlock(&lock_table_mutex);
+ ovs_mutex_unlock(&lock_table_mutex);
if (error) {
COVERAGE_INC(lockfile_error);
lockfile_unlock(struct lockfile *lockfile)
{
if (lockfile) {
- xpthread_mutex_lock(&lock_table_mutex);
+ ovs_mutex_lock(&lock_table_mutex);
lockfile_unhash(lockfile);
- xpthread_mutex_unlock(&lock_table_mutex);
+ ovs_mutex_unlock(&lock_table_mutex);
COVERAGE_INC(lockfile_unlock);
free(lockfile->name);
{
struct lockfile *lockfile;
- HMAP_FOR_EACH (lockfile, hmap_node, &lock_table) {
+ ovs_mutex_lock(&lock_table_mutex);
+ HMAP_FOR_EACH (lockfile, hmap_node, lock_table) {
if (lockfile->fd >= 0) {
VLOG_WARN("%s: child does not inherit lock", lockfile->name);
lockfile_unhash(lockfile);
}
}
+ ovs_mutex_unlock(&lock_table_mutex);
}
\f
static uint32_t
}
static struct lockfile *
-lockfile_find(dev_t device, ino_t inode)
+lockfile_find(dev_t device, ino_t inode) OVS_REQUIRES(&lock_table_mutex)
{
struct lockfile *lockfile;
HMAP_FOR_EACH_WITH_HASH (lockfile, hmap_node,
- lockfile_hash(device, inode), &lock_table) {
+ lockfile_hash(device, inode), lock_table) {
if (lockfile->device == device && lockfile->inode == inode) {
return lockfile;
}
}
static void
-lockfile_unhash(struct lockfile *lockfile)
+lockfile_unhash(struct lockfile *lockfile) OVS_REQUIRES(&lock_table_mutex)
{
if (lockfile->fd >= 0) {
close(lockfile->fd);
lockfile->fd = -1;
- hmap_remove(&lock_table, &lockfile->hmap_node);
+ hmap_remove(lock_table, &lockfile->hmap_node);
}
}
static struct lockfile *
lockfile_register(const char *name, dev_t device, ino_t inode, int fd)
+ OVS_REQUIRES(&lock_table_mutex)
{
struct lockfile *lockfile;
lockfile->device = device;
lockfile->inode = inode;
lockfile->fd = fd;
- hmap_insert(&lock_table, &lockfile->hmap_node,
+ hmap_insert(lock_table, &lockfile->hmap_node,
lockfile_hash(device, inode));
return lockfile;
}
static int
lockfile_try_lock(const char *name, pid_t *pidp, struct lockfile **lockfilep)
+ OVS_REQUIRES(&lock_table_mutex)
{
struct flock l;
struct stat s;
netdev_dummy_create(const struct netdev_class *class, const char *name,
struct netdev **netdevp)
{
- static unsigned int next_n = 0xaa550000;
- static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
-
+ static atomic_uint next_n = ATOMIC_VAR_INIT(0xaa550000);
struct netdev_dummy *netdev;
unsigned int n;
- xpthread_mutex_lock(&mutex);
- n = next_n++;
- xpthread_mutex_unlock(&mutex);
-
+ atomic_add(&next_n, 1, &n);
netdev = xzalloc(sizeof *netdev);
netdev_init(&netdev->up, name, class);
netdev->hwaddr[0] = 0xaa;
int n;
};
-static struct nl_pool pools[MAX_LINKS];
-static pthread_mutex_t pool_mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
+static struct ovs_mutex pool_mutex = OVS_ADAPTIVE_MUTEX_INITIALIZER;
+static struct nl_pool pools[MAX_LINKS] OVS_GUARDED_BY(pool_mutex);
static int
nl_pool_alloc(int protocol, struct nl_sock **sockp)
ovs_assert(protocol >= 0 && protocol < ARRAY_SIZE(pools));
- xpthread_mutex_lock(&pool_mutex);
+ ovs_mutex_lock(&pool_mutex);
pool = &pools[protocol];
if (pool->n > 0) {
sock = pool->socks[--pool->n];
}
- xpthread_mutex_unlock(&pool_mutex);
+ ovs_mutex_unlock(&pool_mutex);
if (sock) {
*sockp = sock;
if (sock) {
struct nl_pool *pool = &pools[sock->protocol];
- xpthread_mutex_lock(&pool_mutex);
+ ovs_mutex_lock(&pool_mutex);
if (pool->n < ARRAY_SIZE(pool->socks)) {
pool->socks[pool->n++] = sock;
sock = NULL;
}
- xpthread_mutex_unlock(&pool_mutex);
+ ovs_mutex_unlock(&pool_mutex);
nl_sock_destroy(sock);
}
static ovs_be32
alloc_xid(void)
{
- static uint32_t next_xid = 1;
- static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
+ static atomic_uint32_t next_xid = ATOMIC_VAR_INIT(1);
uint32_t xid;
- xpthread_mutex_lock(&mutex);
- xid = next_xid++;
- xpthread_mutex_unlock(&mutex);
-
+ atomic_add(&next_xid, 1, &xid);
return htonl(xid);
}
\f
parse_flow_monitor_request__(struct ofputil_flow_monitor_request *fmr,
const char *str_, char *string)
{
- static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
- static uint32_t id;
-
+ static atomic_uint32_t id = ATOMIC_VAR_INIT(0);
char *save_ptr = NULL;
char *name;
- xpthread_mutex_lock(&mutex);
- fmr->id = id++;
- xpthread_mutex_unlock(&mutex);
+ atomic_add(&id, 1, &fmr->id);
fmr->flags = (NXFMF_INITIAL | NXFMF_ADD | NXFMF_DELETE | NXFMF_MODIFY
| NXFMF_OWN | NXFMF_ACTIONS);
#include "ovs-thread.h"
#if OVS_ATOMIC_GCC4P_IMPL
-static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
+static struct ovs_mutex mutex = OVS_ADAPTIVE_MUTEX_INITIALIZER;
#define DEFINE_LOCKED_OP(TYPE, NAME, OPERATOR) \
TYPE##_t \
{ \
TYPE##_t old_value; \
\
- xpthread_mutex_lock(&mutex); \
+ ovs_mutex_lock(&mutex); \
old_value = u->value; \
u->value OPERATOR arg; \
- xpthread_mutex_unlock(&mutex); \
+ ovs_mutex_unlock(&mutex); \
\
return old_value; \
}
{ \
TYPE##_t value; \
\
- xpthread_mutex_lock(&mutex); \
+ ovs_mutex_lock(&mutex); \
value = u->value; \
- xpthread_mutex_unlock(&mutex); \
+ ovs_mutex_unlock(&mutex); \
\
return value; \
} \
void \
locked_##TYPE##_store(struct locked_##TYPE *u, TYPE##_t value) \
{ \
- xpthread_mutex_lock(&mutex); \
+ ovs_mutex_lock(&mutex); \
u->value = value; \
- xpthread_mutex_unlock(&mutex); \
+ ovs_mutex_unlock(&mutex); \
} \
DEFINE_LOCKED_OP(TYPE, add, +=); \
DEFINE_LOCKED_OP(TYPE, sub, -=); \
atomic_flag *flag = CONST_CAST(atomic_flag *, flag_);
bool old_value;
- xpthread_mutex_lock(&flag->mutex);
+ ovs_mutex_lock(&flag->mutex);
old_value = flag->b;
flag->b = true;
- xpthread_mutex_unlock(&flag->mutex);
+ ovs_mutex_unlock(&flag->mutex);
return old_value;
}
{
atomic_flag *flag = CONST_CAST(atomic_flag *, flag_);
- xpthread_mutex_lock(&flag->mutex);
+ ovs_mutex_lock(&flag->mutex);
flag->b = false;
- xpthread_mutex_unlock(&flag->mutex);
+ ovs_mutex_unlock(&flag->mutex);
}
void
typedef struct {
    bool b;
-    pthread_mutex_t mutex;
+    struct ovs_mutex mutex;
} atomic_flag;
-#define ATOMIC_FLAG_INIT { false, PTHREAD_MUTEX_INITIALIZER }
+/* 'mutex' is now a struct ovs_mutex ({ pthread_mutex_t lock; const char
+ * *where; }), so the initializer must use OVS_MUTEX_INITIALIZER; the bare
+ * PTHREAD_MUTEX_INITIALIZER no longer matches the struct's layout. */
+#define ATOMIC_FLAG_INIT { false, OVS_MUTEX_INITIALIZER }
/* True if we created any threads beyond the main initial thread. */
static bool multithreaded;
+/* Defines ovs_<TYPE>_<FUN>_at(), the out-of-line implementation behind the
+ * ovs_mutex_lock()/ovs_rwlock_*lock() macros: it takes the underlying
+ * pthread lock, aborts the process on any error, and records the caller's
+ * source location in 'where' for debugging.  The lock object is passed
+ * const-qualified so that locking does not force callers to hold non-const
+ * pointers; CONST_CAST strips the qualifier internally. */
+#define LOCK_FUNCTION(TYPE, FUN) \
+    void \
+    ovs_##TYPE##_##FUN##_at(const struct ovs_##TYPE *l_, \
+                            const char *where) \
+    { \
+        struct ovs_##TYPE *l = CONST_CAST(struct ovs_##TYPE *, l_); \
+        int error = pthread_##TYPE##_##FUN(&l->lock); \
+        if (OVS_UNLIKELY(error)) { \
+            ovs_abort(error, "pthread_%s_%s failed", #TYPE, #FUN); \
+        } \
+        l->where = where; \
+    }
+LOCK_FUNCTION(mutex, lock);
+LOCK_FUNCTION(rwlock, rdlock);
+LOCK_FUNCTION(rwlock, wrlock);
+
+/* Like LOCK_FUNCTION, but for the "try" variants: EBUSY (lock already held)
+ * is passed through to the caller instead of aborting, and 'where' is only
+ * recorded when the lock was actually acquired. */
+#define TRY_LOCK_FUNCTION(TYPE, FUN) \
+    int \
+    ovs_##TYPE##_##FUN##_at(const struct ovs_##TYPE *l_, \
+                            const char *where) \
+    { \
+        struct ovs_##TYPE *l = CONST_CAST(struct ovs_##TYPE *, l_); \
+        int error = pthread_##TYPE##_##FUN(&l->lock); \
+        if (OVS_UNLIKELY(error) && error != EBUSY) { \
+            ovs_abort(error, "pthread_%s_%s failed", #TYPE, #FUN); \
+        } \
+        if (!error) { \
+            l->where = where; \
+        } \
+        return error; \
+    }
+TRY_LOCK_FUNCTION(mutex, trylock);
+TRY_LOCK_FUNCTION(rwlock, tryrdlock);
+TRY_LOCK_FUNCTION(rwlock, trywrlock);
+
+
+/* Defines ovs_<TYPE>_<FUN>() for unlock/destroy: clears the recorded
+ * source location *before* releasing the underlying pthread lock (once the
+ * lock is released another thread may immediately reacquire it and set its
+ * own 'where'), and aborts the process on any error. */
+#define UNLOCK_FUNCTION(TYPE, FUN) \
+    void \
+    ovs_##TYPE##_##FUN(const struct ovs_##TYPE *l_) \
+    { \
+        struct ovs_##TYPE *l = CONST_CAST(struct ovs_##TYPE *, l_); \
+        int error; \
+        l->where = NULL; \
+        error = pthread_##TYPE##_##FUN(&l->lock); \
+        if (OVS_UNLIKELY(error)) { \
+            ovs_abort(error, "pthread_%s_%s failed", #TYPE, #FUN); \
+        } \
+    }
+UNLOCK_FUNCTION(mutex, unlock);
+UNLOCK_FUNCTION(mutex, destroy);
+UNLOCK_FUNCTION(rwlock, unlock);
+UNLOCK_FUNCTION(rwlock, destroy);
+
#define XPTHREAD_FUNC1(FUNCTION, PARAM1) \
void \
x##FUNCTION(PARAM1 arg1) \
ovs_abort(error, "%s failed", #FUNCTION); \
} \
}
-#define XPTHREAD_TRY_FUNC1(FUNCTION, PARAM1) \
- int \
- x##FUNCTION(PARAM1 arg1) \
- { \
- int error = FUNCTION(arg1); \
- if (OVS_UNLIKELY(error && error != EBUSY)) { \
- ovs_abort(error, "%s failed", #FUNCTION); \
- } \
- return error; \
- }
#define XPTHREAD_FUNC2(FUNCTION, PARAM1, PARAM2) \
void \
x##FUNCTION(PARAM1 arg1, PARAM2 arg2) \
} \
}
-XPTHREAD_FUNC2(pthread_mutex_init, pthread_mutex_t *, pthread_mutexattr_t *);
-XPTHREAD_FUNC1(pthread_mutex_destroy, pthread_mutex_t *);
-XPTHREAD_FUNC1(pthread_mutex_lock, pthread_mutex_t *);
-XPTHREAD_FUNC1(pthread_mutex_unlock, pthread_mutex_t *);
-XPTHREAD_TRY_FUNC1(pthread_mutex_trylock, pthread_mutex_t *);
-
XPTHREAD_FUNC1(pthread_mutexattr_init, pthread_mutexattr_t *);
XPTHREAD_FUNC1(pthread_mutexattr_destroy, pthread_mutexattr_t *);
XPTHREAD_FUNC2(pthread_mutexattr_settype, pthread_mutexattr_t *, int);
XPTHREAD_FUNC2(pthread_mutexattr_gettype, pthread_mutexattr_t *, int *);
-XPTHREAD_FUNC2(pthread_rwlock_init,
- pthread_rwlock_t *, pthread_rwlockattr_t *);
-XPTHREAD_FUNC1(pthread_rwlock_destroy, pthread_rwlock_t *);
-XPTHREAD_FUNC1(pthread_rwlock_rdlock, pthread_rwlock_t *);
-XPTHREAD_FUNC1(pthread_rwlock_wrlock, pthread_rwlock_t *);
-XPTHREAD_FUNC1(pthread_rwlock_unlock, pthread_rwlock_t *);
-XPTHREAD_TRY_FUNC1(pthread_rwlock_tryrdlock, pthread_rwlock_t *);
-XPTHREAD_TRY_FUNC1(pthread_rwlock_trywrlock, pthread_rwlock_t *);
-
XPTHREAD_FUNC2(pthread_cond_init, pthread_cond_t *, pthread_condattr_t *);
XPTHREAD_FUNC1(pthread_cond_destroy, pthread_cond_t *);
XPTHREAD_FUNC1(pthread_cond_signal, pthread_cond_t *);
XPTHREAD_FUNC1(pthread_cond_broadcast, pthread_cond_t *);
-XPTHREAD_FUNC2(pthread_cond_wait, pthread_cond_t *, pthread_mutex_t *);
typedef void destructor_func(void *);
XPTHREAD_FUNC2(pthread_key_create, pthread_key_t *, destructor_func *);
+/* Initializes 'l_' as a mutex of the given pthread mutex 'type' (e.g.
+ * PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_ERRORCHECK, or OVS_MUTEX_ADAPTIVE).
+ * Aborts the process if initialization fails. */
+void
+ovs_mutex_init(const struct ovs_mutex *l_, int type)
+{
+    struct ovs_mutex *l = CONST_CAST(struct ovs_mutex *, l_);
+    pthread_mutexattr_t attr;
+    int error;
+
+    l->where = NULL;
+    xpthread_mutexattr_init(&attr);
+    xpthread_mutexattr_settype(&attr, type);
+    error = pthread_mutex_init(&l->lock, &attr);
+    if (OVS_UNLIKELY(error)) {
+        ovs_abort(error, "pthread_mutex_init failed");
+    }
+    xpthread_mutexattr_destroy(&attr);
+}
+
+/* Initializes 'l_' as a read-write lock with default attributes.  Aborts
+ * the process if initialization fails. */
+void
+ovs_rwlock_init(const struct ovs_rwlock *l_)
+{
+    struct ovs_rwlock *l = CONST_CAST(struct ovs_rwlock *, l_);
+    int error;
+
+    l->where = NULL;
+    error = pthread_rwlock_init(&l->lock, NULL);
+    if (OVS_UNLIKELY(error)) {
+        ovs_abort(error, "pthread_rwlock_init failed");
+    }
+}
+
+/* Waits on 'cond', which the caller must have signaled arrangements for,
+ * atomically releasing 'mutex_' (which the caller must hold) and
+ * reacquiring it before returning.  Aborts the process on error. */
+void
+ovs_mutex_cond_wait(pthread_cond_t *cond, const struct ovs_mutex *mutex_)
+{
+    struct ovs_mutex *mutex = CONST_CAST(struct ovs_mutex *, mutex_);
+    int error = pthread_cond_wait(cond, &mutex->lock);
+    if (OVS_UNLIKELY(error)) {
+        ovs_abort(error, "pthread_cond_wait failed");
+    }
+}
+
+
void
xpthread_create(pthread_t *threadp, pthread_attr_t *attr,
void *(*start)(void *), void *arg)
bool
ovsthread_once_start__(struct ovsthread_once *once)
{
- xpthread_mutex_lock(&once->mutex);
+ ovs_mutex_lock(&once->mutex);
if (!ovsthread_once_is_done__(once)) {
return false;
}
- xpthread_mutex_unlock(&once->mutex);
+ ovs_mutex_unlock(&once->mutex);
return true;
}
-void OVS_RELEASES(once)
+void
ovsthread_once_done(struct ovsthread_once *once)
{
atomic_store(&once->done, true);
- xpthread_mutex_unlock(&once->mutex);
+ ovs_mutex_unlock(&once->mutex);
}
\f
/* Asserts that the process has not yet created any threads (beyond the initial
pid = fork();
if (pid < 0) {
- VLOG_FATAL("fork failed (%s)", ovs_strerror(errno));
+ VLOG_FATAL("%s: fork failed (%s)", where, ovs_strerror(errno));
}
return pid;
}
#include "ovs-atomic.h"
#include "util.h"
-/* glibc has some non-portable mutex types and initializers:
- *
- * - PTHREAD_MUTEX_ADAPTIVE_NP is a mutex type that works as a spinlock that
- * falls back to a mutex after spinning for some number of iterations.
+
+/* Mutex. */
+struct OVS_LOCKABLE ovs_mutex {
+ pthread_mutex_t lock;
+ const char *where;
+};
+
+/* "struct ovs_mutex" initializers:
*
- * - PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP is a non-portable initializer
- * for an error-checking mutex.
+ * - OVS_MUTEX_INITIALIZER: common case.
*
- * We use these definitions to fall back to PTHREAD_MUTEX_NORMAL instead in
- * these cases.
+ * - OVS_ADAPTIVE_MUTEX_INITIALIZER for a mutex that spins briefly then goes
+ * to sleeps after some number of iterations.
*
- * (glibc has other non-portable initializers, but we can't reasonably
- * substitute for them here.) */
+ * - OVS_ERRORCHECK_MUTEX_INITIALIZER for a mutex that is used for
+ * error-checking. */
+#define OVS_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, NULL }
#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
-#define PTHREAD_MUTEX_ADAPTIVE PTHREAD_MUTEX_ADAPTIVE_NP
-#define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER \
- PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
+#define OVS_ADAPTIVE_MUTEX_INITIALIZER \
+ { PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, NULL }
#else
-#define PTHREAD_MUTEX_ADAPTIVE PTHREAD_MUTEX_NORMAL
-#define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+#define OVS_ADAPTIVE_MUTEX_INITIALIZER OVS_MUTEX_INITIALIZER
#endif
-
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
-#define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER \
- PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
+#define OVS_ERRORCHECK_MUTEX_INITIALIZER \
+ { PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, NULL }
#else
-#define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+#define OVS_ERRORCHECK_MUTEX_INITIALIZER OVS_MUTEX_INITIALIZER
#endif
\f
-/* Simple wrappers for pthreads functions. Most of these functions abort the
- * process with an error message on any error. The *_trylock() functions are
- * exceptions: they pass through a 0 or EBUSY return value to the caller and
- * abort on any other error. */
+/* Mutex types, suitable for use with pthread_mutexattr_settype().
+ * There is only one nonstandard type:
+ *
+ * - PTHREAD_MUTEX_ADAPTIVE_NP, the type used for
+ * OVS_ADAPTIVE_MUTEX_INITIALIZER. */
+#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
+#define OVS_MUTEX_ADAPTIVE PTHREAD_MUTEX_ADAPTIVE_NP
+#else
+#define OVS_MUTEX_ADAPTIVE PTHREAD_MUTEX_NORMAL
+#endif
+
+/* ovs_mutex functions analogous to pthread_mutex_*() functions.
+ *
+ * Most of these functions abort the process with an error message on any
+ * error. ovs_mutex_trylock() is an exception: it passes through a 0 or EBUSY
+ * return value to the caller and aborts on any other error. */
+void ovs_mutex_init(const struct ovs_mutex *, int type);
+void ovs_mutex_destroy(const struct ovs_mutex *);
+void ovs_mutex_unlock(const struct ovs_mutex *mutex) OVS_RELEASES(mutex);
+void ovs_mutex_lock_at(const struct ovs_mutex *mutex, const char *where)
+ OVS_ACQUIRES(mutex);
+#define ovs_mutex_lock(mutex) \
+ ovs_mutex_lock_at(mutex, SOURCE_LOCATOR)
-void xpthread_mutex_init(pthread_mutex_t *, pthread_mutexattr_t *);
-void xpthread_mutex_destroy(pthread_mutex_t *);
-void xpthread_mutex_lock(pthread_mutex_t *mutex) OVS_ACQUIRES(mutex);
-void xpthread_mutex_unlock(pthread_mutex_t *mutex) OVS_RELEASES(mutex);
-int xpthread_mutex_trylock(pthread_mutex_t *);
+int ovs_mutex_trylock_at(const struct ovs_mutex *mutex, const char *where)
+ OVS_TRY_LOCK(0, mutex);
+#define ovs_mutex_trylock(mutex) \
+ ovs_mutex_trylock_at(mutex, SOURCE_LOCATOR)
+void ovs_mutex_cond_wait(pthread_cond_t *, const struct ovs_mutex *);
+
+/* Wrappers for pthread_mutexattr_*() that abort the process on any error. */
void xpthread_mutexattr_init(pthread_mutexattr_t *);
void xpthread_mutexattr_destroy(pthread_mutexattr_t *);
void xpthread_mutexattr_settype(pthread_mutexattr_t *, int type);
void xpthread_mutexattr_gettype(pthread_mutexattr_t *, int *typep);
-void xpthread_rwlock_init(pthread_rwlock_t *, pthread_rwlockattr_t *);
-void xpthread_rwlock_destroy(pthread_rwlock_t *);
-void xpthread_rwlock_rdlock(pthread_rwlock_t *rwlock) OVS_ACQUIRES(rwlock);
-void xpthread_rwlock_wrlock(pthread_rwlock_t *rwlock) OVS_ACQUIRES(rwlock);
-void xpthread_rwlock_unlock(pthread_rwlock_t *rwlock) OVS_RELEASES(rwlock);
-int xpthread_rwlock_tryrdlock(pthread_rwlock_t *);
-int xpthread_rwlock_trywrlock(pthread_rwlock_t *);
+/* Read-write lock. */
+struct OVS_LOCKABLE ovs_rwlock {
+ pthread_rwlock_t lock;
+ const char *where;
+};
+
+/* Initializer. */
+#define OVS_RWLOCK_INITIALIZER { PTHREAD_RWLOCK_INITIALIZER, NULL }
+
+/* ovs_rwlock functions analogous to pthread_rwlock_*() functions.
+ *
+ * Most of these functions abort the process with an error message on any
+ * error. The "trylock" functions are exception: they pass through a 0 or
+ * EBUSY return value to the caller and abort on any other error. */
+void ovs_rwlock_init(const struct ovs_rwlock *);
+void ovs_rwlock_destroy(const struct ovs_rwlock *);
+void ovs_rwlock_unlock(const struct ovs_rwlock *rwlock) OVS_RELEASES(rwlock);
+
+void ovs_rwlock_wrlock_at(const struct ovs_rwlock *rwlock, const char *where)
+    OVS_ACQ_WRLOCK(rwlock);
+/* No trailing semicolon in the expansion: callers write
+ * "ovs_rwlock_wrlock(l);" themselves, and a semicolon here would break
+ * "if (c) ovs_rwlock_wrlock(l); else ...". */
+#define ovs_rwlock_wrlock(rwlock) \
+        ovs_rwlock_wrlock_at(rwlock, SOURCE_LOCATOR)
+int ovs_rwlock_trywrlock_at(const struct ovs_rwlock *rwlock, const char *where)
+ OVS_TRY_WRLOCK(0, rwlock);
+#define ovs_rwlock_trywrlock(rwlock) \
+ ovs_rwlock_trywrlock_at(rwlock, SOURCE_LOCATOR)
+
+void ovs_rwlock_rdlock_at(const struct ovs_rwlock *rwlock, const char *where)
+    OVS_ACQ_RDLOCK(rwlock);
+/* No trailing semicolon in the expansion, matching ovs_mutex_lock() and the
+ * trylock macros; a semicolon here would expand to a stray statement. */
+#define ovs_rwlock_rdlock(rwlock) \
+        ovs_rwlock_rdlock_at(rwlock, SOURCE_LOCATOR)
+
+int ovs_rwlock_tryrdlock_at(const struct ovs_rwlock *rwlock, const char *where)
+ OVS_TRY_RDLOCK(0, rwlock);
+#define ovs_rwlock_tryrdlock(rwlock) \
+ ovs_rwlock_tryrdlock_at(rwlock, SOURCE_LOCATOR)
+
+/* Wrappers for xpthread_cond_*() that abort the process on any error.
+ *
+ * Use ovs_mutex_cond_wait() to wait for a condition. */
void xpthread_cond_init(pthread_cond_t *, pthread_condattr_t *);
void xpthread_cond_destroy(pthread_cond_t *);
void xpthread_cond_signal(pthread_cond_t *);
void xpthread_cond_broadcast(pthread_cond_t *);
-void xpthread_cond_wait(pthread_cond_t *, pthread_mutex_t *mutex)
- OVS_MUST_HOLD(mutex);
#ifdef __CHECKER__
/* Replace these functions by the macros already defined in the <pthread.h>
struct ovsthread_once {
atomic_bool done;
- pthread_mutex_t mutex;
+ struct ovs_mutex mutex;
};
#define OVSTHREAD_ONCE_INITIALIZER \
{ \
ATOMIC_VAR_INIT(false), \
- PTHREAD_ADAPTIVE_MUTEX_INITIALIZER, \
+ OVS_ADAPTIVE_MUTEX_INITIALIZER, \
}
-static inline bool ovsthread_once_start(struct ovsthread_once *);
-void ovsthread_once_done(struct ovsthread_once *once) OVS_RELEASES(once);
+static inline bool ovsthread_once_start(struct ovsthread_once *once)
+ OVS_TRY_LOCK(true, &once->mutex);
+void ovsthread_once_done(struct ovsthread_once *once)
+ OVS_RELEASES(&once->mutex);
-bool ovsthread_once_start__(struct ovsthread_once *);
+bool ovsthread_once_start__(struct ovsthread_once *once)
+ OVS_TRY_LOCK(false, &once->mutex);
static inline bool
ovsthread_once_is_done__(const struct ovsthread_once *once)
#ifdef __CHECKER__
#define ovsthread_once_start(ONCE) \
- ((ONCE)->done ? false : ({ OVS_ACQUIRE(ONCE); true; }))
+ ((ONCE)->done ? false : ({ OVS_MACRO_LOCK((&ONCE->mutex)); true; }))
#endif
\f
void assert_single_threaded_at(const char *where);
struct clock {
clockid_t id; /* CLOCK_MONOTONIC or CLOCK_REALTIME. */
- pthread_rwlock_t rwlock; /* Mutual exclusion for 'cache'. */
+ struct ovs_rwlock rwlock; /* Mutual exclusion for 'cache'. */
/* Features for use by unit tests. Protected by 'rwlock'. */
struct timespec warp; /* Offset added for unit tests. */
{
memset(c, 0, sizeof *c);
c->id = id;
- xpthread_rwlock_init(&c->rwlock, NULL);
+ ovs_rwlock_init(&c->rwlock);
xclock_gettime(c->id, &c->cache);
}
for (;;) {
/* Use the cached time by preference, but fall through if there's been
* a clock tick. */
- xpthread_rwlock_rdlock(&c->rwlock);
+ ovs_rwlock_rdlock(&c->rwlock);
if (c->stopped || !c->tick) {
timespec_add(ts, &c->cache, &c->warp);
- xpthread_rwlock_unlock(&c->rwlock);
+ ovs_rwlock_unlock(&c->rwlock);
return;
}
- xpthread_rwlock_unlock(&c->rwlock);
+ ovs_rwlock_unlock(&c->rwlock);
/* Refresh the cache. */
- xpthread_rwlock_wrlock(&c->rwlock);
+ ovs_rwlock_wrlock(&c->rwlock);
if (c->tick) {
c->tick = false;
xclock_gettime(c->id, &c->cache);
}
- xpthread_rwlock_unlock(&c->rwlock);
+ ovs_rwlock_unlock(&c->rwlock);
}
}
int argc OVS_UNUSED, const char *argv[] OVS_UNUSED,
void *aux OVS_UNUSED)
{
- xpthread_rwlock_wrlock(&monotonic_clock.rwlock);
+ ovs_rwlock_wrlock(&monotonic_clock.rwlock);
monotonic_clock.stopped = true;
- xpthread_rwlock_unlock(&monotonic_clock.rwlock);
+ ovs_rwlock_unlock(&monotonic_clock.rwlock);
unixctl_command_reply(conn, NULL);
}
ts.tv_sec = msecs / 1000;
ts.tv_nsec = (msecs % 1000) * 1000 * 1000;
- xpthread_rwlock_wrlock(&monotonic_clock.rwlock);
+ ovs_rwlock_wrlock(&monotonic_clock.rwlock);
timespec_add(&monotonic_clock.warp, &monotonic_clock.warp, &ts);
- xpthread_rwlock_unlock(&monotonic_clock.rwlock);
+ ovs_rwlock_unlock(&monotonic_clock.rwlock);
unixctl_command_reply(conn, "warped");
}
void
uuid_generate(struct uuid *uuid)
{
- static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
+ static struct ovs_mutex mutex = OVS_ADAPTIVE_MUTEX_INITIALIZER;
uint64_t copy[2];
uuid_init();
/* Copy out the counter's current value, then increment it. */
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
copy[0] = counter[0];
copy[1] = counter[1];
if (++counter[1] == 0) {
counter[0]++;
}
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
/* AES output is exactly 16 bytes, so we encrypt directly into 'uuid'. */
aes128_encrypt(&key, copy, uuid);
#define n_vlog_modules ARRAY_SIZE(vlog_modules)
#endif
+/* Protects the 'pattern' in all "struct facility"s, so that a race between
+ * changing and reading the pattern does not cause an access to freed
+ * memory. */
+static struct ovs_rwlock pattern_rwlock = OVS_RWLOCK_INITIALIZER;
+
/* Information about each facility. */
struct facility {
const char *name; /* Name. */
- char *pattern; /* Current pattern (see 'pattern_rwlock'). */
+ char *pattern OVS_GUARDED_BY(pattern_rwlock); /* Current pattern. */
bool default_pattern; /* Whether current pattern is the default. */
};
static struct facility facilities[VLF_N_FACILITIES] = {
#undef VLOG_FACILITY
};
-/* Protects the 'pattern' in all "struct facility"s, so that a race between
- * changing and reading the pattern does not cause an access to freed
- * memory. */
-static pthread_rwlock_t pattern_rwlock = PTHREAD_RWLOCK_INITIALIZER;
-
/* Sequence number for the message currently being composed. */
DEFINE_PER_THREAD_DATA(unsigned int, msg_num, 0);
*
* All of the following is protected by 'log_file_mutex', which nests inside
* pattern_rwlock. */
-static pthread_mutex_t log_file_mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
-static char *log_file_name;
-static int log_fd = -1;
-static struct async_append *log_writer;
+static struct ovs_mutex log_file_mutex = OVS_ADAPTIVE_MUTEX_INITIALIZER;
+static char *log_file_name OVS_GUARDED_BY(log_file_mutex);
+static int log_fd OVS_GUARDED_BY(log_file_mutex) = -1;
+static struct async_append *log_writer OVS_GUARDED_BY(log_file_mutex);
static void format_log_message(const struct vlog_module *, enum vlog_level,
enum vlog_facility,
const char *message, va_list, struct ds *)
- PRINTF_FORMAT(4, 0);
+ PRINTF_FORMAT(4, 0) OVS_REQ_RDLOCK(&pattern_rwlock);
/* Searches the 'n_names' in 'names'. Returns the index of a match for
* 'target', or 'n_names' if no name matches. */
return module->levels[facility];
}
-static void OVS_MUST_HOLD(log_file_mutex)
-update_min_level(struct vlog_module *module)
+static void
+update_min_level(struct vlog_module *module) OVS_REQUIRES(&log_file_mutex)
{
enum vlog_facility facility;
assert(facility >= 0 && facility < VLF_N_FACILITIES);
assert(level < VLL_N_LEVELS);
- xpthread_mutex_lock(&log_file_mutex);
+ ovs_mutex_lock(&log_file_mutex);
if (!module) {
struct vlog_module **mp;
module->levels[facility] = level;
update_min_level(module);
}
- xpthread_mutex_unlock(&log_file_mutex);
+ ovs_mutex_unlock(&log_file_mutex);
}
/* Sets the logging level for the given 'module' and 'facility' to 'level'. A
{
struct facility *f = &facilities[facility];
- xpthread_rwlock_wrlock(&pattern_rwlock);
+ ovs_rwlock_wrlock(&pattern_rwlock);
if (!f->default_pattern) {
free(f->pattern);
} else {
f->default_pattern = false;
}
f->pattern = xstrdup(pattern);
- xpthread_rwlock_unlock(&pattern_rwlock);
+ ovs_rwlock_unlock(&pattern_rwlock);
}
/* Sets the pattern for the given 'facility' to 'pattern'. */
struct stat new_stat;
int new_log_fd;
bool same_file;
+ bool log_close;
/* Open new log file. */
new_log_file_name = (file_name
}
/* If the new log file is the same one we already have open, bail out. */
- xpthread_mutex_lock(&log_file_mutex);
+ ovs_mutex_lock(&log_file_mutex);
same_file = (log_fd >= 0
&& new_log_fd >= 0
&& !fstat(log_fd, &old_stat)
&& !fstat(new_log_fd, &new_stat)
&& old_stat.st_dev == new_stat.st_dev
&& old_stat.st_ino == new_stat.st_ino);
- xpthread_mutex_unlock(&log_file_mutex);
+ ovs_mutex_unlock(&log_file_mutex);
if (same_file) {
close(new_log_fd);
free(new_log_file_name);
}
/* Log closing old log file (we can't log while holding log_file_mutex). */
- if (log_fd >= 0) {
+ ovs_mutex_lock(&log_file_mutex);
+ log_close = log_fd >= 0;
+ ovs_mutex_unlock(&log_file_mutex);
+ if (log_close) {
VLOG_INFO("closing log file");
}
/* Close old log file, if any, and install new one. */
- xpthread_mutex_lock(&log_file_mutex);
+ ovs_mutex_lock(&log_file_mutex);
if (log_fd >= 0) {
free(log_file_name);
close(log_fd);
for (mp = vlog_modules; mp < &vlog_modules[n_vlog_modules]; mp++) {
update_min_level(*mp);
}
- xpthread_mutex_unlock(&log_file_mutex);
+ ovs_mutex_unlock(&log_file_mutex);
/* Log opening new log file (we can't log while holding log_file_mutex). */
VLOG_INFO("opened log file %s", new_log_file_name);
{
char *fn;
- xpthread_mutex_lock(&log_file_mutex);
+ ovs_mutex_lock(&log_file_mutex);
fn = log_file_name ? xstrdup(log_file_name) : NULL;
- xpthread_mutex_unlock(&log_file_mutex);
+ ovs_mutex_unlock(&log_file_mutex);
if (fn) {
int error = vlog_set_log_file(fn);
vlog_unixctl_reopen(struct unixctl_conn *conn, int argc OVS_UNUSED,
const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
- if (log_file_name) {
+ bool has_log_file;
+
+ ovs_mutex_lock(&log_file_mutex);
+ has_log_file = log_file_name != NULL;
+ ovs_mutex_unlock(&log_file_mutex);
+
+ if (has_log_file) {
int error = vlog_reopen_log_file();
if (error) {
unixctl_command_reply_error(conn, ovs_strerror(errno));
{
bool log_to_console = module->levels[VLF_CONSOLE] >= level;
bool log_to_syslog = module->levels[VLF_SYSLOG] >= level;
- bool log_to_file = module->levels[VLF_FILE] >= level && log_fd >= 0;
+ bool log_to_file;
+
+ ovs_mutex_lock(&log_file_mutex);
+ log_to_file = module->levels[VLF_FILE] >= level && log_fd >= 0;
+ ovs_mutex_unlock(&log_file_mutex);
if (log_to_console || log_to_syslog || log_to_file) {
int save_errno = errno;
struct ds s;
ds_reserve(&s, 1024);
++*msg_num_get();
- xpthread_rwlock_rdlock(&pattern_rwlock);
+ ovs_rwlock_rdlock(&pattern_rwlock);
if (log_to_console) {
format_log_message(module, level, VLF_CONSOLE, message, args, &s);
ds_put_char(&s, '\n');
format_log_message(module, level, VLF_FILE, message, args, &s);
ds_put_char(&s, '\n');
- xpthread_mutex_lock(&log_file_mutex);
+ ovs_mutex_lock(&log_file_mutex);
if (log_fd >= 0) {
async_append_write(log_writer, s.string, s.length);
if (level == VLL_EMER) {
async_append_flush(log_writer);
}
}
- xpthread_mutex_unlock(&log_file_mutex);
+ ovs_mutex_unlock(&log_file_mutex);
}
- xpthread_rwlock_unlock(&pattern_rwlock);
+ ovs_rwlock_unlock(&pattern_rwlock);
ds_destroy(&s);
errno = save_errno;
return true;
}
- xpthread_mutex_lock(&rl->mutex);
+ ovs_mutex_lock(&rl->mutex);
if (!token_bucket_withdraw(&rl->token_bucket, VLOG_MSG_TOKENS)) {
time_t now = time_now();
if (!rl->n_dropped) {
}
rl->last_dropped = now;
rl->n_dropped++;
- xpthread_mutex_unlock(&rl->mutex);
+ ovs_mutex_unlock(&rl->mutex);
return true;
}
if (!rl->n_dropped) {
- xpthread_mutex_unlock(&rl->mutex);
+ ovs_mutex_unlock(&rl->mutex);
} else {
time_t now = time_now();
unsigned int n_dropped = rl->n_dropped;
unsigned int first_dropped_elapsed = now - rl->first_dropped;
unsigned int last_dropped_elapsed = now - rl->last_dropped;
rl->n_dropped = 0;
- xpthread_mutex_unlock(&rl->mutex);
+ ovs_mutex_unlock(&rl->mutex);
vlog(module, level,
"Dropped %u log messages in last %u seconds (most recently, "
time_t first_dropped; /* Time first message was dropped. */
time_t last_dropped; /* Time of most recent message drop. */
unsigned int n_dropped; /* Number of messages dropped. */
- pthread_mutex_t mutex; /* Mutual exclusion for rate limit. */
+ struct ovs_mutex mutex; /* Mutual exclusion for rate limit. */
};
/* Number of tokens to emit a message. We add 'rate' tokens per millisecond,
0, /* first_dropped */ \
0, /* last_dropped */ \
0, /* n_dropped */ \
- PTHREAD_ADAPTIVE_MUTEX_INITIALIZER /* mutex */ \
+ OVS_ADAPTIVE_MUTEX_INITIALIZER /* mutex */ \
}
/* Configuring how each module logs messages. */
\f
#define SYSTEM_STATS_INTERVAL (5 * 1000) /* In milliseconds. */
-static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
+static struct ovs_mutex mutex = OVS_ADAPTIVE_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
-static struct latch latch;
+static struct latch latch OVS_GUARDED_BY(mutex);
static bool enabled;
-static bool started;
-static struct smap *system_stats;
+static bool started OVS_GUARDED_BY(mutex);
+static struct smap *system_stats OVS_GUARDED_BY(mutex);
static void *system_stats_thread_func(void *);
static void discard_stats(void);
system_stats_enable(bool enable)
{
if (enabled != enable) {
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
if (enable) {
if (!started) {
xpthread_create(NULL, NULL, system_stats_thread_func, NULL);
xpthread_cond_signal(&cond);
}
enabled = enable;
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
}
}
{
struct smap *stats = NULL;
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
if (system_stats) {
latch_poll(&latch);
discard_stats();
}
}
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
return stats;
}
}
static void
-discard_stats(void)
+discard_stats(void) OVS_REQUIRES(&mutex)
{
if (system_stats) {
smap_destroy(system_stats);
long long int next_refresh;
struct smap *stats;
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
while (!enabled) {
- xpthread_cond_wait(&cond, &mutex);
+ ovs_mutex_cond_wait(&cond, &mutex);
}
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
stats = xmalloc(sizeof *stats);
smap_init(stats);
get_process_stats(stats);
get_filesys_stats(stats);
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
discard_stats();
system_stats = stats;
latch_set(&latch);
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
next_refresh = time_msec() + SYSTEM_STATS_INTERVAL;
do {