void xpthread_mutexattr_settype(pthread_mutexattr_t *, int type);
void xpthread_mutexattr_gettype(pthread_mutexattr_t *, int *typep);
-/* Read-write lock. */
+/* Read-write lock.
+ *
+ * An ovs_rwlock does not support recursive readers, because POSIX allows
+ * taking the reader lock recursively to deadlock when a thread is waiting on
+ * the write-lock. (NetBSD does deadlock.) glibc rwlocks in their default
+ * configuration do not deadlock, but ovs_rwlock_init() initializes rwlocks as
+ * non-recursive (which will deadlock) for two reasons:
+ *
+ *     - glibc only provides fairness to writers in this mode.
+ *
+ *     - It's better to find bugs in the primary Open vSwitch target rather
+ *       than exposing them only to porters. */
struct OVS_LOCKABLE ovs_rwlock {
    pthread_rwlock_t lock;
    const char *where;
};
/* Initializer. */
+#ifdef PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP
+#define OVS_RWLOCK_INITIALIZER \
+        { PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP, NULL }
+#else
#define OVS_RWLOCK_INITIALIZER { PTHREAD_RWLOCK_INITIALIZER, NULL }
+#endif
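To make the deadlock that the comment above warns about concrete, here is an illustrative interleaving (not part of this patch); ovs_rwlock_rdlock() is the read-side analogue of ovs_rwlock_wrlock() declared below and is assumed to be available.

/* Illustrative only.  With a writer-preferring, non-recursive rwlock:
 *
 *   Thread A: ovs_rwlock_rdlock(&rw);    A now holds the read lock.
 *   Thread B: ovs_rwlock_wrlock(&rw);    B blocks, queued as a writer.
 *   Thread A: ovs_rwlock_rdlock(&rw);    A blocks behind B's queued write
 *                                        request, which waits for A: deadlock.
 */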
/* ovs_rwlock functions analogous to pthread_rwlock_*() functions.
*
void ovs_rwlock_destroy(const struct ovs_rwlock *);
void ovs_rwlock_unlock(const struct ovs_rwlock *rwlock) OVS_RELEASES(rwlock);
+/* Wrappers for pthread_rwlockattr_*() that abort the process on any error. */
+void xpthread_rwlockattr_init(pthread_rwlockattr_t *);
+void xpthread_rwlockattr_destroy(pthread_rwlockattr_t *);
+#ifdef PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP
+void xpthread_rwlockattr_setkind_np(pthread_rwlockattr_t *, int kind);
+#endif
+
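The attribute wrappers above are what a non-recursive initialization of struct ovs_rwlock needs. Below is a minimal sketch of what ovs_rwlock_init() might look like, assuming glibc's PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP kind; the real implementation lives in ovs-thread.c and reports errors through OVS's logging, which is simplified to abort() here.

#include <pthread.h>
#include <stdlib.h>
#include "ovs-thread.h"

void
ovs_rwlock_init(struct ovs_rwlock *rwlock)
{
    pthread_rwlockattr_t attr;
    int error;

    rwlock->where = NULL;       /* Not currently locked. */

    xpthread_rwlockattr_init(&attr);
#ifdef PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP
    /* Request writer fairness; as documented above, this also makes
     * recursive read acquisition deadlock. */
    xpthread_rwlockattr_setkind_np(
        &attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
#endif
    error = pthread_rwlock_init(&rwlock->lock, &attr);
    if (error) {
        abort();                /* The real code logs the error first. */
    }
    xpthread_rwlockattr_destroy(&attr);
}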
void ovs_rwlock_wrlock_at(const struct ovs_rwlock *rwlock, const char *where)
    OVS_ACQ_WRLOCK(rwlock);
#define ovs_rwlock_wrlock(rwlock) \
void xpthread_cond_signal(pthread_cond_t *);
void xpthread_cond_broadcast(pthread_cond_t *);
-#ifdef __CHECKER__
-/* Replace these functions by the macros already defined in the <pthread.h>
- * annotations, because the macro definitions have correct semantics for the
- * conditional acquisition that can't be captured in a function annotation.
- * The difference in semantics from pthread_*() to xpthread_*() does not matter
- * because sparse is not a compiler. */
-#define xpthread_mutex_trylock pthread_mutex_trylock
-#define xpthread_rwlock_tryrdlock pthread_rwlock_tryrdlock
-#define xpthread_rwlock_trywrlock pthread_rwlock_trywrlock
-#endif
+/* Wrappers for pthread_barrier_*() that abort the process on any error. */
+void xpthread_barrier_init(pthread_barrier_t *, pthread_barrierattr_t *,
+                           unsigned int count);
+int xpthread_barrier_wait(pthread_barrier_t *);
+void xpthread_barrier_destroy(pthread_barrier_t *);
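pthread_barrier_wait() returns PTHREAD_BARRIER_SERIAL_THREAD in exactly one of the threads that meet at the barrier, which is presumably why xpthread_barrier_wait() returns int rather than void. A usage sketch follows; worker() and do_once_per_round() are hypothetical names, and the barrier is assumed to have been set up with xpthread_barrier_init(&barrier, NULL, n_threads).

#include <pthread.h>

static pthread_barrier_t barrier;

static void do_once_per_round(void);    /* Hypothetical. */

static void *
worker(void *arg)
{
    (void) arg;
    for (;;) {
        /* ...do this thread's share of a round of work... */

        if (xpthread_barrier_wait(&barrier) == PTHREAD_BARRIER_SERIAL_THREAD) {
            /* Exactly one thread per round sees this return value, so
             * per-round bookkeeping needs no extra synchronization. */
            do_once_per_round();
        }
    }
    return NULL;
}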
void xpthread_key_create(pthread_key_t *, void (*destructor)(void *));
void xpthread_key_delete(pthread_key_t);
*
* Fully thread-safe. */
-struct ovsthread_counter *ovsthread_counter_create(void);
-void ovsthread_counter_destroy(struct ovsthread_counter *);
-void ovsthread_counter_inc(struct ovsthread_counter *, unsigned long long int);
-unsigned long long int ovsthread_counter_read(
- const struct ovsthread_counter *);
+struct ovsthread_stats {
+    struct ovs_mutex mutex;
+    void *volatile buckets[16];
+};
+
+void ovsthread_stats_init(struct ovsthread_stats *);
+void ovsthread_stats_destroy(struct ovsthread_stats *);
+
+void *ovsthread_stats_bucket_get(struct ovsthread_stats *,
+                                 void *(*new_bucket)(void));
+
+#define OVSTHREAD_STATS_FOR_EACH_BUCKET(BUCKET, IDX, STATS)    \
+    for ((IDX) = ovs_thread_stats_next_bucket(STATS, 0);       \
+         ((IDX) < ARRAY_SIZE((STATS)->buckets)                 \
+          ? ((BUCKET) = (STATS)->buckets[IDX], true)           \
+          : false);                                            \
+         (IDX) = ovs_thread_stats_next_bucket(STATS, (IDX) + 1))
+size_t ovs_thread_stats_next_bucket(const struct ovsthread_stats *, size_t);
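A sketch of how the ovsthread_stats interface above might be used for low-contention per-thread counters; struct my_bucket, new_my_bucket(), count_packet(), and read_packets() are hypothetical, and xzalloc() is OVS's aborting allocator from util.h.

#include "ovs-thread.h"
#include "util.h"

struct my_bucket {
    struct ovs_mutex mutex;
    unsigned long long n_packets;
};

static void *
new_my_bucket(void)
{
    struct my_bucket *b = xzalloc(sizeof *b);
    ovs_mutex_init(&b->mutex);
    return b;
}

/* Writers touch only their own thread's bucket, so they rarely contend. */
static void
count_packet(struct ovsthread_stats *stats)
{
    struct my_bucket *b = ovsthread_stats_bucket_get(stats, new_my_bucket);

    ovs_mutex_lock(&b->mutex);
    b->n_packets++;
    ovs_mutex_unlock(&b->mutex);
}

/* Readers sum over every bucket created so far. */
static unsigned long long
read_packets(struct ovsthread_stats *stats)
{
    unsigned long long total = 0;
    struct my_bucket *b;
    size_t i;

    OVSTHREAD_STATS_FOR_EACH_BUCKET (b, i, stats) {
        ovs_mutex_lock(&b->mutex);
        total += b->n_packets;
        ovs_mutex_unlock(&b->mutex);
    }
    return total;
}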
\f
+bool single_threaded(void);
+
void assert_single_threaded_at(const char *where);
#define assert_single_threaded() assert_single_threaded_at(SOURCE_LOCATOR)
+#ifndef _WIN32
pid_t xfork_at(const char *where);
#define xfork() xfork_at(SOURCE_LOCATOR)
+#endif
void forbid_forking(const char *reason);
bool may_fork(void);
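Finally, a small sketch of how the fork-control functions might interact on a POSIX build (xfork() is not declared on _WIN32, per the guard above). start_worker_threads() and maybe_fork() are hypothetical; the premise, which should be checked against ovs-thread.c, is that xfork() fails fatally once forking has been forbidden, so callers that can degrade gracefully check may_fork() first.

/* A subsystem whose state cannot survive fork() says so once: */
static void
start_worker_threads(void)
{
    forbid_forking("worker threads exist");
    /* ...create the threads... */
}

static pid_t
maybe_fork(void)
{
    if (!may_fork()) {
        return -1;              /* Caller must get by without forking. */
    }
    return xfork();             /* Aborts the process if fork() fails. */
}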