/*
 * Copyright (c) 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "ovs-thread.h"
#include "poll-loop.h"
#include "socket-util.h"
/* Omit the definitions in this file because they are somewhat difficult to
 * write without prompting "sparse" complaints, without ugliness or
 * cut-and-paste. Since "sparse" is just a checker, not a compiler, it
 * doesn't matter that we don't define them. */
VLOG_DEFINE_THIS_MODULE(ovs_thread);
/* If there is a reason that we cannot fork anymore (unless the fork will be
 * immediately followed by an exec), then this points to a string that
 * documents the reason. */
static const char *must_not_fork;
/* True if we created any threads beyond the main initial thread. */
static bool multithreaded;
#define LOCK_FUNCTION(TYPE, FUN) \
    ovs_##TYPE##_##FUN##_at(const struct ovs_##TYPE *l_, \
        OVS_NO_THREAD_SAFETY_ANALYSIS \
        struct ovs_##TYPE *l = CONST_CAST(struct ovs_##TYPE *, l_); \
        int error = pthread_##TYPE##_##FUN(&l->lock); \
        if (OVS_UNLIKELY(error)) { \
            ovs_abort(error, "pthread_%s_%s failed", #TYPE, #FUN); \
LOCK_FUNCTION(mutex, lock);
LOCK_FUNCTION(rwlock, rdlock);
LOCK_FUNCTION(rwlock, wrlock);
#define TRY_LOCK_FUNCTION(TYPE, FUN) \
    ovs_##TYPE##_##FUN##_at(const struct ovs_##TYPE *l_, \
        OVS_NO_THREAD_SAFETY_ANALYSIS \
        struct ovs_##TYPE *l = CONST_CAST(struct ovs_##TYPE *, l_); \
        int error = pthread_##TYPE##_##FUN(&l->lock); \
        if (OVS_UNLIKELY(error) && error != EBUSY) { \
            ovs_abort(error, "pthread_%s_%s failed", #TYPE, #FUN); \
TRY_LOCK_FUNCTION(mutex, trylock);
TRY_LOCK_FUNCTION(rwlock, tryrdlock);
TRY_LOCK_FUNCTION(rwlock, trywrlock);
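/* Note that, unlike the LOCK_FUNCTION wrappers, the trylock wrappers defined
 * just above deliberately let EBUSY propagate back to the caller, so callers
 * can probe whether a lock is available; any other error still aborts. */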
#define UNLOCK_FUNCTION(TYPE, FUN) \
    ovs_##TYPE##_##FUN(const struct ovs_##TYPE *l_) \
        OVS_NO_THREAD_SAFETY_ANALYSIS \
        struct ovs_##TYPE *l = CONST_CAST(struct ovs_##TYPE *, l_); \
        error = pthread_##TYPE##_##FUN(&l->lock); \
        if (OVS_UNLIKELY(error)) { \
            ovs_abort(error, "pthread_%s_%s failed", #TYPE, #FUN); \
UNLOCK_FUNCTION(mutex, unlock);
UNLOCK_FUNCTION(mutex, destroy);
UNLOCK_FUNCTION(rwlock, unlock);
UNLOCK_FUNCTION(rwlock, destroy);
#define XPTHREAD_FUNC1(FUNCTION, PARAM1) \
    x##FUNCTION(PARAM1 arg1) \
        int error = FUNCTION(arg1); \
        if (OVS_UNLIKELY(error)) { \
            ovs_abort(error, "%s failed", #FUNCTION); \
#define XPTHREAD_FUNC2(FUNCTION, PARAM1, PARAM2) \
    x##FUNCTION(PARAM1 arg1, PARAM2 arg2) \
        int error = FUNCTION(arg1, arg2); \
        if (OVS_UNLIKELY(error)) { \
            ovs_abort(error, "%s failed", #FUNCTION); \
#define XPTHREAD_FUNC3(FUNCTION, PARAM1, PARAM2, PARAM3)\
    x##FUNCTION(PARAM1 arg1, PARAM2 arg2, PARAM3 arg3) \
        int error = FUNCTION(arg1, arg2, arg3); \
        if (OVS_UNLIKELY(error)) { \
            ovs_abort(error, "%s failed", #FUNCTION); \
XPTHREAD_FUNC1(pthread_mutex_lock, pthread_mutex_t *);
XPTHREAD_FUNC1(pthread_mutex_unlock, pthread_mutex_t *);
XPTHREAD_FUNC1(pthread_mutexattr_init, pthread_mutexattr_t *);
XPTHREAD_FUNC1(pthread_mutexattr_destroy, pthread_mutexattr_t *);
XPTHREAD_FUNC2(pthread_mutexattr_settype, pthread_mutexattr_t *, int);
XPTHREAD_FUNC2(pthread_mutexattr_gettype, pthread_mutexattr_t *, int *);
XPTHREAD_FUNC1(pthread_rwlockattr_init, pthread_rwlockattr_t *);
XPTHREAD_FUNC1(pthread_rwlockattr_destroy, pthread_rwlockattr_t *);
#ifdef PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP
XPTHREAD_FUNC2(pthread_rwlockattr_setkind_np, pthread_rwlockattr_t *, int);
XPTHREAD_FUNC2(pthread_cond_init, pthread_cond_t *, pthread_condattr_t *);
XPTHREAD_FUNC1(pthread_cond_destroy, pthread_cond_t *);
XPTHREAD_FUNC1(pthread_cond_signal, pthread_cond_t *);
XPTHREAD_FUNC1(pthread_cond_broadcast, pthread_cond_t *);
XPTHREAD_FUNC3(pthread_barrier_init, pthread_barrier_t *,
               pthread_barrierattr_t *, unsigned int);
XPTHREAD_FUNC1(pthread_barrier_destroy, pthread_barrier_t *);
XPTHREAD_FUNC2(pthread_join, pthread_t, void **);
typedef void destructor_func(void *);
XPTHREAD_FUNC2(pthread_key_create, pthread_key_t *, destructor_func *);
XPTHREAD_FUNC1(pthread_key_delete, pthread_key_t);
XPTHREAD_FUNC2(pthread_setspecific, pthread_key_t, const void *);
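/* For illustration only (a rough sketch of the expansion, not literal
 * preprocessor output), "XPTHREAD_FUNC1(pthread_mutex_lock, pthread_mutex_t *)"
 * defines a wrapper along the lines of:
 *
 *     xpthread_mutex_lock(pthread_mutex_t *arg1)
 *     {
 *         int error = pthread_mutex_lock(arg1);
 *         if (OVS_UNLIKELY(error)) {
 *             ovs_abort(error, "pthread_mutex_lock failed");
 *         }
 *     }
 *
 * so callers never check a return value: any failure aborts the process. */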
ovs_mutex_init__(const struct ovs_mutex *l_, int type)
    struct ovs_mutex *l = CONST_CAST(struct ovs_mutex *, l_);
    pthread_mutexattr_t attr;
    xpthread_mutexattr_init(&attr);
    xpthread_mutexattr_settype(&attr, type);
    error = pthread_mutex_init(&l->lock, &attr);
    if (OVS_UNLIKELY(error)) {
        ovs_abort(error, "pthread_mutex_init failed");
    xpthread_mutexattr_destroy(&attr);
/* Initializes 'mutex' as a normal (non-recursive) mutex. */
ovs_mutex_init(const struct ovs_mutex *mutex)
    ovs_mutex_init__(mutex, PTHREAD_MUTEX_ERRORCHECK);
/* Initializes 'mutex' as a recursive mutex. */
ovs_mutex_init_recursive(const struct ovs_mutex *mutex)
    ovs_mutex_init__(mutex, PTHREAD_MUTEX_RECURSIVE);
/* Initializes 'mutex' as an adaptive mutex on platforms that support adaptive
 * mutexes, otherwise as a normal mutex. */
ovs_mutex_init_adaptive(const struct ovs_mutex *mutex)
#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
    ovs_mutex_init__(mutex, PTHREAD_MUTEX_ADAPTIVE_NP);
    ovs_mutex_init(mutex);
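/* Minimal usage sketch (this assumes the ovs_mutex_lock()/ovs_mutex_unlock()
 * convenience macros declared in ovs-thread.h, which route to the *_at()
 * wrappers defined earlier in this file):
 *
 *     static struct ovs_mutex mtx = OVS_MUTEX_INITIALIZER;
 *
 *     ovs_mutex_lock(&mtx);
 *     ...critical section...
 *     ovs_mutex_unlock(&mtx);
 */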
ovs_rwlock_init(const struct ovs_rwlock *l_)
    struct ovs_rwlock *l = CONST_CAST(struct ovs_rwlock *, l_);
    pthread_rwlockattr_t attr;
    xpthread_rwlockattr_init(&attr);
#ifdef PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP
    xpthread_rwlockattr_setkind_np(
        &attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
    error = pthread_rwlock_init(&l->lock, &attr);
    if (OVS_UNLIKELY(error)) {
        ovs_abort(error, "pthread_rwlock_init failed");
    xpthread_rwlockattr_destroy(&attr);
ovs_mutex_cond_wait(pthread_cond_t *cond, const struct ovs_mutex *mutex_)
    struct ovs_mutex *mutex = CONST_CAST(struct ovs_mutex *, mutex_);
    ovsrcu_quiesce_start();
    error = pthread_cond_wait(cond, &mutex->lock);
    ovsrcu_quiesce_end();
    if (OVS_UNLIKELY(error)) {
        ovs_abort(error, "pthread_cond_wait failed");
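/* Typical caller-side pattern for ovs_mutex_cond_wait() (a sketch; 'mtx',
 * 'cond', and 'ready' are hypothetical caller-side names):
 *
 *     ovs_mutex_lock(&mtx);
 *     while (!ready) {
 *         ovs_mutex_cond_wait(&cond, &mtx);
 *     }
 *     ovs_mutex_unlock(&mtx);
 *
 * The wrapper marks the thread RCU-quiescent around the blocking wait, so a
 * long wait does not hold up RCU grace periods. */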
xpthread_barrier_wait(pthread_barrier_t *barrier)
    error = pthread_barrier_wait(barrier);
    if (error && OVS_UNLIKELY(error != PTHREAD_BARRIER_SERIAL_THREAD)) {
        ovs_abort(error, "pthread_barrier_wait failed");
DEFINE_EXTERN_PER_THREAD_DATA(ovsthread_id, 0);
struct ovsthread_aux {
    void *(*start)(void *);
ovsthread_wrapper(void *aux_)
    static atomic_uint next_id = ATOMIC_VAR_INIT(1);
    struct ovsthread_aux *auxp = aux_;
    struct ovsthread_aux aux;
    atomic_add(&next_id, 1, &id);
    *ovsthread_id_get() = id;
    ovsrcu_quiesce_end();
    return aux.start(aux.arg);
xpthread_create(pthread_t *threadp, pthread_attr_t *attr,
                void *(*start)(void *), void *arg)
    struct ovsthread_aux *aux;
    forbid_forking("multiple threads exist");
    multithreaded = true;
    ovsrcu_quiesce_end();
    aux = xmalloc(sizeof *aux);
    error = pthread_create(threadp ? threadp : &thread, attr,
                           ovsthread_wrapper, aux);
        ovs_abort(error, "pthread_create failed");
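/* Illustrative call (a sketch; 'worker' stands in for a caller-supplied
 * thread function of type "void *(*)(void *)"):
 *
 *     pthread_t tid;
 *
 *     xpthread_create(&tid, NULL, worker, NULL);
 *     ...
 *     xpthread_join(tid, NULL);
 */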
ovsthread_once_start__(struct ovsthread_once *once)
    ovs_mutex_lock(&once->mutex);
    if (!ovsthread_once_is_done__(once)) {
    ovs_mutex_unlock(&once->mutex);
ovsthread_once_done(struct ovsthread_once *once)
    atomic_store(&once->done, true);
    ovs_mutex_unlock(&once->mutex);
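/* The usual ovsthread_once pattern, as used later in this file (for example
 * in count_cpu_cores() and ovsthread_key_create()):
 *
 *     static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
 *
 *     if (ovsthread_once_start(&once)) {
 *         ...one-time initialization...
 *         ovsthread_once_done(&once);
 *     }
 */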
single_threaded(void)
    return !multithreaded;
/* Asserts that the process has not yet created any threads (beyond the initial
 * thread).
 *
 * ('where' is used in logging. Commonly one would use
 * assert_single_threaded() to automatically provide the caller's source file
 * and line number for 'where'.) */
assert_single_threaded_at(const char *where)
        VLOG_FATAL("%s: attempted operation not allowed when multithreaded",
/* Forks the current process (checking that this is allowed). Aborts with
 * VLOG_FATAL if fork() returns an error, and otherwise returns the value
 * returned by fork().
 *
 * ('where' is used in logging. Commonly one would use xfork() to
 * automatically provide the caller's source file and line number for
 * 'where'.) */
xfork_at(const char *where)
        VLOG_FATAL("%s: attempted to fork but forking not allowed (%s)",
                   where, must_not_fork);
        VLOG_FATAL("%s: fork failed (%s)", where, ovs_strerror(errno));
/* Notes that the process must not call fork() from now on, for the specified
 * 'reason'. (The process may still fork() if it execs itself immediately
 * afterward.) */
forbid_forking(const char *reason)
    ovs_assert(reason != NULL);
    must_not_fork = reason;
/* Returns true if the process is allowed to fork, false otherwise. */
    return !must_not_fork;
/* ovsthread_stats. */
ovsthread_stats_init(struct ovsthread_stats *stats)
    ovs_mutex_init(&stats->mutex);
    for (i = 0; i < ARRAY_SIZE(stats->buckets); i++) {
        stats->buckets[i] = NULL;
ovsthread_stats_destroy(struct ovsthread_stats *stats)
    ovs_mutex_destroy(&stats->mutex);
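/* Returns this thread's bucket in 'stats', creating it with 'new_bucket()'
 * (under 'stats->mutex') the first time the bucket is needed. The bucket
 * index is derived from the thread id, so distinct threads can map to the
 * same bucket. */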
ovsthread_stats_bucket_get(struct ovsthread_stats *stats,
                           void *(*new_bucket)(void))
    unsigned int idx = ovsthread_id_self() & (ARRAY_SIZE(stats->buckets) - 1);
    void *bucket = stats->buckets[idx];
        ovs_mutex_lock(&stats->mutex);
        bucket = stats->buckets[idx];
            bucket = stats->buckets[idx] = new_bucket();
        ovs_mutex_unlock(&stats->mutex);
ovs_thread_stats_next_bucket(const struct ovsthread_stats *stats, size_t i)
    for (; i < ARRAY_SIZE(stats->buckets); i++) {
        if (stats->buckets[i]) {
/* Parses /proc/cpuinfo for the total number of physical cores on this system
 * across all CPU packages, not counting hyper-threads.
 *
 * Sets *n_cores to the total number of cores on this system, or 0 if the
 * number cannot be determined. */
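/* For illustration, the /proc/cpuinfo fields this parser matches look roughly
 * like:
 *
 *     physical id     : 0
 *     cpu cores       : 4
 *
 * (Exact formatting varies by kernel and architecture, which is one reason
 * count_cpu_cores() treats the result as best-effort.) */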
parse_cpuinfo(long int *n_cores)
    static const char file_name[] = "/proc/cpuinfo";
    uint64_t cpu = 0; /* Support up to 64 CPU packages on a single system. */
    stream = fopen(file_name, "r");
        VLOG_DBG("%s: open failed (%s)", file_name, ovs_strerror(errno));
    while (fgets(line, sizeof line, stream)) {
        /* Find the next CPU package. */
        if (ovs_scan(line, "physical id%*[^:]: %u", &id)) {
                VLOG_WARN("Counted over 64 CPU packages on this system. "
                          "Parsing %s for core count may be inaccurate.",
            if (cpu & (1ULL << id)) {
                /* We've already counted this package's cores. */
            /* Find the number of cores for this package. */
            while (fgets(line, sizeof line, stream)) {
                if (ovs_scan(line, "cpu cores%*[^:]: %u", &count)) {
/* Returns the total number of cores on this system, or 0 if the number cannot
 * be determined.
 *
 * Tries not to count hyper-threads, but may be inaccurate - particularly on
 * platforms that do not provide /proc/cpuinfo, but also if /proc/cpuinfo is
 * formatted differently from the layout that parse_cpuinfo() expects. */
count_cpu_cores(void)
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    static long int n_cores;
    if (ovsthread_once_start(&once)) {
        parse_cpuinfo(&n_cores);
            n_cores = sysconf(_SC_NPROCESSORS_ONLN);
        GetSystemInfo(&sysinfo);
        n_cores = sysinfo.dwNumberOfProcessors;
        ovsthread_once_done(&once);
    return n_cores > 0 ? n_cores : 0;
#define MAX_KEYS (L1_SIZE * L2_SIZE)
/* A piece of thread-specific data. */
struct ovsthread_key {
    struct list list_node;      /* In 'inuse_keys' or 'free_keys'. */
    void (*destructor)(void *); /* Called at thread exit. */
    /* Indexes into the per-thread array in struct ovsthread_key_slots.
     * This key's data is stored in p1[index / L2_SIZE][index % L2_SIZE]. */
/* Per-thread data structure. */
struct ovsthread_key_slots {
    struct list list_node;      /* In 'slots_list'. */
/* Contains "struct ovsthread_key_slots *". */
static pthread_key_t tsd_key;
/* Guards data structures below. */
static struct ovs_mutex key_mutex = OVS_MUTEX_INITIALIZER;
/* 'inuse_keys' holds "struct ovsthread_key"s that have been created and not
 * yet deleted.
 *
 * 'free_keys' holds "struct ovsthread_key"s that have been deleted and are
 * ready for reuse. (We keep them around only to be able to easily locate
 * their slots for reuse.)
 *
 * Together, 'inuse_keys' and 'free_keys' hold an ovsthread_key for every index
 * from 0 to n_keys - 1, inclusive. */
static struct list inuse_keys OVS_GUARDED_BY(key_mutex)
    = LIST_INITIALIZER(&inuse_keys);
static struct list free_keys OVS_GUARDED_BY(key_mutex)
    = LIST_INITIALIZER(&free_keys);
static unsigned int n_keys OVS_GUARDED_BY(key_mutex);
/* All existing struct ovsthread_key_slots. */
static struct list slots_list OVS_GUARDED_BY(key_mutex)
    = LIST_INITIALIZER(&slots_list);
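/* Clears the per-thread slot for key index 'index' in 'slots', returning the
 * value that the slot previously held. */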
clear_slot(struct ovsthread_key_slots *slots, unsigned int index)
    void **p2 = slots->p1[index / L2_SIZE];
        void **valuep = &p2[index % L2_SIZE];
        void *value = *valuep;
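/* Thread-exit destructor registered on 'tsd_key': for each key still in use,
 * runs that key's destructor on the exiting thread's non-null value, then
 * frees the thread's slot arrays. */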
ovsthread_key_destruct__(void *slots_)
    struct ovsthread_key_slots *slots = slots_;
    struct ovsthread_key *key;
    ovs_mutex_lock(&key_mutex);
    list_remove(&slots->list_node);
    LIST_FOR_EACH (key, list_node, &inuse_keys) {
        void *value = clear_slot(slots, key->index);
        if (value && key->destructor) {
            key->destructor(value);
    ovs_mutex_unlock(&key_mutex);
    for (i = 0; i < n / L2_SIZE; i++) {
/* Initializes '*keyp' as a thread-specific data key. The data items are
 * initially null in all threads.
 *
 * If a thread exits with non-null data, then 'destructor', if nonnull, will be
 * called passing the final data value as its argument. 'destructor' must not
 * call any thread-specific data functions in this API.
 *
 * This function is similar to xpthread_key_create(). */
ovsthread_key_create(ovsthread_key_t *keyp, void (*destructor)(void *))
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    struct ovsthread_key *key;
    if (ovsthread_once_start(&once)) {
        xpthread_key_create(&tsd_key, ovsthread_key_destruct__);
        ovsthread_once_done(&once);
    ovs_mutex_lock(&key_mutex);
    if (list_is_empty(&free_keys)) {
        key = xmalloc(sizeof *key);
        key->index = n_keys++;
        if (key->index >= MAX_KEYS) {
        key = CONTAINER_OF(list_pop_back(&free_keys),
                           struct ovsthread_key, list_node);
    list_push_back(&inuse_keys, &key->list_node);
    key->destructor = destructor;
    ovs_mutex_unlock(&key_mutex);
/* Frees 'key'. The destructor supplied to ovsthread_key_create(), if any, is
 * not called.
 *
 * This function is similar to xpthread_key_delete(). */
ovsthread_key_delete(ovsthread_key_t key)
    struct ovsthread_key_slots *slots;
    ovs_mutex_lock(&key_mutex);
    /* Move 'key' from 'inuse_keys' to 'free_keys'. */
    list_remove(&key->list_node);
    list_push_back(&free_keys, &key->list_node);
    /* Clear this slot in all threads. */
    LIST_FOR_EACH (slots, list_node, &slots_list) {
        clear_slot(slots, key->index);
    ovs_mutex_unlock(&key_mutex);
ovsthread_key_lookup__(const struct ovsthread_key *key)
    struct ovsthread_key_slots *slots;
    slots = pthread_getspecific(tsd_key);
        slots = xzalloc(sizeof *slots);
        ovs_mutex_lock(&key_mutex);
        pthread_setspecific(tsd_key, slots);
        list_push_back(&slots_list, &slots->list_node);
        ovs_mutex_unlock(&key_mutex);
    p2 = slots->p1[key->index / L2_SIZE];
        p2 = xzalloc(L2_SIZE * sizeof *p2);
        slots->p1[key->index / L2_SIZE] = p2;
    return &p2[key->index % L2_SIZE];
/* Sets the value of thread-specific data item 'key', in the current thread, to
 * 'value'.
 *
 * This function is similar to pthread_setspecific(). */
ovsthread_setspecific(ovsthread_key_t key, const void *value)
    *ovsthread_key_lookup__(key) = CONST_CAST(void *, value);
/* Returns the value of thread-specific data item 'key' in the current thread.
 *
 * This function is similar to pthread_getspecific(). */
ovsthread_getspecific(ovsthread_key_t key)
    return *ovsthread_key_lookup__(key);
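/* Illustrative use of the ovsthread_key API above (a sketch; "struct foo" and
 * the initialization site are hypothetical, and free() stands in for a real
 * destructor):
 *
 *     static ovsthread_key_t key;
 *
 *     ovsthread_key_create(&key, free);
 *     ...
 *     ovsthread_setspecific(key, xmalloc(sizeof(struct foo)));
 *     struct foo *foo = ovsthread_getspecific(key);
 */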