X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=lib%2Fovs-thread.c;h=f33f5f1107f606caef25a43c2972bd58d30566a9;hb=595ef8b10ad81c6033df53dfbfdb8c1b01f6de87;hp=4dfccaf3e5f8aae2d323615e28e33aa7236626e3;hpb=6b59b543c840ac82acc4d57f865c0b5e331ac590;p=sliver-openvswitch.git

diff --git a/lib/ovs-thread.c b/lib/ovs-thread.c
index 4dfccaf3e..f33f5f110 100644
--- a/lib/ovs-thread.c
+++ b/lib/ovs-thread.c
@@ -22,6 +22,7 @@
 #include <unistd.h>
 #include "compiler.h"
 #include "hash.h"
+#include "ovs-rcu.h"
 #include "poll-loop.h"
 #include "socket-util.h"
 #include "util.h"
@@ -117,6 +118,15 @@ UNLOCK_FUNCTION(rwlock, destroy);
             ovs_abort(error, "%s failed", #FUNCTION);   \
         }                                               \
     }
+#define XPTHREAD_FUNC3(FUNCTION, PARAM1, PARAM2, PARAM3)\
+    void                                                \
+    x##FUNCTION(PARAM1 arg1, PARAM2 arg2, PARAM3 arg3)  \
+    {                                                   \
+        int error = FUNCTION(arg1, arg2, arg3);         \
+        if (OVS_UNLIKELY(error)) {                      \
+            ovs_abort(error, "%s failed", #FUNCTION);   \
+        }                                               \
+    }
 
 XPTHREAD_FUNC1(pthread_mutex_lock, pthread_mutex_t *);
 XPTHREAD_FUNC1(pthread_mutex_unlock, pthread_mutex_t *);
@@ -136,6 +146,10 @@ XPTHREAD_FUNC1(pthread_cond_destroy, pthread_cond_t *);
 XPTHREAD_FUNC1(pthread_cond_signal, pthread_cond_t *);
 XPTHREAD_FUNC1(pthread_cond_broadcast, pthread_cond_t *);
 
+XPTHREAD_FUNC3(pthread_barrier_init, pthread_barrier_t *,
+               pthread_barrierattr_t *, unsigned int);
+XPTHREAD_FUNC1(pthread_barrier_destroy, pthread_barrier_t *);
+
 XPTHREAD_FUNC2(pthread_join, pthread_t, void **);
 
 typedef void destructor_func(void *);
@@ -210,11 +224,32 @@ void
 ovs_mutex_cond_wait(pthread_cond_t *cond, const struct ovs_mutex *mutex_)
 {
     struct ovs_mutex *mutex = CONST_CAST(struct ovs_mutex *, mutex_);
-    int error = pthread_cond_wait(cond, &mutex->lock);
+    int error;
+
+    ovsrcu_quiesce_start();
+    error = pthread_cond_wait(cond, &mutex->lock);
+    ovsrcu_quiesce_end();
+
     if (OVS_UNLIKELY(error)) {
         ovs_abort(error, "pthread_cond_wait failed");
     }
 }
+
+int
+xpthread_barrier_wait(pthread_barrier_t *barrier)
+{
+    int error;
+
+    ovsrcu_quiesce_start();
+    error = pthread_barrier_wait(barrier);
+    ovsrcu_quiesce_end();
+
+    if (error && OVS_UNLIKELY(error != PTHREAD_BARRIER_SERIAL_THREAD)) {
+        ovs_abort(error, "pthread_barrier_wait failed");
+    }
+
+    return error;
+}
 
 DEFINE_EXTERN_PER_THREAD_DATA(ovsthread_id, 0);
 
@@ -238,6 +273,7 @@ ovsthread_wrapper(void *aux_)
     aux = *auxp;
     free(auxp);
 
+    ovsrcu_quiesce_end();
     return aux.start(aux.arg);
 }
 
@@ -251,6 +287,7 @@ xpthread_create(pthread_t *threadp, pthread_attr_t *attr,
 
     forbid_forking("multiple threads exist");
     multithreaded = true;
+    ovsrcu_quiesce_end();
 
     aux = xmalloc(sizeof *aux);
     aux->start = start;
@@ -281,6 +318,12 @@ ovsthread_once_done(struct ovsthread_once *once)
     ovs_mutex_unlock(&once->mutex);
 }
 
+bool
+single_threaded(void)
+{
+    return !multithreaded;
+}
+
 /* Asserts that the process has not yet created any threads (beyond the initial
  * thread).
  *
@@ -296,6 +339,7 @@ assert_single_threaded_at(const char *where)
     }
 }
 
+#ifndef _WIN32
 /* Forks the current process (checking that this is allowed).  Aborts with
  * VLOG_FATAL if fork() returns an error, and otherwise returns the value
  * returned by fork().
@@ -319,6 +363,7 @@ xfork_at(const char *where)
     }
     return pid;
 }
+#endif
 
 /* Notes that the process must not call fork() from now on, for the specified
  * 'reason'.  (The process may still fork() if it execs itself immediately
@@ -337,83 +382,53 @@ may_fork(void)
     return !must_not_fork;
 }
 
-/* ovsthread_counter.
- *
- * We implement the counter as an array of N_COUNTERS individual counters, each
- * with its own lock.  Each thread uses one of the counters chosen based on a
- * hash of the thread's ID, the idea being that, statistically, different
- * threads will tend to use different counters and therefore avoid
- * interfering with each other.
- *
- * Undoubtedly, better implementations are possible. */
-
-/* Basic counter structure. */
-struct ovsthread_counter__ {
-    struct ovs_mutex mutex;
-    unsigned long long int value;
-};
-
-/* Pad the basic counter structure to 64 bytes to avoid cache line
- * interference. */
-struct ovsthread_counter {
-    struct ovsthread_counter__ c;
-    char pad[ROUND_UP(sizeof(struct ovsthread_counter__), 64)
-             - sizeof(struct ovsthread_counter__)];
-};
-
-#define N_COUNTERS 16
+/* ovsthread_stats. */
 
-struct ovsthread_counter *
-ovsthread_counter_create(void)
+void
+ovsthread_stats_init(struct ovsthread_stats *stats)
 {
-    struct ovsthread_counter *c;
     int i;
 
-    c = xmalloc(N_COUNTERS * sizeof *c);
-    for (i = 0; i < N_COUNTERS; i++) {
-        ovs_mutex_init(&c[i].c.mutex);
-        c[i].c.value = 0;
+    ovs_mutex_init(&stats->mutex);
+    for (i = 0; i < ARRAY_SIZE(stats->buckets); i++) {
+        stats->buckets[i] = NULL;
     }
-
-    return c;
 }
 
 void
-ovsthread_counter_destroy(struct ovsthread_counter *c)
+ovsthread_stats_destroy(struct ovsthread_stats *stats)
 {
-    if (c) {
-        int i;
-
-        for (i = 0; i < N_COUNTERS; i++) {
-            ovs_mutex_destroy(&c[i].c.mutex);
-        }
-        free(c);
-    }
+    ovs_mutex_destroy(&stats->mutex);
 }
 
-void
-ovsthread_counter_inc(struct ovsthread_counter *c, unsigned long long int n)
+void *
+ovsthread_stats_bucket_get(struct ovsthread_stats *stats,
+                           void *(*new_bucket)(void))
 {
-    c = &c[hash_int(ovsthread_id_self(), 0) % N_COUNTERS];
-
-    ovs_mutex_lock(&c->c.mutex);
-    c->c.value += n;
-    ovs_mutex_unlock(&c->c.mutex);
+    unsigned int idx = ovsthread_id_self() & (ARRAY_SIZE(stats->buckets) - 1);
+    void *bucket = stats->buckets[idx];
+    if (!bucket) {
+        ovs_mutex_lock(&stats->mutex);
+        bucket = stats->buckets[idx];
+        if (!bucket) {
+            bucket = stats->buckets[idx] = new_bucket();
+        }
+        ovs_mutex_unlock(&stats->mutex);
+    }
+    return bucket;
 }
 
-unsigned long long int
-ovsthread_counter_read(const struct ovsthread_counter *c)
+size_t
+ovs_thread_stats_next_bucket(const struct ovsthread_stats *stats, size_t i)
 {
-    unsigned long long int sum;
-    int i;
-
-    sum = 0;
-    for (i = 0; i < N_COUNTERS; i++) {
-        ovs_mutex_lock(&c[i].c.mutex);
-        sum += c[i].c.value;
-        ovs_mutex_unlock(&c[i].c.mutex);
+    for (; i < ARRAY_SIZE(stats->buckets); i++) {
+        if (stats->buckets[i]) {
+            break;
+        }
     }
-
-    return sum;
+    return i;
 }
+
 
 /* Parses /proc/cpuinfo for the total number of physical cores on this system
  * across all CPU packages, not counting hyper-threads.
@@ -483,10 +498,16 @@ count_cpu_cores(void)
     static long int n_cores;
 
     if (ovsthread_once_start(&once)) {
+#ifndef _WIN32
         parse_cpuinfo(&n_cores);
        if (!n_cores) {
             n_cores = sysconf(_SC_NPROCESSORS_ONLN);
         }
+#else
+        SYSTEM_INFO sysinfo;
+        GetSystemInfo(&sysinfo);
+        n_cores = sysinfo.dwNumberOfProcessors;
+#endif
         ovsthread_once_done(&once);
     }
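
The patch above replaces the old ovsthread_counter with the more general ovsthread_stats interface: each thread picks a bucket indexed by its thread ID, buckets are created lazily under stats->mutex with a double-checked lookup, and a reader walks the occupied slots with ovs_thread_stats_next_bucket(). As a rough illustration of the intended usage, and not part of the patch, the sketch below builds a per-thread packet counter on these helpers; the "pkt_*" names and the per-bucket mutex are invented for this example.

/* Illustrative only: a per-thread packet counter built on the ovsthread_stats
 * helpers introduced by this patch.  The "pkt_*" names are assumptions made
 * for the sketch, not code from this tree. */

#include "ovs-thread.h"
#include "util.h"

struct pkt_bucket {
    struct ovs_mutex mutex;         /* Guards 'n_packets' against readers. */
    unsigned long long n_packets;
};

static struct ovsthread_stats pkt_stats;

/* Call once, before any thread uses pkt_stats_add(). */
static void
pkt_stats_init(void)
{
    ovsthread_stats_init(&pkt_stats);
}

/* Callback for ovsthread_stats_bucket_get(): lazily creates the bucket the
 * first time a given bucket index is touched. */
static void *
pkt_bucket_new(void)
{
    struct pkt_bucket *b = xzalloc(sizeof *b);
    ovs_mutex_init(&b->mutex);
    return b;
}

/* Writer path: a thread normally keeps hitting the bucket selected by its own
 * thread ID, so different threads rarely contend on the same mutex. */
static void
pkt_stats_add(unsigned long long n)
{
    struct pkt_bucket *b = ovsthread_stats_bucket_get(&pkt_stats,
                                                      pkt_bucket_new);

    ovs_mutex_lock(&b->mutex);
    b->n_packets += n;
    ovs_mutex_unlock(&b->mutex);
}

/* Reader path: sums every bucket created so far, using
 * ovs_thread_stats_next_bucket() to skip the NULL slots. */
static unsigned long long
pkt_stats_read(void)
{
    unsigned long long total = 0;
    size_t i;

    for (i = ovs_thread_stats_next_bucket(&pkt_stats, 0);
         i < ARRAY_SIZE(pkt_stats.buckets);
         i = ovs_thread_stats_next_bucket(&pkt_stats, i + 1)) {
        struct pkt_bucket *b = pkt_stats.buckets[i];

        ovs_mutex_lock(&b->mutex);
        total += b->n_packets;
        ovs_mutex_unlock(&b->mutex);
    }
    return total;
}

The point of the layout is that stats->mutex is only taken on the first lazy allocation of a bucket, while steady-state updates touch only the bucket that a thread's ID maps to, so cross-thread contention stays limited to readers that occasionally sum all buckets.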