struct clock {
clockid_t id; /* CLOCK_MONOTONIC or CLOCK_REALTIME. */
- pthread_rwlock_t rwlock; /* Mutual exclusion for 'cache'. */
+ struct ovs_rwlock rwlock; /* Mutual exclusion for 'cache'. */
/* Features for use by unit tests. Protected by 'rwlock'. */
struct timespec warp; /* Offset added for unit tests. */
* LLONG_MAX). */
static long long int deadline = LLONG_MAX;
+/* Monotonic time, in milliseconds, at which the last call to time_poll() woke
+ * up. */
+DEFINE_PER_THREAD_DATA(long long int, last_wakeup, 0);
+
static void set_up_timer(void);
static void set_up_signal(int flags);
static void sigalrm_handler(int);
{
memset(c, 0, sizeof *c);
c->id = id;
- xpthread_rwlock_init(&c->rwlock, NULL);
+ ovs_rwlock_init(&c->rwlock);
xclock_gettime(c->id, &c->cache);
}
void
time_postfork(void)
{
+ assert_single_threaded();
time_init();
set_up_timer();
}
for (;;) {
/* Use the cached time by preference, but fall through if there's been
* a clock tick. */
- xpthread_rwlock_rdlock(&c->rwlock);
+ ovs_rwlock_rdlock(&c->rwlock);
if (c->stopped || !c->tick) {
timespec_add(ts, &c->cache, &c->warp);
- xpthread_rwlock_unlock(&c->rwlock);
+ ovs_rwlock_unlock(&c->rwlock);
return;
}
- xpthread_rwlock_unlock(&c->rwlock);
+ ovs_rwlock_unlock(&c->rwlock);
/* Refresh the cache. */
- xpthread_rwlock_wrlock(&c->rwlock);
+ ovs_rwlock_wrlock(&c->rwlock);
if (c->tick) {
c->tick = false;
xclock_gettime(c->id, &c->cache);
}
- xpthread_rwlock_unlock(&c->rwlock);
+ ovs_rwlock_unlock(&c->rwlock);
}
}
time_poll(struct pollfd *pollfds, int n_pollfds, long long int timeout_when,
int *elapsed)
{
- static long long int last_wakeup = 0;
+ long long int *last_wakeup = last_wakeup_get();
long long int start;
sigset_t oldsigs;
bool blocked;
int retval;
+ time_init();
time_refresh();
- if (last_wakeup) {
- log_poll_interval(last_wakeup);
+ if (*last_wakeup) {
+ log_poll_interval(*last_wakeup);
}
coverage_clear();
start = time_msec();
if (blocked) {
unblock_sigalrm(&oldsigs);
}
- last_wakeup = time_msec();
+ *last_wakeup = time_msec();
refresh_rusage();
- *elapsed = last_wakeup - start;
+ *elapsed = *last_wakeup - start;
return retval;
}
unsigned long long int cpu; /* Total user+system CPU usage when sampled. */
};
-static struct rusage recent_rusage;
-static struct cpu_usage older = { LLONG_MIN, 0 };
-static struct cpu_usage newer = { LLONG_MIN, 0 };
-static int cpu_usage = -1;
+/* Per-thread CPU usage tracking state, replacing the old file-scope
+ * globals so each thread measures its own CPU usage. */
+struct cpu_tracker {
+    struct cpu_usage older;
+    struct cpu_usage newer;
+    int cpu_usage;              /* Measured in percent, or -1 if unknown. */
+
+    struct rusage recent_rusage;
+};
+DEFINE_PER_THREAD_MALLOCED_DATA(struct cpu_tracker *, cpu_tracker_var);
+
+/* Returns the calling thread's tracker, lazily allocating it on first use. */
+static struct cpu_tracker *
+get_cpu_tracker(void)
+{
+    struct cpu_tracker *t = cpu_tracker_var_get();
+    if (!t) {
+        t = xzalloc(sizeof *t);
+        t->older.when = LLONG_MIN;
+        t->newer.when = LLONG_MIN;
+        t->cpu_usage = -1;      /* Preserve the old "-1 until first sample"
+                                 * semantics of the removed global; xzalloc()
+                                 * would otherwise leave this at 0, making
+                                 * get_cpu_usage() report a bogus 0%. */
+        cpu_tracker_var_set_unsafe(t);
+    }
+    return t;
+}
static struct rusage *
get_recent_rusage(void)
{
-    return &recent_rusage;
+    /* Per-thread now: returns the calling thread's own rusage snapshot. */
+    return &get_cpu_tracker()->recent_rusage;
+}
+
+/* Like getrusage(RUSAGE_THREAD, rusage), but returns -1 with errno set to
+ * EINVAL on platforms that do not define RUSAGE_THREAD (a Linux-specific
+ * extension, available since Linux 2.6.26). */
+static int
+getrusage_thread(struct rusage *rusage OVS_UNUSED)
+{
+#ifdef RUSAGE_THREAD
+    return getrusage(RUSAGE_THREAD, rusage);
+#else
+    errno = EINVAL;
+    return -1;
+#endif
}
static void
refresh_rusage(void)
{
-    long long int now;
+    struct cpu_tracker *t = get_cpu_tracker();
+    struct rusage *recent_rusage = &t->recent_rusage;
-    now = time_msec();
-    getrusage(RUSAGE_SELF, &recent_rusage);
-
-    if (now >= newer.when + 3 * 1000) {
-        older = newer;
-        newer.when = now;
-        newer.cpu = (timeval_to_msec(&recent_rusage.ru_utime) +
-                     timeval_to_msec(&recent_rusage.ru_stime));
-
-        if (older.when != LLONG_MIN && newer.cpu > older.cpu) {
-            unsigned int dividend = newer.cpu - older.cpu;
-            unsigned int divisor = (newer.when - older.when) / 100;
-            cpu_usage = divisor > 0 ? dividend / divisor : -1;
-        } else {
-            cpu_usage = -1;
+    /* NOTE(review): on platforms without RUSAGE_THREAD, getrusage_thread()
+     * always fails, so this function becomes a no-op and the tracker's
+     * stats are never refreshed -- confirm that is the intended fallback
+     * (the removed code sampled RUSAGE_SELF unconditionally). */
+    if (!getrusage_thread(recent_rusage)) {
+        long long int now = time_msec();
+        /* Resample at most once every 3 seconds. */
+        if (now >= t->newer.when + 3 * 1000) {
+            t->older = t->newer;
+            t->newer.when = now;
+            t->newer.cpu = (timeval_to_msec(&recent_rusage->ru_utime) +
+                            timeval_to_msec(&recent_rusage->ru_stime));
+
+            if (t->older.when != LLONG_MIN && t->newer.cpu > t->older.cpu) {
+                unsigned int dividend = t->newer.cpu - t->older.cpu;
+                /* Elapsed ms / 100 yields a divisor such that the quotient
+                 * is CPU time as a percentage of wall-clock time. */
+                unsigned int divisor = (t->newer.when - t->older.when) / 100;
+                t->cpu_usage = divisor > 0 ? dividend / divisor : -1;
+            } else {
+                t->cpu_usage = -1;
+            }
        }
    }
}
int
get_cpu_usage(void)
{
-    return cpu_usage;
+    /* Per-thread now: reports the calling thread's own CPU usage rather
+     * than the whole process's. */
+    return get_cpu_tracker()->cpu_usage;
}
\f
/* Unixctl interface. */
int argc OVS_UNUSED, const char *argv[] OVS_UNUSED,
void *aux OVS_UNUSED)
{
- xpthread_rwlock_wrlock(&monotonic_clock.rwlock);
+ ovs_rwlock_wrlock(&monotonic_clock.rwlock);
monotonic_clock.stopped = true;
- xpthread_rwlock_unlock(&monotonic_clock.rwlock);
+ ovs_rwlock_unlock(&monotonic_clock.rwlock);
unixctl_command_reply(conn, NULL);
}
ts.tv_sec = msecs / 1000;
ts.tv_nsec = (msecs % 1000) * 1000 * 1000;
- xpthread_rwlock_wrlock(&monotonic_clock.rwlock);
+ ovs_rwlock_wrlock(&monotonic_clock.rwlock);
timespec_add(&monotonic_clock.warp, &monotonic_clock.warp, &ts);
- xpthread_rwlock_unlock(&monotonic_clock.rwlock);
+ ovs_rwlock_unlock(&monotonic_clock.rwlock);
unixctl_command_reply(conn, "warped");
}