struct clock {
clockid_t id; /* CLOCK_MONOTONIC or CLOCK_REALTIME. */
- pthread_rwlock_t rwlock; /* Mutual exclusion for 'cache'. */
/* Features for use by unit tests. Protected by 'rwlock'. */
+ struct ovs_rwlock rwlock; /* Mutual exclusion for 'cache' and the test-only fields below. */
struct timespec warp; /* Offset added for unit tests. */
bool stopped; /* Disables real-time updates if true. */
- /* Relevant only if CACHE_TIME is true. */
- volatile sig_atomic_t tick; /* Has the timer ticked? Set by signal. */
struct timespec cache; /* Last time read from kernel. */
};
* LLONG_MAX). */
static long long int deadline = LLONG_MAX;
-static void set_up_timer(void);
-static void set_up_signal(int flags);
-static void sigalrm_handler(int);
-static void block_sigalrm(sigset_t *);
-static void unblock_sigalrm(const sigset_t *);
+/* Monotonic time, in milliseconds, at which the last call to time_poll() woke
+ * up. */
+DEFINE_STATIC_PER_THREAD_DATA(long long int, last_wakeup, 0);
+
static void log_poll_interval(long long int last_wakeup);
static struct rusage *get_recent_rusage(void);
static void refresh_rusage(void);
{
memset(c, 0, sizeof *c);
c->id = id;
- xpthread_rwlock_init(&c->rwlock, NULL);
+ ovs_rwlock_init(&c->rwlock);
xclock_gettime(c->id, &c->cache);
}
: CLOCK_REALTIME));
init_clock(&wall_clock, CLOCK_REALTIME);
boot_time = timespec_to_msec(&monotonic_clock.cache);
-
- set_up_signal(SA_RESTART);
- set_up_timer();
}
/* Initializes the timetracking module, if not already initialized. */
pthread_once(&once, do_init_time);
}
-static void
-set_up_signal(int flags)
-{
- struct sigaction sa;
-
- memset(&sa, 0, sizeof sa);
- sa.sa_handler = sigalrm_handler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = flags;
- xsigaction(SIGALRM, &sa, NULL);
-}
-
-static void
-set_up_timer(void)
-{
- static timer_t timer_id; /* "static" to avoid apparent memory leak. */
- struct itimerspec itimer;
-
- if (!CACHE_TIME) {
- return;
- }
-
- if (timer_create(monotonic_clock.id, NULL, &timer_id)) {
- VLOG_FATAL("timer_create failed (%s)", ovs_strerror(errno));
- }
-
- itimer.it_interval.tv_sec = 0;
- itimer.it_interval.tv_nsec = TIME_UPDATE_INTERVAL * 1000 * 1000;
- itimer.it_value = itimer.it_interval;
-
- if (timer_settime(timer_id, 0, &itimer, NULL)) {
- VLOG_FATAL("timer_settime failed (%s)", ovs_strerror(errno));
- }
-}
-
-/* Set up the interval timer, to ensure that time advances even without calling
- * time_refresh().
- *
- * A child created with fork() does not inherit the parent's interval timer, so
- * this function needs to be called from the child after fork(). */
-void
-time_postfork(void)
-{
- time_init();
- set_up_timer();
-}
-
-/* Forces a refresh of the current time from the kernel. It is not usually
- * necessary to call this function, since the time will be refreshed
- * automatically at least every TIME_UPDATE_INTERVAL milliseconds. If
- * CACHE_TIME is false, we will always refresh the current time so this
- * function has no effect. */
-void
-time_refresh(void)
-{
- monotonic_clock.tick = wall_clock.tick = true;
-}
-
static void
time_timespec__(struct clock *c, struct timespec *ts)
{
time_init();
- for (;;) {
- /* Use the cached time by preference, but fall through if there's been
- * a clock tick. */
- xpthread_rwlock_rdlock(&c->rwlock);
- if (c->stopped || !c->tick) {
- timespec_add(ts, &c->cache, &c->warp);
- xpthread_rwlock_unlock(&c->rwlock);
- return;
- }
- xpthread_rwlock_unlock(&c->rwlock);
- /* Refresh the cache. */
- xpthread_rwlock_wrlock(&c->rwlock);
- if (c->tick) {
- c->tick = false;
- xclock_gettime(c->id, &c->cache);
- }
- xpthread_rwlock_unlock(&c->rwlock);
+ /* Common case: the clock is running, so query the kernel directly (no
+ * cache).  NOTE(review): 'stopped' is read here without holding 'rwlock';
+ * presumably a momentarily stale value is acceptable -- confirm. */
+ if (!c->stopped) {
+ xclock_gettime(c->id, ts);
+ } else {
+ /* Stopped (test mode): return the frozen cache plus the test warp
+ * offset, under 'rwlock' since both fields are written elsewhere. */
+ ovs_rwlock_rdlock(&c->rwlock);
+ timespec_add(ts, &c->cache, &c->warp);
+ ovs_rwlock_unlock(&c->rwlock);
}
}
assert_single_threaded();
time_init();
- time_refresh();
now = time_msec();
msecs = secs * 1000LL;
* timeout is reached. (Because of this property, this function will
* never return -EINTR.)
*
- * - As a side effect, refreshes the current time (like time_refresh()).
- *
* Stores the number of milliseconds elapsed during poll in '*elapsed'. */
int
time_poll(struct pollfd *pollfds, int n_pollfds, long long int timeout_when,
int *elapsed)
{
- static long long int last_wakeup = 0;
+ long long int *last_wakeup = last_wakeup_get();
long long int start;
- sigset_t oldsigs;
- bool blocked;
int retval;
- time_refresh();
- if (last_wakeup) {
- log_poll_interval(last_wakeup);
+ time_init();
+ if (*last_wakeup) {
+ log_poll_interval(*last_wakeup);
}
coverage_clear();
start = time_msec();
- blocked = false;
timeout_when = MIN(timeout_when, deadline);
retval = -errno;
}
- time_refresh();
if (deadline <= time_msec()) {
fatal_signal_handler(SIGALRM);
if (retval < 0) {
if (retval != -EINTR) {
break;
}
-
- if (!blocked && CACHE_TIME) {
- block_sigalrm(&oldsigs);
- blocked = true;
- }
}
- if (blocked) {
- unblock_sigalrm(&oldsigs);
- }
- last_wakeup = time_msec();
+ *last_wakeup = time_msec();
refresh_rusage();
- *elapsed = last_wakeup - start;
+ *elapsed = *last_wakeup - start;
return retval;
}
-static void
-sigalrm_handler(int sig_nr OVS_UNUSED)
-{
- monotonic_clock.tick = wall_clock.tick = true;
-}
-
-static void
-block_sigalrm(sigset_t *oldsigs)
-{
- sigset_t sigalrm;
- sigemptyset(&sigalrm);
- sigaddset(&sigalrm, SIGALRM);
- xpthread_sigmask(SIG_BLOCK, &sigalrm, oldsigs);
-}
-
-static void
-unblock_sigalrm(const sigset_t *oldsigs)
-{
- xpthread_sigmask(SIG_SETMASK, oldsigs, NULL);
-}
-
long long int
timespec_to_msec(const struct timespec *ts)
{
unsigned long long int cpu; /* Total user+system CPU usage when sampled. */
};
-static struct rusage recent_rusage;
-static struct cpu_usage older = { LLONG_MIN, 0 };
-static struct cpu_usage newer = { LLONG_MIN, 0 };
-static int cpu_usage = -1;
+/* Per-thread state for estimating the calling thread's recent CPU usage. */
+struct cpu_tracker {
+ struct cpu_usage older; /* Sample from an earlier refresh_rusage(). */
+ struct cpu_usage newer; /* Most recent sample. */
+ int cpu_usage; /* CPU usage in percent (see refresh_rusage()), or -1. */
+
+ struct rusage recent_rusage; /* Last rusage read for this thread. */
+};
+DEFINE_PER_THREAD_MALLOCED_DATA(struct cpu_tracker *, cpu_tracker_var);
+
+/* Returns the calling thread's 'struct cpu_tracker', allocating and
+ * initializing it on first use. */
+static struct cpu_tracker *
+get_cpu_tracker(void)
+{
+ struct cpu_tracker *t = cpu_tracker_var_get();
+ if (!t) {
+ t = xzalloc(sizeof *t);
+ t->older.when = LLONG_MIN;
+ t->newer.when = LLONG_MIN;
+ /* "Unknown" until a usable pair of samples exists, matching the
+ * 'static int cpu_usage = -1' global that this replaces; without
+ * this, get_cpu_usage() would report 0% instead of -1 before the
+ * first measurement (or forever, where RUSAGE_THREAD is missing). */
+ t->cpu_usage = -1;
+ cpu_tracker_var_set_unsafe(t);
+ }
+ return t;
+}
static struct rusage *
get_recent_rusage(void)
{
- return &recent_rusage;
+ return &get_cpu_tracker()->recent_rusage;
+}
+
+/* Like getrusage(), but for the calling thread only.  Returns 0 on success.
+ * On systems without the (Linux-specific) RUSAGE_THREAD, always fails,
+ * returning -1 with errno set to EINVAL. */
+static int
+getrusage_thread(struct rusage *rusage OVS_UNUSED)
+{
+#ifdef RUSAGE_THREAD
+ return getrusage(RUSAGE_THREAD, rusage);
+#else
+ errno = EINVAL;
+ return -1;
+#endif
}
static void
refresh_rusage(void)
{
- long long int now;
+ struct cpu_tracker *t = get_cpu_tracker();
+ struct rusage *recent_rusage = &t->recent_rusage;
- now = time_msec();
- getrusage(RUSAGE_SELF, &recent_rusage);
-
- if (now >= newer.when + 3 * 1000) {
- older = newer;
- newer.when = now;
- newer.cpu = (timeval_to_msec(&recent_rusage.ru_utime) +
- timeval_to_msec(&recent_rusage.ru_stime));
-
- if (older.when != LLONG_MIN && newer.cpu > older.cpu) {
- unsigned int dividend = newer.cpu - older.cpu;
- unsigned int divisor = (newer.when - older.when) / 100;
- cpu_usage = divisor > 0 ? dividend / divisor : -1;
- } else {
- cpu_usage = -1;
+ /* If per-thread rusage is unavailable (see getrusage_thread()), this
+ * silently leaves the tracker unchanged. */
+ if (!getrusage_thread(recent_rusage)) {
+ long long int now = time_msec();
+ /* Take a new sample at most every 3 seconds. */
+ if (now >= t->newer.when + 3 * 1000) {
+ t->older = t->newer;
+ t->newer.when = now;
+ t->newer.cpu = (timeval_to_msec(&recent_rusage->ru_utime) +
+ timeval_to_msec(&recent_rusage->ru_stime));
+
+ if (t->older.when != LLONG_MIN && t->newer.cpu > t->older.cpu) {
+ /* Percent CPU: ms of CPU consumed per 1/100 of elapsed ms. */
+ unsigned int dividend = t->newer.cpu - t->older.cpu;
+ unsigned int divisor = (t->newer.when - t->older.when) / 100;
+ t->cpu_usage = divisor > 0 ? dividend / divisor : -1;
+ } else {
+ t->cpu_usage = -1;
+ }
}
}
}
int
get_cpu_usage(void)
{
- return cpu_usage;
+ return get_cpu_tracker()->cpu_usage;
}
\f
/* Unixctl interface. */
int argc OVS_UNUSED, const char *argv[] OVS_UNUSED,
void *aux OVS_UNUSED)
{
- xpthread_rwlock_wrlock(&monotonic_clock.rwlock);
+ ovs_rwlock_wrlock(&monotonic_clock.rwlock);
monotonic_clock.stopped = true;
- xpthread_rwlock_unlock(&monotonic_clock.rwlock);
+ /* Refresh 'cache' so that, while stopped, time_timespec__() reports the
+ * moment the clock was stopped (plus any warp) rather than a stale read. */
+ xclock_gettime(monotonic_clock.id, &monotonic_clock.cache);
+ ovs_rwlock_unlock(&monotonic_clock.rwlock);
unixctl_command_reply(conn, NULL);
}
ts.tv_sec = msecs / 1000;
ts.tv_nsec = (msecs % 1000) * 1000 * 1000;
- xpthread_rwlock_wrlock(&monotonic_clock.rwlock);
+ ovs_rwlock_wrlock(&monotonic_clock.rwlock);
timespec_add(&monotonic_clock.warp, &monotonic_clock.warp, &ts);
- xpthread_rwlock_unlock(&monotonic_clock.rwlock);
+ ovs_rwlock_unlock(&monotonic_clock.rwlock);
unixctl_command_reply(conn, "warped");
}