+ coverage_log();
+ }
+}
+\f
+/* CPU usage tracking. */
+
/* A single sample of this thread's cumulative CPU consumption, used in
 * pairs to estimate CPU usage over an interval. */
struct cpu_usage {
    long long int when;         /* Time that this sample was taken. */
    unsigned long long int cpu; /* Total user+system CPU usage when sampled. */
};
+
/* Per-thread CPU usage tracking state.  Usage is estimated by comparing two
 * samples taken a few seconds apart (see refresh_rusage()). */
struct cpu_tracker {
    struct cpu_usage older;     /* Previous sample; 'when' is LLONG_MIN until
                                 * two samples have been taken. */
    struct cpu_usage newer;     /* Most recent sample; 'when' is LLONG_MIN
                                 * until the first sample is taken. */
    int cpu_usage;              /* Estimated CPU usage, as a percentage, over
                                 * the interval between the two samples, or -1
                                 * if no estimate is available. */

    struct rusage recent_rusage; /* Most recent getrusage() result. */
};
/* One 'struct cpu_tracker *' per thread, lazily allocated. */
DEFINE_PER_THREAD_MALLOCED_DATA(struct cpu_tracker *, cpu_tracker_var);
+
+static struct cpu_tracker *
+get_cpu_tracker(void)
+{
+ struct cpu_tracker *t = cpu_tracker_var_get();
+ if (!t) {
+ t = xzalloc(sizeof *t);
+ t->older.when = LLONG_MIN;
+ t->newer.when = LLONG_MIN;
+ cpu_tracker_var_set_unsafe(t);
+ }
+ return t;
+}
+
+static struct rusage *
+get_recent_rusage(void)
+{
+ return &get_cpu_tracker()->recent_rusage;
+}
+
+static int
+getrusage_thread(struct rusage *rusage OVS_UNUSED)
+{
+#ifdef RUSAGE_THREAD
+ return getrusage(RUSAGE_THREAD, rusage);
+#else
+ errno = EINVAL;
+ return -1;
+#endif
+}
+
+static void
+refresh_rusage(void)
+{
+ struct cpu_tracker *t = get_cpu_tracker();
+ struct rusage *recent_rusage = &t->recent_rusage;
+
+ if (!getrusage_thread(recent_rusage)) {
+ long long int now = time_msec();
+ if (now >= t->newer.when + 3 * 1000) {
+ t->older = t->newer;
+ t->newer.when = now;
+ t->newer.cpu = (timeval_to_msec(&recent_rusage->ru_utime) +
+ timeval_to_msec(&recent_rusage->ru_stime));
+
+ if (t->older.when != LLONG_MIN && t->newer.cpu > t->older.cpu) {
+ unsigned int dividend = t->newer.cpu - t->older.cpu;
+ unsigned int divisor = (t->newer.when - t->older.when) / 100;
+ t->cpu_usage = divisor > 0 ? dividend / divisor : -1;
+ } else {
+ t->cpu_usage = -1;
+ }
+ }
+ }
+}
+
+/* Returns an estimate of this process's CPU usage, as a percentage, over the
+ * past few seconds of wall-clock time. Returns -1 if no estimate is available
+ * (which will happen if the process has not been running long enough to have
+ * an estimate, and can happen for other reasons as well). */
+int
+get_cpu_usage(void)
+{
+ return get_cpu_tracker()->cpu_usage;
+}
+\f
+/* Unixctl interface. */
+
+/* "time/stop" stops the monotonic time returned by e.g. time_msec() from
+ * advancing, except due to later calls to "time/warp". */
+static void
+timeval_stop_cb(struct unixctl_conn *conn,
+ int argc OVS_UNUSED, const char *argv[] OVS_UNUSED,
+ void *aux OVS_UNUSED)
+{
+ ovs_mutex_lock(&monotonic_clock.mutex);
+ atomic_store(&monotonic_clock.slow_path, true);
+ monotonic_clock.stopped = true;
+ xclock_gettime(monotonic_clock.id, &monotonic_clock.cache);
+ ovs_mutex_unlock(&monotonic_clock.mutex);