VLOG_DEFINE_THIS_MODULE(coverage);
/* The coverage counters. */
-#if USE_LINKER_SECTIONS
-extern struct coverage_counter *__start_coverage[];
-extern struct coverage_counter *__stop_coverage[];
-#define coverage_counters __start_coverage
-#define n_coverage_counters (__stop_coverage - __start_coverage)
-#else /* !USE_LINKER_SECTIONS */
-#define COVERAGE_COUNTER(COUNTER) \
- DECLARE_EXTERN_PER_THREAD_DATA(unsigned int, \
- counter_##COUNTER); \
- DEFINE_EXTERN_PER_THREAD_DATA(counter_##COUNTER, 0); \
- static unsigned int COUNTER##_count(void) \
- { \
- unsigned int *countp = counter_##COUNTER##_get(); \
- unsigned int count = *countp; \
- *countp = 0; \
- return count; \
- } \
- extern struct coverage_counter counter_##COUNTER; \
- struct coverage_counter counter_##COUNTER \
- = { #COUNTER, COUNTER##_count, 0 };
-#include "coverage.def"
-#undef COVERAGE_COUNTER
-
-extern struct coverage_counter *coverage_counters[];
-struct coverage_counter *coverage_counters[] = {
-#define COVERAGE_COUNTER(NAME) &counter_##NAME,
-#include "coverage.def"
-#undef COVERAGE_COUNTER
-};
-#define n_coverage_counters ARRAY_SIZE(coverage_counters)
-#endif /* !USE_LINKER_SECTIONS */
+/* Counters are now collected at run time via coverage_counter_register()
+ * into a dynamically grown array, replacing both the linker-section
+ * (__start_coverage/__stop_coverage) and the "coverage.def" X-macro
+ * mechanisms that previously enumerated them at link/compile time. */
+static struct coverage_counter **coverage_counters = NULL;
+static size_t n_coverage_counters = 0;
+static size_t allocated_coverage_counters = 0;
static struct ovs_mutex coverage_mutex = OVS_MUTEX_INITIALIZER;
+/* Per-thread deadline (in msec, from time_msec()) for the next flush of this
+ * thread's counters into the global totals; LLONG_MIN is the "not yet
+ * initialized" sentinel, lazily replaced in coverage_clear(). */
+DEFINE_STATIC_PER_THREAD_DATA(long long int, coverage_clear_time, LLONG_MIN);
static long long int coverage_run_time = LLONG_MIN;
/* Index counter used to compute the moving average array's index. */
static unsigned int coverage_array_sum(const unsigned int *arr,
const unsigned int len);
+/* Registers 'counter' with the coverage core so that it is included in all
+ * subsequent aggregation (coverage_clear()) and reporting.  The registry
+ * array grows on demand via x2nrealloc().
+ *
+ * NOTE(review): the array and its two size variables are mutated without
+ * taking 'coverage_mutex'; this is only safe if all registrations happen
+ * before additional threads start (e.g. from constructor functions) --
+ * confirm against the callers of this function. */
+void
+coverage_counter_register(struct coverage_counter* counter)
+{
+ if (n_coverage_counters >= allocated_coverage_counters) {
+ coverage_counters = x2nrealloc(coverage_counters,
+ &allocated_coverage_counters,
+ sizeof(struct coverage_counter*));
+ }
+ coverage_counters[n_coverage_counters++] = counter;
+}
+
+
static void
coverage_unixctl_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
}
}
- svec_add_nocopy(lines, xasprintf("%zu events never hit", n_never_hit));
+ svec_add_nocopy(lines, xasprintf("%"PRIuSIZE" events never hit", n_never_hit));
free(totals);
}
+/* Runs approximately every COVERAGE_CLEAR_INTERVAL amount of time to
+ * synchronize per-thread counters with global counters.  Every thread
+ * maintains its own timer (coverage_clear_time) so that each thread's
+ * pending counts are periodically folded into the global totals, instead
+ * of a single caller draining everything unconditionally as before. */
void
coverage_clear(void)
{
- size_t i;
+ long long int now, *thread_time;
- ovs_mutex_lock(&coverage_mutex);
- for (i = 0; i < n_coverage_counters; i++) {
- struct coverage_counter *c = coverage_counters[i];
- c->total += c->count();
+ now = time_msec();
+ thread_time = coverage_clear_time_get();
+
+ /* First call in this thread: schedule the initial flush one full
+ * interval from now (LLONG_MIN is the uninitialized sentinel). */
+ if (*thread_time == LLONG_MIN) {
+ *thread_time = now + COVERAGE_CLEAR_INTERVAL;
+ }
+
+ if (now >= *thread_time) {
+ size_t i;
+
+ /* Drain this thread's per-counter counts into the shared totals;
+ * 'coverage_mutex' serializes updates to c->total across threads. */
+ ovs_mutex_lock(&coverage_mutex);
+ for (i = 0; i < n_coverage_counters; i++) {
+ struct coverage_counter *c = coverage_counters[i];
+ c->total += c->count();
+ }
+ ovs_mutex_unlock(&coverage_mutex);
+ *thread_time = now + COVERAGE_CLEAR_INTERVAL;
}
- ovs_mutex_unlock(&coverage_mutex);
}
/* Runs approximately every COVERAGE_RUN_INTERVAL amount of time to update the