/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "coverage.h"
#include <inttypes.h>
#include <stdlib.h>
#include "dynamic-string.h"
#include "hash.h"
#include "svec.h"
#include "timeval.h"
#include "unixctl.h"
#include "util.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(coverage);

/* The coverage counters. */
static struct coverage_counter **coverage_counters = NULL;
static size_t n_coverage_counters = 0;
static size_t allocated_coverage_counters = 0;

static struct ovs_mutex coverage_mutex = OVS_MUTEX_INITIALIZER;

DEFINE_STATIC_PER_THREAD_DATA(long long int, coverage_clear_time, LLONG_MIN);
static long long int coverage_run_time = LLONG_MIN;

/* Index counter used to compute the moving average array's index. */
static unsigned int idx_count = 0;

static void coverage_read(struct svec *);
static unsigned int coverage_array_sum(const unsigned int *arr,
                                       const unsigned int len);

/* Registers a coverage counter with the coverage core. */
void
coverage_counter_register(struct coverage_counter *counter)
{
    if (n_coverage_counters >= allocated_coverage_counters) {
        coverage_counters = x2nrealloc(coverage_counters,
                                       &allocated_coverage_counters,
                                       sizeof(struct coverage_counter *));
    }
    coverage_counters[n_coverage_counters++] = counter;
}
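
/* Illustrative usage note (not part of the original file): client code does
 * not normally call coverage_counter_register() directly.  Instead, a module
 * defines a counter with the COVERAGE_DEFINE macro from coverage.h and bumps
 * it with COVERAGE_INC; the macro machinery arranges for registration.  The
 * counter name below is a made-up example:
 *
 *     COVERAGE_DEFINE(example_rx_drop);
 *     ...
 *     COVERAGE_INC(example_rx_drop);
 */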

static void
coverage_unixctl_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
                      const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    struct svec lines;
    char *reply;

    svec_init(&lines);
    coverage_read(&lines);
    reply = svec_join(&lines, "\n", "\n");
    unixctl_command_reply(conn, reply);
    free(reply);
    svec_destroy(&lines);
}

void
coverage_init(void)
{
    unixctl_command_register("coverage/show", "", 0, 0,
                             coverage_unixctl_show, NULL);
}
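
/* Illustrative usage note: once a daemon has called coverage_init(), the
 * counters can be inspected at runtime through the unixctl interface, for
 * example:
 *
 *     ovs-appctl coverage/show
 */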

/* Sorts coverage counters in descending order by total, breaking ties
 * alphabetically by name. */
static int
compare_coverage_counters(const void *a_, const void *b_)
{
    const struct coverage_counter *const *ap = a_;
    const struct coverage_counter *const *bp = b_;
    const struct coverage_counter *a = *ap;
    const struct coverage_counter *b = *bp;

    if (a->total != b->total) {
        return a->total < b->total ? 1 : -1;
    } else {
        return strcmp(a->name, b->name);
    }
}

static uint32_t
coverage_hash(void)
{
    struct coverage_counter **c;
    uint32_t hash = 0;
    int n_groups, i;

    /* Sort coverage counters into groups with equal totals. */
    c = xmalloc(n_coverage_counters * sizeof *c);
    ovs_mutex_lock(&coverage_mutex);
    for (i = 0; i < n_coverage_counters; i++) {
        c[i] = coverage_counters[i];
    }
    ovs_mutex_unlock(&coverage_mutex);
    qsort(c, n_coverage_counters, sizeof *c, compare_coverage_counters);

    /* Hash the names in each group along with the rank. */
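    /* Illustrative example (made-up totals): if the sorted totals are
     * {40, 40, 7, 0}, the nonzero counters form two groups -- the pair with
     * total 40 and the single counter with total 7 -- so both group ranks
     * and all three names feed the hash, while the never-hit counter is
     * ignored. */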
    n_groups = 0;
    for (i = 0; i < n_coverage_counters; ) {
        int j;

        if (!c[i]->total) {
            break;
        }
        n_groups++;
        hash = hash_int(i, hash);
        for (j = i; j < n_coverage_counters; j++) {
            if (c[j]->total != c[i]->total) {
                break;
            }
            hash = hash_string(c[j]->name, hash);
        }
        i = j;
    }
    free(c);

    return hash_int(n_groups, hash);
}

static bool
coverage_hit(uint32_t hash)
{
    enum { HIT_BITS = 1024, BITS_PER_WORD = 32 };
    static uint32_t hit[HIT_BITS / BITS_PER_WORD];
    BUILD_ASSERT_DECL(IS_POW2(HIT_BITS));

    static long long int next_clear = LLONG_MIN;

    unsigned int bit_index = hash & (HIT_BITS - 1);
    unsigned int word_index = bit_index / BITS_PER_WORD;
    unsigned int word_mask = 1u << (bit_index % BITS_PER_WORD);
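
    /* Note (added for clarity): only the low-order bits of 'hash' select a
     * bit in 'hit', so two different event mixes can occasionally collide
     * and be reported as duplicates; the table is deliberately small. */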

    /* Expire coverage hash suppression once a day. */
    if (time_msec() >= next_clear) {
        memset(hit, 0, sizeof hit);
        next_clear = time_msec() + 60 * 60 * 24 * 1000LL;
    }

    if (hit[word_index] & word_mask) {
        return true;
    } else {
        hit[word_index] |= word_mask;
        return false;
    }
}

/* Logs the coverage counters, unless a similar set of events has already been
 * logged.
 *
 * This function logs at log level VLL_INFO.  Use care before adjusting this
 * level, because depending on its configuration, syslogd can write changes
 * synchronously, which can cause the coverage messages to take several
 * seconds to write. */
void
coverage_log(void)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 3);

    if (!VLOG_DROP_INFO(&rl)) {
        uint32_t hash = coverage_hash();
        if (coverage_hit(hash)) {
            VLOG_INFO("Skipping details of duplicate event coverage for "
                      "hash=%08"PRIx32, hash);
        } else {
            struct svec lines;
            const char *line;
            size_t i;

            svec_init(&lines);
            coverage_read(&lines);
            SVEC_FOR_EACH (i, line, &lines) {
                VLOG_INFO("%s", line);
            }
            svec_destroy(&lines);
        }
    }
}

/* Adds coverage counter information to 'lines'. */
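/* Each generated line follows the format string used below; an illustrative
 * line, with a made-up counter name and values, looks roughly like:
 *
 *     example_rx_drop            2.0/sec     1.833/sec        0.0306/sec   total: 110
 */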
static void
coverage_read(struct svec *lines)
{
    struct coverage_counter **c = coverage_counters;
    unsigned long long int *totals;
    size_t n_never_hit;
    uint32_t hash;
    size_t i;

    hash = coverage_hash();

    n_never_hit = 0;
    svec_add_nocopy(lines,
                    xasprintf("Event coverage, avg rate over last: %d "
                              "seconds, last minute, last hour, "
                              "hash=%08"PRIx32":",
                              COVERAGE_RUN_INTERVAL / 1000, hash));

    totals = xmalloc(n_coverage_counters * sizeof *totals);
    ovs_mutex_lock(&coverage_mutex);
    for (i = 0; i < n_coverage_counters; i++) {
        totals[i] = c[i]->total;
    }
    ovs_mutex_unlock(&coverage_mutex);

    for (i = 0; i < n_coverage_counters; i++) {
        if (totals[i]) {
            /* Shows the averaged per-second rates for the last
             * COVERAGE_RUN_INTERVAL interval, the last minute, and
             * the last hour. */
            svec_add_nocopy(lines,
                xasprintf("%-24s %5.1f/sec %9.3f/sec "
                          "%13.4f/sec   total: %llu",
                          c[i]->name,
                          (c[i]->min[(idx_count - 1) % MIN_AVG_LEN]
                           * 1000.0 / COVERAGE_RUN_INTERVAL),
                          coverage_array_sum(c[i]->min, MIN_AVG_LEN) / 60.0,
                          coverage_array_sum(c[i]->hr, HR_AVG_LEN) / 3600.0,
                          totals[i]));
        } else {
            n_never_hit++;
        }
    }

    svec_add_nocopy(lines,
                    xasprintf("%"PRIuSIZE" events never hit", n_never_hit));
    free(totals);
}

/* Runs approximately once every COVERAGE_CLEAR_INTERVAL to synchronize the
 * per-thread counters with the global counters.  Every thread maintains its
 * own timer so that all counters are aggregated periodically. */
void
coverage_clear(void)
{
    long long int now, *thread_time;

    now = time_msec();
    thread_time = coverage_clear_time_get();

    /* Initialize the coverage_clear_time. */
    if (*thread_time == LLONG_MIN) {
        *thread_time = now + COVERAGE_CLEAR_INTERVAL;
    }

    if (now >= *thread_time) {
        size_t i;

        ovs_mutex_lock(&coverage_mutex);
        for (i = 0; i < n_coverage_counters; i++) {
            struct coverage_counter *c = coverage_counters[i];
            c->total += c->count();
        }
        ovs_mutex_unlock(&coverage_mutex);
        *thread_time = now + COVERAGE_CLEAR_INTERVAL;
    }
}

/* Runs approximately once every COVERAGE_RUN_INTERVAL to update the coverage
 * counters' 'min' and 'hr' arrays.  The 'min' array accumulates per-second
 * counts into per-minute counts, and the 'hr' array accumulates per-minute
 * counts into per-hour counts.  Every thread may call this function. */
void
coverage_run(void)
{
    /* Defines the moving average array index variables. */
    static unsigned int min_idx, hr_idx;
    struct coverage_counter **c = coverage_counters;
    long long int now;

    ovs_mutex_lock(&coverage_mutex);
    now = time_msec();
    /* Initialize the coverage_run_time. */
    if (coverage_run_time == LLONG_MIN) {
        coverage_run_time = now + COVERAGE_RUN_INTERVAL;
    }

    if (now >= coverage_run_time) {
        size_t i, j;
        /* Compute the number of COVERAGE_RUN_INTERVAL slots, since the
         * actual interval since the last run may span several multiples of
         * COVERAGE_RUN_INTERVAL. */
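        /* Worked example (added for clarity): if slightly more than two full
         * COVERAGE_RUN_INTERVAL periods have elapsed since coverage_run_time,
         * the division below yields 2 and slots = 3. */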
        int slots = (now - coverage_run_time) / COVERAGE_RUN_INTERVAL + 1;

        for (i = 0; i < n_coverage_counters; i++) {
            unsigned int count, portion;
            unsigned int m_idx = min_idx;
            unsigned int h_idx = hr_idx;
            unsigned int idx = idx_count;

            /* Compute the difference between the current total and the one
             * recorded in the last invocation of coverage_run(). */
            count = c[i]->total - c[i]->last_total;
            c[i]->last_total = c[i]->total;
            /* The count over the time interval is evenly distributed
             * among the slots by calculating the portion. */
            portion = count / slots;
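            /* Worked example (added for clarity): count = 7 spread over
             * slots = 3 gives portion = 2; the remainder (7 % 3 = 1) is added
             * to the final slot below, so the slots receive 2, 2, and 3. */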

            for (j = 0; j < slots; j++) {
                /* Update the index variables. */
                /* The m_idx increases from 0 to MIN_AVG_LEN - 1.  Every time
                 * m_idx finishes a cycle (one cycle is one minute), h_idx is
                 * incremented by 1. */
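                /* Illustrative example, assuming MIN_AVG_LEN is 60: idx 59
                 * maps to m_idx 59, h_idx 0, while idx 60 maps to m_idx 0,
                 * h_idx 1, i.e. a new minute bucket begins. */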
                m_idx = idx % MIN_AVG_LEN;
                h_idx = idx / MIN_AVG_LEN;

                c[i]->min[m_idx] = portion + (j == (slots - 1)
                                              ? count % slots : 0);
                c[i]->hr[h_idx] = m_idx == 0
                                  ? c[i]->min[m_idx]
                                  : (c[i]->hr[h_idx] + c[i]->min[m_idx]);
                /* This guarantees that h_idx stays in the range 0 to
                 * HR_AVG_LEN - 1. */
                idx = (idx + 1) % (MIN_AVG_LEN * HR_AVG_LEN);
            }
        }

        /* Update the global index variables. */
        idx_count = (idx_count + slots) % (MIN_AVG_LEN * HR_AVG_LEN);
        min_idx = idx_count % MIN_AVG_LEN;
        hr_idx = idx_count / MIN_AVG_LEN;
        /* Update the run time. */
        coverage_run_time = now + COVERAGE_RUN_INTERVAL;
    }
    ovs_mutex_unlock(&coverage_mutex);
}

static unsigned int
coverage_array_sum(const unsigned int *arr, const unsigned int len)
{
    unsigned int sum = 0;
    size_t i;

    ovs_mutex_lock(&coverage_mutex);
    for (i = 0; i < len; i++) {
        sum += arr[i];
    }
    ovs_mutex_unlock(&coverage_mutex);

    return sum;
}