lib/coverage.c
/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "coverage.h"
#include <inttypes.h>
#include <stdlib.h>
#include "dynamic-string.h"
#include "hash.h"
#include "svec.h"
#include "timeval.h"
#include "unixctl.h"
#include "util.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(coverage);

/* The coverage counters. */
#if USE_LINKER_SECTIONS
extern struct coverage_counter *__start_coverage[];
extern struct coverage_counter *__stop_coverage[];
#define coverage_counters __start_coverage
#define n_coverage_counters  (__stop_coverage - __start_coverage)
#else  /* !USE_LINKER_SECTIONS */
#define COVERAGE_COUNTER(COUNTER)                                       \
        DECLARE_EXTERN_PER_THREAD_DATA(unsigned int,                    \
                                       counter_##COUNTER);              \
        DEFINE_EXTERN_PER_THREAD_DATA(counter_##COUNTER, 0);            \
        static unsigned int COUNTER##_count(void)                       \
        {                                                               \
            unsigned int *countp = counter_##COUNTER##_get();           \
            unsigned int count = *countp;                               \
            *countp = 0;                                                \
            return count;                                               \
        }                                                               \
        extern struct coverage_counter counter_##COUNTER;               \
        struct coverage_counter counter_##COUNTER                       \
            = { #COUNTER, COUNTER##_count, 0 };
#include "coverage.def"
#undef COVERAGE_COUNTER

extern struct coverage_counter *coverage_counters[];
struct coverage_counter *coverage_counters[] = {
#define COVERAGE_COUNTER(NAME) &counter_##NAME,
#include "coverage.def"
#undef COVERAGE_COUNTER
};
#define n_coverage_counters ARRAY_SIZE(coverage_counters)
#endif  /* !USE_LINKER_SECTIONS */
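
/* How counters come into existence: each counter is declared by a
 * COVERAGE_COUNTER(...) line in coverage.def, which (in the
 * !USE_LINKER_SECTIONS build above) expands into a per-thread count plus a
 * <NAME>_count() helper that reads and zeroes the calling thread's count.
 * Code elsewhere bumps a counter through the COVERAGE_INC() macro in
 * coverage.h, for example (illustrative counter name):
 *
 *     COVERAGE_INC(flow_extract);
 */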

static struct ovs_mutex coverage_mutex = OVS_MUTEX_INITIALIZER;

DEFINE_STATIC_PER_THREAD_DATA(long long int, coverage_clear_time, LLONG_MIN);
static long long int coverage_run_time = LLONG_MIN;

/* Running index used to compute the current slot in the moving-average
 * arrays. */
static unsigned int idx_count = 0;

static void coverage_read(struct svec *);
static unsigned int coverage_array_sum(const unsigned int *arr,
                                       const unsigned int len);

static void
coverage_unixctl_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
                      const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    struct svec lines;
    char *reply;

    svec_init(&lines);
    coverage_read(&lines);
    reply = svec_join(&lines, "\n", "\n");
    unixctl_command_reply(conn, reply);
    free(reply);
    svec_destroy(&lines);
}

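/* Registers the "coverage/show" unixctl command, which with the standard OVS
 * tooling is typically invoked as "ovs-appctl coverage/show". */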
void
coverage_init(void)
{
    unixctl_command_register("coverage/show", "", 0, 0,
                             coverage_unixctl_show, NULL);
}

/* Sorts coverage counters in descending order by total, breaking ties
 * alphabetically by name. */
static int
compare_coverage_counters(const void *a_, const void *b_)
{
    const struct coverage_counter *const *ap = a_;
    const struct coverage_counter *const *bp = b_;
    const struct coverage_counter *a = *ap;
    const struct coverage_counter *b = *bp;
    if (a->total != b->total) {
        return a->total < b->total ? 1 : -1;
    } else {
        return strcmp(a->name, b->name);
    }
}

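/* Returns a hash that summarizes which counters are currently nonzero and how
 * their totals rank relative to one another: counters are sorted by total,
 * counters with equal totals fall into the same group, and each group's rank
 * and member names feed the hash.  Two snapshots with the same "shape" of
 * activity therefore produce the same hash, which coverage_log() uses to
 * suppress near-duplicate reports. */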
static uint32_t
coverage_hash(void)
{
    struct coverage_counter **c;
    uint32_t hash = 0;
    int n_groups, i;

    /* Sort coverage counters into groups with equal totals. */
    c = xmalloc(n_coverage_counters * sizeof *c);
    ovs_mutex_lock(&coverage_mutex);
    for (i = 0; i < n_coverage_counters; i++) {
        c[i] = coverage_counters[i];
    }
    ovs_mutex_unlock(&coverage_mutex);
    qsort(c, n_coverage_counters, sizeof *c, compare_coverage_counters);

    /* Hash the names in each group along with the rank. */
    n_groups = 0;
    for (i = 0; i < n_coverage_counters; ) {
        int j;

        if (!c[i]->total) {
            break;
        }
        n_groups++;
        hash = hash_int(i, hash);
        for (j = i; j < n_coverage_counters; j++) {
            if (c[j]->total != c[i]->total) {
                break;
            }
            hash = hash_string(c[j]->name, hash);
        }
        i = j;
    }

    free(c);

    return hash_int(n_groups, hash);
}

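/* Returns true if 'hash' has been seen since the table below was last cleared
 * (roughly once a day), false otherwise, and records 'hash' as seen.  Because
 * distinct hashes can map to the same bit, an occasional non-duplicate report
 * may be suppressed as well. */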
static bool
coverage_hit(uint32_t hash)
{
    enum { HIT_BITS = 1024, BITS_PER_WORD = 32 };
    static uint32_t hit[HIT_BITS / BITS_PER_WORD];
    BUILD_ASSERT_DECL(IS_POW2(HIT_BITS));

    static long long int next_clear = LLONG_MIN;

    unsigned int bit_index = hash & (HIT_BITS - 1);
    unsigned int word_index = bit_index / BITS_PER_WORD;
    unsigned int word_mask = 1u << (bit_index % BITS_PER_WORD);

    /* Expire coverage hash suppression once a day. */
    if (time_msec() >= next_clear) {
        memset(hit, 0, sizeof hit);
        next_clear = time_msec() + 60 * 60 * 24 * 1000LL;
    }

    if (hit[word_index] & word_mask) {
        return true;
    } else {
        hit[word_index] |= word_mask;
        return false;
    }
}

/* Logs the coverage counters, unless a similar set of events has already been
 * logged.
 *
 * This function logs at log level VLL_INFO.  Use care before adjusting this
 * level, because depending on its configuration, syslogd can write log
 * messages synchronously, which can cause the coverage messages to take
 * several seconds to write. */
void
coverage_log(void)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 3);

    if (!VLOG_DROP_INFO(&rl)) {
        uint32_t hash = coverage_hash();
        if (coverage_hit(hash)) {
            VLOG_INFO("Skipping details of duplicate event coverage for "
                      "hash=%08"PRIx32, hash);
        } else {
            struct svec lines;
            const char *line;
            size_t i;

            svec_init(&lines);
            coverage_read(&lines);
            SVEC_FOR_EACH (i, line, &lines) {
                VLOG_INFO("%s", line);
            }
            svec_destroy(&lines);
        }
    }
}

/* Adds coverage counter information to 'lines'. */
static void
coverage_read(struct svec *lines)
{
    struct coverage_counter **c = coverage_counters;
    unsigned long long int *totals;
    size_t n_never_hit;
    uint32_t hash;
    size_t i;

    hash = coverage_hash();

    n_never_hit = 0;
    svec_add_nocopy(lines,
                    xasprintf("Event coverage, avg rate over last: %d "
                              "seconds, last minute, last hour,  "
                              "hash=%08"PRIx32":",
                              COVERAGE_RUN_INTERVAL/1000, hash));

    totals = xmalloc(n_coverage_counters * sizeof *totals);
    ovs_mutex_lock(&coverage_mutex);
    for (i = 0; i < n_coverage_counters; i++) {
        totals[i] = c[i]->total;
    }
    ovs_mutex_unlock(&coverage_mutex);

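    /* Each counter with a nonzero total becomes one line of output.  With a
     * hypothetical counter named "xyz", a line might look roughly like:
     *
     *   xyz                        2.0/sec     0.033/sec        0.0006/sec   total: 120
     *
     * (The numbers are made up for illustration; the format string below is
     * what actually defines the layout.) */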
    for (i = 0; i < n_coverage_counters; i++) {
        if (totals[i]) {
            /* Shows the average per-second rate over the most recent
             * COVERAGE_RUN_INTERVAL, over the last minute, and over the
             * last hour. */
            svec_add_nocopy(lines,
                xasprintf("%-24s %5.1f/sec %9.3f/sec "
                          "%13.4f/sec   total: %llu",
                          c[i]->name,
                          (c[i]->min[(idx_count - 1) % MIN_AVG_LEN]
                           * 1000.0 / COVERAGE_RUN_INTERVAL),
                          coverage_array_sum(c[i]->min, MIN_AVG_LEN) / 60.0,
                          coverage_array_sum(c[i]->hr,  HR_AVG_LEN) / 3600.0,
                          totals[i]));
        } else {
            n_never_hit++;
        }
    }

    svec_add_nocopy(lines, xasprintf("%zu events never hit", n_never_hit));
    free(totals);
}

/* Runs approximately once every COVERAGE_CLEAR_INTERVAL milliseconds to fold
 * the calling thread's per-thread counts into the global totals.  Every thread
 * maintains its own timer so that all counters are aggregated periodically. */
void
coverage_clear(void)
{
    long long int now, *thread_time;

    now = time_msec();
    thread_time = coverage_clear_time_get();

    /* Initialize the coverage_clear_time. */
    if (*thread_time == LLONG_MIN) {
        *thread_time = now + COVERAGE_CLEAR_INTERVAL;
    }

    if (now >= *thread_time) {
        size_t i;

        ovs_mutex_lock(&coverage_mutex);
        for (i = 0; i < n_coverage_counters; i++) {
            struct coverage_counter *c = coverage_counters[i];
            c->total += c->count();
        }
        ovs_mutex_unlock(&coverage_mutex);
        *thread_time = now + COVERAGE_CLEAR_INTERVAL;
    }
}

/* Runs approximately once every COVERAGE_RUN_INTERVAL milliseconds to update
 * the coverage counters' 'min' and 'hr' arrays.  The 'min' array holds one
 * count per COVERAGE_RUN_INTERVAL slot over the last minute; the 'hr' array
 * holds one count per minute over the last hour.  Any thread may call this
 * function. */
void
coverage_run(void)
{
    /* Defines the moving average array index variables. */
    static unsigned int min_idx, hr_idx;
    struct coverage_counter **c = coverage_counters;
    long long int now;

    ovs_mutex_lock(&coverage_mutex);
    now = time_msec();
    /* Initialize the coverage_run_time. */
    if (coverage_run_time == LLONG_MIN) {
        coverage_run_time = now + COVERAGE_RUN_INTERVAL;
    }

    if (now >= coverage_run_time) {
        size_t i, j;
        /* Computes the number of COVERAGE_RUN_INTERVAL slots to fill, since
         * the time elapsed since the last run may span multiple intervals. */
        int slots = (now - coverage_run_time) / COVERAGE_RUN_INTERVAL + 1;
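        /* Worked example (made-up numbers): if slots == 3 and a counter
         * advanced by 10 since the previous run, then portion == 3 below and
         * the three slots record 3, 3 and 4, with the remainder
         * (count % slots) added to the last slot. */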

        for (i = 0; i < n_coverage_counters; i++) {
            unsigned int count, portion;
            unsigned int m_idx = min_idx;
            unsigned int h_idx = hr_idx;
            unsigned int idx = idx_count;

            /* Computes the difference between the current total and the one
             * recorded in the last invocation of coverage_run(). */
            count = c[i]->total - c[i]->last_total;
            c[i]->last_total = c[i]->total;
            /* Distributes the count evenly among the slots; any remainder is
             * added to the last slot below. */
            portion = count / slots;

            for (j = 0; j < slots; j++) {
                /* Updates the index variables: m_idx cycles from 0 to
                 * MIN_AVG_LEN - 1, and each time it completes a full cycle
                 * (one minute), h_idx advances by one. */
                m_idx = idx % MIN_AVG_LEN;
                h_idx = idx / MIN_AVG_LEN;

                c[i]->min[m_idx] = portion + (j == (slots - 1)
                                              ? count % slots : 0);
                c[i]->hr[h_idx] = m_idx == 0
                                  ? c[i]->min[m_idx]
                                  : (c[i]->hr[h_idx] + c[i]->min[m_idx]);
                /* Wraps idx so that h_idx stays within [0, HR_AVG_LEN). */
                idx = (idx + 1) % (MIN_AVG_LEN * HR_AVG_LEN);
            }
        }

        /* Updates the global index variables. */
        idx_count = (idx_count + slots) % (MIN_AVG_LEN * HR_AVG_LEN);
        min_idx = idx_count % MIN_AVG_LEN;
        hr_idx  = idx_count / MIN_AVG_LEN;
        /* Updates the run time. */
        coverage_run_time = now + COVERAGE_RUN_INTERVAL;
    }
    ovs_mutex_unlock(&coverage_mutex);
}

static unsigned int
coverage_array_sum(const unsigned int *arr, const unsigned int len)
{
    unsigned int sum = 0;
    size_t i;

    ovs_mutex_lock(&coverage_mutex);
    for (i = 0; i < len; i++) {
        sum += arr[i];
    }
    ovs_mutex_unlock(&coverage_mutex);
    return sum;
}