timeval: On Linux x86-64 systems, refresh time whenever it is requested.
diff --git a/lib/timeval.c b/lib/timeval.c
index 3f12b50..3d339e4 100644
--- a/lib/timeval.c
+++ b/lib/timeval.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2009, 2010, 2011, 2012 Nicira Networks.
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012 Nicira, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -40,7 +40,7 @@ VLOG_DEFINE_THIS_MODULE(timeval);
  * to CLOCK_REALTIME. */
 static clockid_t monotonic_clock;
 
-/* Has a timer tick occurred?
+/* Has a timer tick occurred? Only relevant if CACHE_TIME is 1.
  *
  * We initialize these to true to force time_init() to get called on the first
  * call to time_msec() or another function that queries the current time. */
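The comment above describes the core caching scheme: a periodic SIGALRM only sets a volatile flag, and the time-query functions re-read the clock only when that flag is set. Below is a minimal, self-contained sketch of that pattern; the names, the 100 ms period, and the use of setitimer() are assumptions for illustration, not OVS's actual timeval.c, which caches wall-clock and monotonic timestamps separately.

    #include <signal.h>
    #include <stdbool.h>
    #include <string.h>
    #include <sys/time.h>

    /* Sketch only: one cached timestamp and one "stale" flag.  Starting with
     * the flag set forces a refresh on the very first query, mirroring the
     * wall_tick/monotonic_tick initialization described above. */
    static volatile sig_atomic_t tick = true;
    static long long cached_msec;

    static void
    sigalrm_handler(int sig)
    {
        (void) sig;
        tick = true;              /* Async-signal-safe: only set a flag. */
    }

    static long long
    now_msec(void)
    {
        if (tick) {               /* Hit the kernel only when the timer fired. */
            struct timeval tv;
            gettimeofday(&tv, NULL);
            cached_msec = tv.tv_sec * 1000LL + tv.tv_usec / 1000;
            tick = false;
        }
        return cached_msec;
    }

    static void
    start_cache_timer(void)
    {
        struct sigaction sa;
        struct itimerval it;

        memset(&sa, 0, sizeof sa);
        sa.sa_handler = sigalrm_handler;
        sa.sa_flags = SA_RESTART;              /* As in set_up_signal(SA_RESTART). */
        sigemptyset(&sa.sa_mask);
        sigaction(SIGALRM, &sa, NULL);

        it.it_interval.tv_sec = 0;
        it.it_interval.tv_usec = 100 * 1000;   /* Assumed 100 ms refresh period. */
        it.it_value = it.it_interval;
        setitimer(ITIMER_REAL, &it, NULL);
    }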
@@ -94,8 +94,11 @@ time_init(void)
         VLOG_DBG("monotonic timer not available");
     }
 
-    set_up_signal(SA_RESTART);
-    set_up_timer();
+    if (CACHE_TIME) {
+        set_up_signal(SA_RESTART);
+        set_up_timer();
+    }
+
     boot_time = time_msec();
 }
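With this change, the signal handler and the periodic timer are installed only when CACHE_TIME is nonzero; when it is zero, every time query goes straight to the kernel. Per the commit subject, the motivation is that on Linux x86-64 the clock reads go through the vDSO and are cheap enough not to cache. A hypothetical compile-time definition along those lines (OVS's actual definition of CACHE_TIME may differ):

    /* Hypothetical: cache time behind a periodic SIGALRM only on platforms
     * where reading the clock means a real system call. */
    #if defined(__linux__) && defined(__x86_64__)
    #define CACHE_TIME 0    /* gettimeofday()/clock_gettime() use the vDSO. */
    #else
    #define CACHE_TIME 1
    #endif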
 
@@ -168,7 +171,16 @@ void
 time_postfork(void)
 {
     time_init();
-    set_up_timer();
+
+    if (CACHE_TIME) {
+        set_up_timer();
+    } else {
+        /* If we are not caching kernel time, the only reason the timer should
+         * exist is if time_alarm() was called and a deadline is set. */
+        if (deadline != TIME_MIN) {
+            set_up_timer();
+        }
+    }
 }
 
 static void
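Interval timers are reset in a child process after fork(), so a child that still wants cached time has to re-arm its own timer; time_postfork() now does that only when the timer is actually needed, either because CACHE_TIME is set or because a time_alarm() deadline is armed. A hedged usage sketch of the calling side (the surrounding flow is illustrative; only time_postfork() itself is the real OVS function):

    #include <sys/types.h>
    #include <unistd.h>

    #include "timeval.h"

    /* Illustrative daemonize-style flow: the child re-creates whatever timer
     * state it needs before entering its main loop. */
    static void
    fork_and_continue_in_child(void)
    {
        pid_t pid = fork();

        if (pid == 0) {
            time_postfork();      /* Re-arm the SIGALRM timer if this build needs it. */
            /* ... child's main loop, using time_msec(), time_poll(), etc. ... */
        } else if (pid > 0) {
            /* Parent: exit or wait, depending on the daemonization scheme. */
        }
    }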
@@ -199,7 +211,9 @@ refresh_monotonic(void)
 
 /* Forces a refresh of the current time from the kernel.  It is not usually
  * necessary to call this function, since the time will be refreshed
- * automatically at least every TIME_UPDATE_INTERVAL milliseconds. */
+ * automatically at least every TIME_UPDATE_INTERVAL milliseconds.  If
+ * CACHE_TIME is 0, the current time is always refreshed anyway, so this
+ * function has no additional effect. */
 void
 time_refresh(void)
 {
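In terms of the sketch above: forcing a refresh just marks the cache stale, and the "has it ticked?" test is short-circuited when CACHE_TIME is 0, which is exactly the shape of the refresh_*_if_ticked changes further down. Continuing the earlier illustrative names, not OVS's own code:

    /* When CACHE_TIME is 0 the flag test folds away at compile time, so every
     * call re-reads the clock. */
    static void
    refresh_if_ticked(void)
    {
        if (!CACHE_TIME || tick) {
            struct timeval tv;
            gettimeofday(&tv, NULL);
            cached_msec = tv.tv_sec * 1000LL + tv.tv_usec / 1000;
            tick = false;
        }
    }

    /* A forced refresh marks the cache stale and refreshes; with CACHE_TIME 0
     * it is redundant, since every query refreshes anyway. */
    static void
    force_refresh(void)
    {
        tick = true;
        refresh_if_ticked();
    }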
@@ -275,9 +289,17 @@ time_alarm(unsigned int secs)
     sigset_t oldsigs;
 
     time_init();
+
     block_sigalrm(&oldsigs);
     deadline = secs ? time_add(time_now(), secs) : TIME_MIN;
     unblock_sigalrm(&oldsigs);
+
+    if (!CACHE_TIME) {
+        /* If we aren't timing the gaps between kernel time refreshes, we need
+         * to start the timer now. */
+        set_up_signal(SA_RESTART);
+        set_up_timer();
+    }
 }
 
 /* Like poll(), except:
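time_alarm() arms a deadline; when time is not being cached, the SIGALRM machinery is not normally running, so it must be started here for the deadline to be noticed while the process is blocked. A sketch of that idea, again continuing the earlier illustrative names (how OVS reacts when the deadline passes is not shown here):

    #include <limits.h>

    static long long deadline_msec = LLONG_MIN;   /* LLONG_MIN means "unset". */

    /* Arm a deadline 'secs' seconds from now and make sure the periodic
     * SIGALRM is running so something actually notices the deadline. */
    static void
    arm_deadline(unsigned int secs)
    {
        deadline_msec = now_msec() + secs * 1000LL;
        if (!CACHE_TIME) {
            start_cache_timer();    /* Timer is not otherwise running. */
        }
    }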
@@ -366,7 +388,7 @@ sigalrm_handler(int sig_nr)
 static void
 refresh_wall_if_ticked(void)
 {
-    if (wall_tick) {
+    if (!CACHE_TIME || wall_tick) {
         refresh_wall();
     }
 }
@@ -374,7 +396,7 @@ refresh_wall_if_ticked(void)
 static void
 refresh_monotonic_if_ticked(void)
 {
-    if (monotonic_tick) {
+    if (!CACHE_TIME || monotonic_tick) {
         refresh_monotonic();
     }
 }
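The final hunk below wraps the "poll interval too long" warning in a log rate limiter, so a process stuck in a pathological state cannot flood the log, and moves the getrusage() work inside the rate-limit check so it is skipped when the message would be dropped. VLOG_RATE_LIMIT_INIT and VLOG_DROP_WARN are the standard OVS vlog idiom; a minimal sketch of that pattern (module name and message are illustrative):

    #include "vlog.h"

    VLOG_DEFINE_THIS_MODULE(rate_limit_example);

    static void
    warn_about_long_interval(long long int interval_ms)
    {
        /* One limiter per message site; its state lives in this static
         * variable.  The (1, 3) parameters match the hunk below. */
        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 3);

        /* VLOG_DROP_WARN() returns true when the limiter says to skip this
         * message, so any work needed only to build it (like getrusage() in
         * the hunk below) can be skipped as well. */
        if (!VLOG_DROP_WARN(&rl)) {
            VLOG_WARN("unusually long interval: %lld ms", interval_ms);
        }
    }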
@@ -463,42 +485,43 @@ log_poll_interval(long long int last_wakeup)
     /* Warn if we took too much time between polls: at least 50 ms and at least
      * 8X the mean interval. */
     if (n_samples > 10 && interval > mean_interval * 8 && interval > 50 * 16) {
-        const struct rusage *last_rusage = get_recent_rusage();
-        struct rusage rusage;
-
-        getrusage(RUSAGE_SELF, &rusage);
-        VLOG_WARN("%lld ms poll interval (%lld ms user, %lld ms system) "
-                  "is over %u times the weighted mean interval %u ms "
-                  "(%u samples)",
-                  now - last_wakeup,
-                  timeval_diff_msec(&rusage.ru_utime, &last_rusage->ru_utime),
-                  timeval_diff_msec(&rusage.ru_stime, &last_rusage->ru_stime),
-                  interval / mean_interval,
-                  (mean_interval + 8) / 16, n_samples);
-        if (rusage.ru_minflt > last_rusage->ru_minflt
-            || rusage.ru_majflt > last_rusage->ru_majflt) {
-            VLOG_WARN("faults: %ld minor, %ld major",
-                      rusage.ru_minflt - last_rusage->ru_minflt,
-                      rusage.ru_majflt - last_rusage->ru_majflt);
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 3);
+
+        if (!VLOG_DROP_WARN(&rl)) {
+            const struct rusage *last_rusage = get_recent_rusage();
+            struct rusage rusage;
+
+            getrusage(RUSAGE_SELF, &rusage);
+            VLOG_WARN("%lld ms poll interval (%lld ms user, %lld ms system) "
+                      "is over %u times the weighted mean interval %u ms "
+                      "(%u samples)",
+                      now - last_wakeup,
+                      timeval_diff_msec(&rusage.ru_utime,
+                                        &last_rusage->ru_utime),
+                      timeval_diff_msec(&rusage.ru_stime,
+                                        &last_rusage->ru_stime),
+                      interval / mean_interval,
+                      (mean_interval + 8) / 16, n_samples);
+            if (rusage.ru_minflt > last_rusage->ru_minflt
+                || rusage.ru_majflt > last_rusage->ru_majflt) {
+                VLOG_WARN("faults: %ld minor, %ld major",
+                          rusage.ru_minflt - last_rusage->ru_minflt,
+                          rusage.ru_majflt - last_rusage->ru_majflt);
+            }
+            if (rusage.ru_inblock > last_rusage->ru_inblock
+                || rusage.ru_oublock > last_rusage->ru_oublock) {
+                VLOG_WARN("disk: %ld reads, %ld writes",
+                          rusage.ru_inblock - last_rusage->ru_inblock,
+                          rusage.ru_oublock - last_rusage->ru_oublock);
+            }
+            if (rusage.ru_nvcsw > last_rusage->ru_nvcsw
+                || rusage.ru_nivcsw > last_rusage->ru_nivcsw) {
+                VLOG_WARN("context switches: %ld voluntary, %ld involuntary",
+                          rusage.ru_nvcsw - last_rusage->ru_nvcsw,
+                          rusage.ru_nivcsw - last_rusage->ru_nivcsw);
+            }
         }
-        if (rusage.ru_inblock > last_rusage->ru_inblock
-            || rusage.ru_oublock > last_rusage->ru_oublock) {
-            VLOG_WARN("disk: %ld reads, %ld writes",
-                      rusage.ru_inblock - last_rusage->ru_inblock,
-                      rusage.ru_oublock - last_rusage->ru_oublock);
-        }
-        if (rusage.ru_nvcsw > last_rusage->ru_nvcsw
-            || rusage.ru_nivcsw > last_rusage->ru_nivcsw) {
-            VLOG_WARN("context switches: %ld voluntary, %ld involuntary",
-                      rusage.ru_nvcsw - last_rusage->ru_nvcsw,
-                      rusage.ru_nivcsw - last_rusage->ru_nivcsw);
-        }
-
-        /* Care should be taken in the value chosen for logging.  Depending
-         * on the configuration, syslog can write changes synchronously,
-         * which can cause the coverage messages to take longer to log
-         * than the processing delay that triggered it. */
-        coverage_log(VLL_INFO, true);
+        coverage_log();
     }
 
     /* Update exponentially weighted moving average.  With these parameters, a