+/*
+ * Support function to read the TSC (or equivalent). We use this
+ * high resolution timer to adapt the amount of work done for
+ * expiring the clock.
+ * Supports Linux and FreeBSD both i386 and amd64 platform
+ * Supports OpenWRT mips architecture
+ *
+ * On SMP no special work is needed:
+ * - In Linux 2.6 timers always run on the same cpu that added them. See
+ *   (http://book.opensourceproject.org.cn/kernel/kernel3rd/opensource/0596005652/understandlk-chp-6-sect-5.html)
+ * - FreeBSD8 has a new callout_reset_on() which specifies the cpu on which
+ *   the timer must run
+ * - Windows runs dummynet_task() on cpu0.
+ *
+ * - Linux 2.4 doesn't assure to run a timer in the same cpu every time.
+ */
+#ifdef HAVE_TSC
+/*
+ * Read the platform's high-resolution cycle counter (TSC or the
+ * closest equivalent). Returns the raw counter value, or 0 if no
+ * platform branch below applies.
+ */
+uint64_t
+readTSC (void)
+{
+ uint64_t a=0;
+
+#ifdef __linux__
+ /* Linux and openwrt have a macro to read the tsc for i386 and
+ * amd64.
+ * Openwrt have patched the kernel and allow use of tsc with mips
+ * and other platforms
+ * rdtscll() is a macro defined in include/asm-xxx/msr.h,
+ * where xxx is the architecture (x86, mips).
+ */
+ rdtscll(a);
+#elif defined(_WIN32)
+ /* Microsoft recommends the use of KeQueryPerformanceCounter()
+ * instead of rdtsc().
+ */
+ KeQueryPerformanceCounter((PLARGE_INTEGER)&a); //XXX not tested!
+#elif defined(__FreeBSD__)
+ /* FreeBSD (i386/amd64) has macro rdtsc() defined in machine/cpufunc.h.
+ * We could use the macro instead of explicit assembly XXX
+ */
+ return rdtsc();
+#endif
+ return a;
+}
+#endif /* HAVE_TSC */
+
+/*
+ * Compute the avg task period: update dn_cfg.cycle_task (TSC delta
+ * between this invocation and the previous one) and fold it into the
+ * exponential moving average dn_cfg.cycle_task_avg.
+ * We could do something more complex, possibly.
+ */
+static void
+do_update_cycle(void)
+{
+#ifdef HAVE_TSC
+ uint64_t tmp = readTSC();
+#if defined (LINUX_24) && defined(CONFIG_SMP)
+ /* on LINUX24 and SMP, we have no guarantees on which cpu runs
+ * the timer callbacks. If the difference between new and
+ * old value is negative, we assume that the values come from
+ * different cpus so we adjust 'new' accordingly.
+ * Rewriting cycle_task_new as (tmp - cycle_task) makes the delta
+ * computed below equal to the previous cycle_task value.
+ */
+ if (tmp <= dn_cfg.cycle_task_new)
+ dn_cfg.cycle_task_new = tmp - dn_cfg.cycle_task;
+#endif /* LINUX_24 && CONFIG_SMP */
+ dn_cfg.cycle_task_old = dn_cfg.cycle_task_new;
+ dn_cfg.cycle_task_new = tmp;
+ dn_cfg.cycle_task = dn_cfg.cycle_task_new - dn_cfg.cycle_task_old;
+
+ /* Update the average as an exponential moving average:
+ * avg = (2^N * avg + new - avg) / 2^N
+ * i.e. avg += (new - avg) / 2^N, computed with shifts below.
+ * N==4 seems to be a good compromise between clock change
+ * and 'spurious' cycle_task value
+ */
+#define DN_N 4
+ dn_cfg.cycle_task_avg = (dn_cfg.cycle_task_avg << DN_N) +
+ dn_cfg.cycle_task - dn_cfg.cycle_task_avg;
+ dn_cfg.cycle_task_avg = dn_cfg.cycle_task_avg >> DN_N;
+#undef DN_N
+
+#endif /* HAVE_TSC */
+}
+
+/*
+ * Periodically drain idle queues and schedulers. Runs a drain pass
+ * only once every dn_cfg.expire invocations (and not at all when
+ * dn_cfg.expire is 0). With TSC support, drain passes are repeated
+ * until nothing more is freed or the time budget dt_max — derived
+ * from the average task period and dn_cfg.drain_ratio — is spent;
+ * without TSC a single pass is done.
+ */
+static void
+do_drain(void)
+{
+#ifdef HAVE_TSC
+ uint64_t dt_max;
+#endif
+ /* Drain disabled, or not yet time for a drain cycle. */
+ if (!dn_cfg.expire || ++dn_cfg.expire_cycle < dn_cfg.expire)
+ return;
+ /* It's time to check if drain routines should be called */
+ dn_cfg.expire_cycle = 0;
+
+ dn_cfg.idle_queue_wait = 0;
+ dn_cfg.idle_si_wait = 0;
+ /* Do a drain cycle even if there isn't time to do it */
+#ifdef HAVE_TSC
+ dt_max = dn_cfg.cycle_task_avg * dn_cfg.drain_ratio;
+#endif
+ for (;;) {
+ int done = 0;
+
+ /* Drain queues only while enough idle ones remain unvisited. */
+ if (dn_cfg.idle_queue > dn_cfg.expire_object &&
+ dn_cfg.idle_queue_wait < dn_cfg.idle_queue) {
+ dn_drain_queue();
+ done = 1;
+ }
+ if (dn_cfg.idle_si > dn_cfg.expire_object &&
+ dn_cfg.idle_si_wait < dn_cfg.idle_si) {
+ dn_drain_scheduler();
+ done = 1;
+ }
+ /* time to end ? */
+#ifndef HAVE_TSC
+ /* If tsc does not exist, do only one drain cycle and exit */
+ break;
+#else
+ /* Exit when nothing was done or we have consumed all time.
+ * elapsed*100 > dt_max, i.e. drain_ratio is presumably a
+ * percentage of the average cycle — TODO confirm units.
+ */
+ if ( (done == 0) ||
+ ((readTSC() - dn_cfg.cycle_task_new) * 100 > dt_max) )
+ break;
+#endif /* HAVE_TSC */
+ }
+}
+