/*
 * This code largely moved from arch/i386/kernel/timer/timer_tsc.c
 * which was originally moved from arch/i386/kernel/time.c.
 * See comments there for proper credits.
 */
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
#include "mach_timer.h"
/*
 * On some systems the TSC frequency does not
 * change with the cpu frequency. So we need
 * an extra value to store the TSC freq.
 */
unsigned int tsc_khz;

int tsc_disable __cpuinitdata = 0;
#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
			    "cannot disable TSC.\n");
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c.
 */
static int __init tsc_setup(char *str)
{
	tsc_disable = 1;
	return 1;
}
#endif

__setup("notsc", tsc_setup);
/*
 * code to mark and check if the TSC is unstable
 * due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

static inline int check_tsc_unstable(void)
{
	return tsc_unstable;
}

void mark_tsc_unstable(void)
{
	tsc_unstable = 1;
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);
/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *	into a shift.
 *
 *  We can use khz divisor instead of mhz to keep a better precision, since
 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
static unsigned long cyc2ns_scale __read_mostly;

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
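
/*
 * Illustrative worked example (the numbers are assumptions, not from the
 * original source): on a hypothetical 2GHz CPU, cpu_khz = 2000000, so
 *
 *	cyc2ns_scale = (1000000 << 10) / 2000000 = 512
 *
 * and cycles_2_ns(4000) = (4000 * 512) >> 10 = 2000, i.e. 4000 cycles
 * at 2GHz come out as 2000ns = 2us, matching the 0.5ns cycle time.
 */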
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
	unsigned long long this_offset;

	/*
	 * in the NUMA case we don't use the TSC as they are not
	 * synchronized across all CPUs.
	 */
#ifndef CONFIG_NUMA
	if (!cpu_khz || check_tsc_unstable())
#endif
		/* no locking but a rare wrong value is not a big deal */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

	/* read the Time Stamp Counter: */
	rdtscll(this_offset);

	/* return the value in ns */
	return cycles_2_ns(this_offset);
}
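
/*
 * Illustrative note (the HZ value is an assumption, not from the original
 * source): with HZ = 250, the jiffies fallback above advances in steps of
 * 1000000000 / 250 = 4000000ns, so sched_clock() degrades from per-cycle
 * resolution to 4ms resolution whenever the TSC cannot be used.
 */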
static unsigned long calculate_cpu_khz(void)
{
	unsigned long long start, end;
	unsigned long count;
	u64 delta64;
	int i;
	unsigned long flags;

	local_irq_save(flags);

	/* run 3 times to ensure the cache is warm */
	for (i = 0; i < 3; i++) {
		mach_prepare_counter();
		rdtscll(start);
		mach_countup(&count);
		rdtscll(end);
	}
	/*
	 * Error: ECTCNEVERSET
	 * The CTC wasn't reliable: we got a hit on the very first read,
	 * or the CPU was so fast/slow that the quotient wouldn't fit in
	 * 32 bits.
	 */
	if (count <= 1)
		goto err;

	delta64 = end - start;

	/* cpu freq too fast: */
	if (delta64 > (1ULL<<32))
		goto err;

	/* cpu freq too slow: */
	if (delta64 <= CALIBRATE_TIME_MSEC)
		goto err;

	delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
	do_div(delta64, CALIBRATE_TIME_MSEC);

	local_irq_restore(flags);
	return (unsigned long)delta64;
err:
	local_irq_restore(flags);
	return 0;
}
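
/*
 * Illustrative worked example (the numbers are assumptions, not from the
 * original source): if CALIBRATE_TIME_MSEC were 30 and the TSC advanced
 * delta64 = 60000000 cycles while the CTC timed that window, then
 * 60000000 / 30 = 2000000, i.e. the routine reports 2000000 kHz (a 2GHz
 * CPU). The two range checks above reject implausibly fast or slow counts
 * before the division.
 */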
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		cpu_khz = calculate_cpu_khz();
		tsc_khz = cpu_khz;
		cpu_data[0].loops_per_jiffy =
			cpufreq_scale(cpu_data[0].loops_per_jiffy,
					cpu_khz_old, cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);
void __init tsc_init(void)
{
	if (!cpu_has_tsc || tsc_disable)
		return;

	cpu_khz = calculate_cpu_khz();
	tsc_khz = cpu_khz;

	if (!cpu_khz)
		return;

	printk("Detected %lu.%03lu MHz processor.\n",
				(unsigned long)cpu_khz / 1000,
				(unsigned long)cpu_khz % 1000);

	set_cyc2ns_scale(cpu_khz);
	use_tsc_delay();
}
#ifdef CONFIG_CPU_FREQ

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;
static void handle_cpufreq_delayed_get(void *v)
{
	unsigned int cpu;

	for_each_online_cpu(cpu)
		cpufreq_get(cpu);

	cpufreq_delayed_issched = 0;
}
/*
 * if we notice cpufreq oddness, schedule a call to cpufreq_get(), which
 * tries to verify that the CPU frequency the timing core thinks the CPU
 * is running at is still correct.
 */
static inline void cpufreq_delayed_get(void)
{
	if (cpufreq_init && !cpufreq_delayed_issched) {
		cpufreq_delayed_issched = 1;
		printk(KERN_DEBUG "Checking if CPU frequency changed.\n");
		schedule_work(&cpufreq_delayed_get_work);
	}
}
/*
 * if the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;
static unsigned long cpu_khz_ref = 0;
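
/*
 * Illustrative worked example (the numbers are assumptions, not from the
 * original source): cpufreq_scale(old, div, mult) computes old * mult / div,
 * so if loops_per_jiffy_ref was 4000000 at ref_freq = 2000000 kHz and the
 * CPU drops to 1000000 kHz, the notifier below rescales loops_per_jiffy to
 * 4000000 * 1000000 / 2000000 = 2000000, keeping TSC-based delays accurate.
 */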
static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;

	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
		write_seqlock_irq(&xtime_lock);

	if (!ref_freq) {
		if (!freq->old) {
			ref_freq = freq->new;
			goto end;
		}
		ref_freq = freq->old;
		loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
		cpu_khz_ref = cpu_khz;
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			cpu_data[freq->cpu].loops_per_jiffy =
				cpufreq_scale(loops_per_jiffy_ref,
						ref_freq, freq->new);

		if (num_online_cpus() == 1)
			cpu_khz = cpufreq_scale(cpu_khz_ref,
						ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
			tsc_khz = cpu_khz;
			set_cyc2ns_scale(cpu_khz);
			/*
			 * TSC based sched_clock turns
			 * to junk w/ cpufreq
			 */
			mark_tsc_unstable();
		}
	}
end:
	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
		write_sequnlock_irq(&xtime_lock);

	return 0;
}
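
/*
 * Illustrative note (reasoning spelled out, not from the original source):
 * the PRECHANGE/POSTCHANGE test above rescales before a speed-up and after
 * a slow-down, so while a transition is in flight loops_per_jiffy is never
 * smaller than the true value and udelay() can only err on the long side.
 */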
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call	= time_cpufreq_notifier
};
static int __init cpufreq_tsc(void)
{
	int ret;

	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
	ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (!ret)
		cpufreq_init = 1;

	return ret;
}

core_initcall(cpufreq_tsc);

#endif
/* clock source code */

static unsigned long current_tsc_khz = 0;
static int tsc_update_callback(void);
static cycle_t read_tsc(void)
{
	cycle_t ret;

	rdtscll(ret);

	return ret;
}
static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.mult			= 0, /* to be set */
	.shift			= 22,
	.update_callback	= tsc_update_callback,
	.is_continuous		= 1,
};
static int tsc_update_callback(void)
{
	int change = 0;

	/* check to see if we should switch to the safe clocksource: */
	if (clocksource_tsc.rating != 50 && check_tsc_unstable()) {
		clocksource_tsc.rating = 50;
		clocksource_reselect();
		change = 1;
	}

	/* only update if tsc_khz has changed: */
	if (current_tsc_khz != tsc_khz) {
		current_tsc_khz = tsc_khz;
		clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
							clocksource_tsc.shift);
		change = 1;
	}

	return change;
}
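
/*
 * Illustrative worked example (the numbers are assumptions, not from the
 * original source): the clocksource core converts cycles to ns roughly as
 * (cycles * mult) >> shift. With shift = 22 and a 2000000 kHz TSC,
 * clocksource_khz2mult() yields mult = (1000000 << 22) / 2000000 = 2097152,
 * so (cycles * 2097152) >> 22 = cycles / 2, i.e. 0.5ns per cycle at 2GHz.
 */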
static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
		       d->ident);
	mark_tsc_unstable();
	return 0;
}
/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
	 .callback = dmi_mark_tsc_unstable,
	 .ident = "IBM Thinkpad 380XD",
	 .matches = {
		     DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
		     DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		     },
	 },
	 {}
};
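
/*
 * A hypothetical further entry (illustrative only, not a real blacklisted
 * board) would follow the same pattern before the {} terminator:
 *
 *	{
 *	 .callback = dmi_mark_tsc_unstable,
 *	 .ident = "Example Vendor ExampleBoard",
 *	 .matches = {
 *		     DMI_MATCH(DMI_BOARD_VENDOR, "Example Vendor"),
 *		     DMI_MATCH(DMI_BOARD_NAME, "EXAMPLE1"),
 *		     },
 *	 },
 *
 * dmi_check_system() runs the .callback of each entry whose .matches
 * strings are all found in the machine's DMI tables.
 */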
#define TSC_FREQ_CHECK_INTERVAL (10*MSEC_PER_SEC) /* 10sec in MS */
static struct timer_list verify_tsc_freq_timer;
/* XXX - Probably should add locking */
static void verify_tsc_freq(unsigned long unused)
{
	static u64 last_tsc;
	static unsigned long last_jiffies;

	u64 now_tsc, interval_tsc;
	unsigned long now_jiffies, interval_jiffies;

	if (check_tsc_unstable())
		return;

	rdtscll(now_tsc);
	now_jiffies = jiffies;

	if (!last_jiffies)
		goto out;

	interval_jiffies = now_jiffies - last_jiffies;
	interval_tsc = now_tsc - last_tsc;
	interval_tsc *= HZ;
	do_div(interval_tsc, cpu_khz*1000);

	if (interval_tsc < (interval_jiffies * 3 / 4)) {
		printk("TSC appears to be running slowly. "
			"Marking it as unstable\n");
		mark_tsc_unstable();
		goto out;
	}

out:
	last_tsc = now_tsc;
	last_jiffies = now_jiffies;
	/* set us up to go off on the next interval: */
	mod_timer(&verify_tsc_freq_timer,
		jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL));
}
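
/*
 * Illustrative worked example (the numbers are assumptions, not from the
 * original source): with HZ = 250 and cpu_khz = 2000000, a 10s interval
 * gives interval_jiffies = 2500 and, for a healthy TSC,
 * interval_tsc = 20000000000 cycles * 250 / 2000000000 = 2500 "TSC
 * jiffies". If the TSC halted half the time (e.g. in deep idle) it would
 * only reach 1250, below the 3/4 threshold of 1875, and be marked unstable.
 */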
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
static __init int unsynchronized_tsc(void)
{
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return 0;

	/* assume multi socket systems are not synchronized: */
	return num_possible_cpus() > 1;
}
static int __init init_tsc_clocksource(void)
{
	if (cpu_has_tsc && tsc_khz && !tsc_disable) {
		/* check blacklist */
		dmi_check_system(bad_tsc_dmi_table);

		/* mark unstable if unsynced: */
		if (unsynchronized_tsc())
			mark_tsc_unstable();
		current_tsc_khz = tsc_khz;
		clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
							clocksource_tsc.shift);
		/* lower the rating if we already know it's unstable: */
		if (check_tsc_unstable())
			clocksource_tsc.rating = 50;

		init_timer(&verify_tsc_freq_timer);
		verify_tsc_freq_timer.function = verify_tsc_freq;
		verify_tsc_freq_timer.expires =
			jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL);
		add_timer(&verify_tsc_freq_timer);

		return clocksource_register(&clocksource_tsc);
	}

	return 0;
}

module_init(init_tsc_clocksource);