/*
 * This code largely moved from arch/i386/kernel/timer/timer_tsc.c,
 * which was originally moved from arch/i386/kernel/time.c.
 * See comments there for proper credits.
 */

#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>

#include "mach_timer.h"

/*
 * On some systems the TSC frequency does not
 * change with the CPU frequency, so we need
 * an extra value to store the TSC frequency.
 */
unsigned int tsc_khz;

int tsc_disable __cpuinitdata = 0;

#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
			    "cannot disable TSC.\n");
	return 1;
}
#else
/*
 * Disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c.
 */
static int __init tsc_setup(char *str)
{
	tsc_disable = 1;
	return 1;
}
#endif

__setup("notsc", tsc_setup);

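/*
 * Usage: booting with "notsc" on the kernel command line runs tsc_setup()
 * via __setup(). On CONFIG_X86_TSC kernels this only prints a warning;
 * otherwise it sets tsc_disable, which tsc_init() and
 * init_tsc_clocksource() below check before touching the TSC.
 */
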
/*
 * Code to mark and check if the TSC is unstable
 * due to cpufreq or due to unsynced TSCs.
 */
static int tsc_unstable;

static inline int check_tsc_unstable(void)
{
	return tsc_unstable;
}

void mark_tsc_unstable(void)
{
	tsc_unstable = 1;
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

/*
 * Accelerators for sched_clock()
 * convert from cycles (64 bits) => nanoseconds (64 bits)
 *  basic equation:
 *	ns = cycles / (freq / ns_per_sec)
 *	ns = cycles * (ns_per_sec / freq)
 *	ns = cycles * (10^9 / (cpu_khz * 10^3))
 *	ns = cycles * (10^6 / cpu_khz)
 *
 *  Then we use scaling math (suggested by george@mvista.com) to get:
 *	ns = cycles * (10^6 * SC / cpu_khz) / SC
 *	ns = cycles * cyc2ns_scale / SC
 *
 *  And since SC is a constant power of two, we can convert the div
 *  into a shift.
 *
 *  We can use a khz divisor instead of mhz to keep better precision, since
 *  cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *		-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */

static unsigned long cyc2ns_scale __read_mostly;

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}

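/*
 * Worked example with illustrative numbers (not taken from this file):
 * on a 2 GHz CPU, cpu_khz = 2000000, so
 *	cyc2ns_scale = (1000000 << 10) / 2000000 = 512
 * and a delta of 2,000,000 cycles converts to
 *	(2000000 * 512) >> 10 = 1000000 ns = 1 ms,
 * exactly the time 2,000,000 cycles take at 2 GHz.
 */
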
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
	unsigned long long this_offset;

	/*
	 * In the NUMA case we don't use the TSC, as the counters are
	 * not synchronized across all CPUs.
	 */
#ifndef CONFIG_NUMA
	if (!cpu_khz || check_tsc_unstable())
#endif
		/* no locking, but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

	/* read the Time Stamp Counter: */
	rdtscll(this_offset);

	/* return the value in ns */
	return cycles_2_ns(this_offset);
}

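/*
 * Granularity of the fallback path, assuming an illustrative HZ = 250:
 * sched_clock() then advances in 1000000000 / 250 = 4000000 ns (4 ms)
 * steps, far coarser than the TSC path but adequate when the TSC is
 * unusable.
 */
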
static unsigned long calculate_cpu_khz(void)
{
	unsigned long long start, end;
	unsigned long count, flags;
	u64 delta64;
	int i;

	local_irq_save(flags);

	/* run 3 times to ensure the cache is warm */
	for (i = 0; i < 3; i++) {
		mach_prepare_counter();
		rdtscll(start);
		mach_countup(&count);
		rdtscll(end);
	}
	/*
	 * Error: ECTCNEVERSET
	 * The CTC wasn't reliable: we got a hit on the very first read,
	 * or the CPU was so fast/slow that the quotient wouldn't fit in
	 * 32 bits.
	 */
	if (count <= 1)
		goto err;

	delta64 = end - start;

	/* cpu freq too fast: */
	if (delta64 > (1ULL<<32))
		goto err;
	/* cpu freq too slow: */
	if (delta64 <= CALIBRATE_TIME_MSEC)
		goto err;

	delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
	do_div(delta64, CALIBRATE_TIME_MSEC);

	local_irq_restore(flags);
	return (unsigned long)delta64;
err:
	local_irq_restore(flags);
	return 0;
}

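/*
 * Calibration arithmetic, with illustrative numbers: mach_countup()
 * spins for CALIBRATE_TIME_MSEC (assumed 30 ms here, per the default
 * mach_timer.h) as timed by the PIT, while the rdtscll() pair brackets
 * that window in TSC cycles. A 2 GHz CPU accumulates
 * delta64 ~= 60,000,000 cycles, and 60000000 / 30 = 2000000 kHz:
 * cycles per millisecond is, by definition, the frequency in kHz.
 */
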
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		cpu_khz = calculate_cpu_khz();
		tsc_khz = cpu_khz;
		cpu_data[0].loops_per_jiffy =
			cpufreq_scale(cpu_data[0].loops_per_jiffy,
				      cpu_khz_old, cpu_khz);
		return 0;
	}
#endif
	return -ENODEV;
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

void __init tsc_init(void)
{
	if (!cpu_has_tsc || tsc_disable)
		return;

	cpu_khz = calculate_cpu_khz();
	tsc_khz = cpu_khz;
	if (!cpu_khz)
		return;

	printk("Detected %lu.%03lu MHz processor.\n",
	       (unsigned long)cpu_khz / 1000,
	       (unsigned long)cpu_khz % 1000);

	set_cyc2ns_scale(cpu_khz);
	use_tsc_delay();
}

#ifdef CONFIG_CPU_FREQ

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;

static void handle_cpufreq_delayed_get(struct work_struct *work)
{
	unsigned int cpu;

	for_each_online_cpu(cpu)
		cpufreq_get(cpu);

	cpufreq_delayed_issched = 0;
}

/*
 * If we notice cpufreq oddness, schedule a call to cpufreq_get(), which
 * verifies that the CPU frequency the timing core thinks the CPU is
 * running at is still correct.
 */
static inline void cpufreq_delayed_get(void)
{
	if (cpufreq_init && !cpufreq_delayed_issched) {
		cpufreq_delayed_issched = 1;
		printk(KERN_DEBUG "Checking if CPU frequency changed.\n");
		schedule_work(&cpufreq_delayed_get_work);
	}
}

/*
 * If the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;
static unsigned long cpu_khz_ref = 0;

static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;

	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
		write_seqlock_irq(&xtime_lock);

	if (!ref_freq) {
		if (!freq->old) {
			ref_freq = freq->new;
			goto end;
		}
		ref_freq = freq->old;
		loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
		cpu_khz_ref = cpu_khz;
	}

	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			cpu_data[freq->cpu].loops_per_jiffy =
				cpufreq_scale(loops_per_jiffy_ref,
					      ref_freq, freq->new);

		if (cpu_khz) {
			if (num_online_cpus() == 1)
				cpu_khz = cpufreq_scale(cpu_khz_ref,
							ref_freq, freq->new);
			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
				tsc_khz = cpu_khz;
				set_cyc2ns_scale(cpu_khz);
				/*
				 * TSC based sched_clock turns
				 * to junk w/ cpufreq
				 */
				mark_tsc_unstable();
			}
		}
	}
end:
	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
		write_sequnlock_irq(&xtime_lock);

	return 0;
}

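/*
 * Rescaling sketch with illustrative numbers: cpufreq_scale(old, div,
 * mult) returns old * mult / div. If the notifier recorded
 * ref_freq = 800000 kHz with loops_per_jiffy_ref = 400000 and the CPU
 * switches to freq->new = 1600000 kHz, loops_per_jiffy becomes
 * 400000 * 1600000 / 800000 = 800000, keeping TSC-based delay loops
 * calibrated at the new clock rate.
 */
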
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	int ret;

	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
	ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (!ret)
		cpufreq_init = 1;
	return ret;
}

core_initcall(cpufreq_tsc);

#endif

/* clock source code */

static unsigned long current_tsc_khz = 0;
static int tsc_update_callback(void);

static cycle_t read_tsc(void)
{
	cycle_t ret;

	rdtscll(ret);
	return ret;
}

static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.mult			= 0, /* to be set */
	.shift			= 22,
	.update_callback	= tsc_update_callback,
	.is_continuous		= 1,
};

static int tsc_update_callback(void)
{
	int change = 0;

	/* check to see if we should switch to the safe clocksource: */
	if (clocksource_tsc.rating != 0 && check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_reselect();
		change = 1;
	}

	/* only update if tsc_khz has changed: */
	if (current_tsc_khz != tsc_khz) {
		current_tsc_khz = tsc_khz;
		clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
						clocksource_tsc.shift);
		change = 1;
	}

	return change;
}

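/*
 * mult/shift arithmetic, with illustrative numbers: the clocksource core
 * computes ns = (cycles * mult) >> shift, and clocksource_khz2mult(khz,
 * shift) picks mult ~= (1000000 << shift) / khz. With tsc_khz = 2000000
 * and shift = 22:
 *	mult = (1000000 << 22) / 2000000 = 2097152
 * so mult / 2^22 = 0.5 ns per cycle, matching a 2 GHz TSC.
 */
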
static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
	       d->ident);
	mark_tsc_unstable();
	return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
		.callback = dmi_mark_tsc_unstable,
		.ident = "IBM Thinkpad 380XD",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		},
	},
	{}
};

#define TSC_FREQ_CHECK_INTERVAL (10*MSEC_PER_SEC) /* 10 seconds, in ms */
static struct timer_list verify_tsc_freq_timer;

/* XXX - Probably should add locking */
static void verify_tsc_freq(unsigned long unused)
{
	static u64 last_tsc;
	static unsigned long last_jiffies;

	u64 now_tsc, interval_tsc;
	unsigned long now_jiffies, interval_jiffies;

	if (check_tsc_unstable())
		return;

	rdtscll(now_tsc);
	now_jiffies = jiffies;

	if (!last_jiffies)
		goto out;

	interval_jiffies = now_jiffies - last_jiffies;
	interval_tsc = now_tsc - last_tsc;
	interval_tsc *= HZ;
	do_div(interval_tsc, cpu_khz*1000);

	if (interval_tsc < (interval_jiffies * 3 / 4)) {
		printk("TSC appears to be running slowly. "
		       "Marking it as unstable\n");
		mark_tsc_unstable();
	}

out:
	last_tsc = now_tsc;
	last_jiffies = now_jiffies;
	/* set us up to go off on the next interval: */
	mod_timer(&verify_tsc_freq_timer,
		  jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL));
}

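/*
 * Unit check on the comparison above, with illustrative numbers:
 * interval_tsc * HZ / (cpu_khz * 1000) converts the TSC delta into
 * jiffies, since cpu_khz * 1000 is cycles per second. At 2 GHz with
 * HZ = 250, a 10 s interval gives 2e10 * 250 / 2e9 = 2500 jiffies; if
 * the TSC covers less than 3/4 of the 2500 jiffies that actually
 * elapsed, it is deemed slow and marked unstable.
 */
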
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
static __init int unsynchronized_tsc(void)
{
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return 0;

	/* assume multi-socket systems are not synchronized: */
	return num_possible_cpus() > 1;
}

static int __init init_tsc_clocksource(void)
{
	if (cpu_has_tsc && tsc_khz && !tsc_disable) {
		/* check blacklist */
		dmi_check_system(bad_tsc_dmi_table);

		if (unsynchronized_tsc()) /* mark unstable if unsynced */
			mark_tsc_unstable();
		current_tsc_khz = tsc_khz;
		clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
						clocksource_tsc.shift);
		/* lower the rating if we already know it's unstable: */
		if (check_tsc_unstable())
			clocksource_tsc.rating = 0;

		init_timer(&verify_tsc_freq_timer);
		verify_tsc_freq_timer.function = verify_tsc_freq;
		verify_tsc_freq_timer.expires =
			jiffies + msecs_to_jiffies(TSC_FREQ_CHECK_INTERVAL);
		add_timer(&verify_tsc_freq_timer);

		return clocksource_register(&clocksource_tsc);
	}

	return 0;
}

module_init(init_tsc_clocksource);