* Copyright (c) 1995 Markus Kuhn
* Copyright (c) 1996 Ingo Molnar
* Copyright (c) 1998 Andrea Arcangeli
- * Copyright (c) 2002,2006 Vojtech Pavlik
+ * Copyright (c) 2002 Vojtech Pavlik
* Copyright (c) 2003 Andi Kleen
* RTC support code taken from arch/i386/kernel/timers/time_hpet.c
*/
#include <linux/acpi.h>
#ifdef CONFIG_ACPI
#include <acpi/achware.h> /* for PM timer frequency */
-#include <acpi/acpi_bus.h>
#endif
#include <asm/8253pit.h>
#include <asm/pgtable.h>
static char *time_init_gtod(void);
DEFINE_SPINLOCK(rtc_lock);
-EXPORT_SYMBOL(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);
int nohpet __initdata = 0;
static int notsc __initdata = 0;
-#define USEC_PER_TICK (USEC_PER_SEC / HZ)
-#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)
-#define FSEC_PER_TICK (FSEC_PER_SEC / HZ)
-
-#define NS_SCALE 10 /* 2^10, carefully chosen */
-#define US_SCALE 32 /* 2^32, arbitralrily chosen */
+#undef HPET_HACK_ENABLE_DANGEROUS
unsigned int cpu_khz; /* CPU frequency in kHz, i.e. TSC clocks per msec; not used here */
-EXPORT_SYMBOL(cpu_khz);
static unsigned long hpet_period; /* fsecs / HPET clock */
unsigned long hpet_tick; /* HPET clocks / interrupt */
int hpet_use_timer; /* Use the HPET counter for timekeeping, otherwise the PIT */
t = get_cycles_sync();
if (t < vxtime.last_tsc)
t = vxtime.last_tsc; /* hack */
- x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> US_SCALE;
+ x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
return x;
}
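/*
 * tsc_quot is a 32.32 fixed-point factor, (1000L << 32) / cpu_khz (see
 * time_init() below), i.e. microseconds per TSC cycle scaled by 2^32,
 * so the multiply plus right shift above converts a cycle delta
 * straight to usecs.  Illustrative numbers: at cpu_khz = 2000000
 * (2 GHz), tsc_quot is about 2147483, and a delta of 2000000 cycles
 * (1 ms) maps to roughly 1000 usec.
 */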
{
/* cap counter read to one tick to avoid inconsistencies */
unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
- return (min(counter,hpet_tick) * vxtime.quot) >> US_SCALE;
+ return (min(counter, hpet_tick) * vxtime.quot) >> 32;
}
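/*
 * Between two timer interrupts the HPET counter should advance by at
 * most hpet_tick.  If an interrupt is late, the raw delta can exceed
 * one tick while xtime has not been updated yet; clamping with min()
 * keeps gettimeofday from running ahead, and the excess time is
 * accounted once the tick is finally processed.
 */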
unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
seq = read_seqbegin(&xtime_lock);
sec = xtime.tv_sec;
- usec = xtime.tv_nsec / NSEC_PER_USEC;
+ usec = xtime.tv_nsec / 1000;
/* i386 does some correction here to keep the clock
   monotonic even when ntpd is fixing drift, but that did not
   work reliably here, so all corrections were dropped until a
   real solution can be found. Note that when you fix it here
   you need to do the same in arch/x86_64/kernel/vsyscall.c
   and export all needed variables in vmlinux.lds. -AK */
- t = (jiffies - wall_jiffies) * USEC_PER_TICK +
+ t = (jiffies - wall_jiffies) * (1000000L / HZ) +
do_gettimeoffset();
usec += t;
} while (read_seqretry(&xtime_lock, seq));
- tv->tv_sec = sec + usec / USEC_PER_SEC;
- tv->tv_usec = usec % USEC_PER_SEC;
+ tv->tv_sec = sec + usec / 1000000;
+ tv->tv_usec = usec % 1000000;
}
EXPORT_SYMBOL(do_gettimeofday);
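/*
 * The final divide/modulo pair normalizes the result: usec can exceed
 * a second once the interpolated offset is added.  For example, with
 * xtime at 999900 usec into the second and an offset of 200 usec,
 * usec = 1000100, so tv_sec gains one and tv_usec becomes 100.
 */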
write_seqlock_irq(&xtime_lock);
- nsec -= do_gettimeoffset() * NSEC_PER_USEC +
- (jiffies - wall_jiffies) * NSEC_PER_TICK;
+ nsec -= do_gettimeoffset() * 1000 +
+ (jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ);
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
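/*
 * The caller passes the wall time for "now".  Subtracting the already
 * elapsed interpolation offset (and any pending jiffies) converts it
 * to the value xtime should have held at the last tick, so subsequent
 * do_gettimeofday() interpolation lands on the requested time.
 */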
is just accounted to the spinlock function.
It would be better to write these functions in assembler
again and check this precisely. */
- if (!user_mode(regs) && in_lock_functions(pc)) {
+ if (in_lock_functions(pc)) {
char *v = *(char **)regs->rsp;
if ((v >= _stext && v <= _etext) ||
(v >= _sinittext && v <= _einittext) ||
this_offset = hpet_readl(HPET_COUNTER);
} while (read_seqretry(&xtime_lock, seq));
offset = (this_offset - last_offset);
- offset *= NSEC_PER_TICK / hpet_tick;
+ offset *= (NSEC_PER_SEC/HZ) / hpet_tick;
} else {
do {
seq = read_seqbegin(&xtime_lock);
base = monotonic_base;
} while (read_seqretry(&xtime_lock, seq));
this_offset = get_cycles_sync();
- /* FIXME: 1000 or 1000000? */
- offset = (this_offset - last_offset)*1000 / cpu_khz;
+ offset = (this_offset - last_offset)*1000 / cpu_khz;
}
return base + offset;
}
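/*
 * Unit note for the conversions above: cpu_khz counts TSC cycles per
 * millisecond, so delta * 1000 / cpu_khz yields microseconds and
 * delta * 1000000 / cpu_khz yields nanoseconds.
 */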
}
monotonic_base +=
- (offset - vxtime.last) * NSEC_PER_TICK / hpet_tick;
+ (offset - vxtime.last) * (NSEC_PER_SEC / HZ) / hpet_tick;
vxtime.last = offset;
#ifdef CONFIG_X86_PM_TIMER
#endif
} else {
offset = (((tsc - vxtime.last_tsc) *
- vxtime.tsc_quot) >> US_SCALE) - USEC_PER_TICK;
+ vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ);
if (offset < 0)
offset = 0;
- if (offset > USEC_PER_TICK) {
- lost = offset / USEC_PER_TICK;
- offset %= USEC_PER_TICK;
+ if (offset > (USEC_PER_SEC / HZ)) {
+ lost = offset / (USEC_PER_SEC / HZ);
+ offset %= (USEC_PER_SEC / HZ);
}
- /* FIXME: 1000 or 1000000? */
- monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;
+ monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;
vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;
if ((((tsc - vxtime.last_tsc) *
- vxtime.tsc_quot) >> US_SCALE) < offset)
+ vxtime.tsc_quot) >> 32) < offset)
vxtime.last_tsc = tsc -
- (((long) offset << US_SCALE) / vxtime.tsc_quot) - 1;
+ (((long) offset << 32) / vxtime.tsc_quot) - 1;
}
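/*
 * After a lost-tick adjustment, last_tsc is pulled back so that
 * ((tsc - last_tsc) * tsc_quot) >> 32 is no smaller than the offset
 * just consumed (the -1 biases the division down); this keeps the
 * next interpolated reading from stepping backwards.
 */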
if (lost > 0) {
}
static unsigned int cyc2ns_scale __read_mostly;
+#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
- cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / cpu_khz;
+ cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}
static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
- return (cyc * cyc2ns_scale) >> NS_SCALE;
+ return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
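/*
 * Illustrative numbers: at cpu_khz = 2000000 (2 GHz),
 * cyc2ns_scale = (1000000 << 10) / 2000000 = 512, so
 * cycles_2_ns(cyc) = (cyc * 512) >> 10 = cyc / 2, i.e. 0.5 ns/cycle.
 */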
unsigned long long sched_clock(void)
Disadvantage is a small drift between CPUs in some configurations,
but that should be tolerable. */
if (__vxtime.mode == VXTIME_HPET)
- return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> US_SCALE;
+ return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;
#endif
/* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
if (!(freq->flags & CPUFREQ_CONST_LOOPS))
- vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
+ vxtime.tsc_quot = (1000L << 32) / cpu_khz;
}
set_cyc2ns_scale(cpu_khz_ref);
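/*
 * On a frequency transition the cycles-to-time factors must follow:
 * tsc_quot is recomputed from the new cpu_khz unless the TSC rate is
 * constant across frequency changes (CPUFREQ_CONST_LOOPS).
 */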
if (hpet_use_timer) {
hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
HPET_TN_32BIT, HPET_T0_CFG);
- hpet_writel(hpet_tick, HPET_T0_CMP); /* next interrupt */
- hpet_writel(hpet_tick, HPET_T0_CMP); /* period */
+ hpet_writel(hpet_tick, HPET_T0_CMP);
+ hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */
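+ /* Likely answer: with HPET_TN_SETVAL set, the HPET spec makes the
+    first comparator write set the compare value and the second write
+    set the periodic accumulator, so the double write appears
+    intentional. */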
cfg |= HPET_CFG_LEGACY;
}
/*
if (hpet_period < 100000 || hpet_period > 100000000)
return -1;
- hpet_tick = (FSEC_PER_TICK + hpet_period / 2) / hpet_period;
+ hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
+ hpet_period;
hpet_use_timer = (id & HPET_ID_LEGSUP);
}
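/*
 * hpet_tick is the number of HPET clocks per timer interrupt, rounded
 * to nearest.  Illustrative numbers: with HZ = 1000 and a 14.31818 MHz
 * HPET (hpet_period ~= 69841279 fs), hpet_tick ~= 14318.
 */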
static struct irqaction irq0 = {
- timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL
+ timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL
};
void __init time_init(void)
char *timename;
char *gtod;
+#ifdef HPET_HACK_ENABLE_DANGEROUS
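+ /* This pokes a chipset-specific PCI config register (bus 0, device 7,
+    function 0, register 0xa0, per the 0x800038a0 encoding) through the
+    0xcf8/0xcfc config ports to read back and force-enable an HPET base
+    address.  Dangerous: it assumes one particular chipset. */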
+ if (!vxtime.hpet_address) {
+ printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
+ "manually!\n");
+ outl(0x800038a0, 0xcf8);
+ outl(0xff000001, 0xcfc);
+ outl(0x800038a0, 0xcf8);
+ vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
+ printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
+ "at %#lx.\n", vxtime.hpet_address);
+ }
+#endif
if (nohpet)
vxtime.hpet_address = 0;
set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
if (!hpet_init())
- vxtime_hz = (FSEC_PER_SEC + hpet_period / 2) / hpet_period;
+ vxtime_hz = (1000000000000000L + hpet_period / 2) / hpet_period;
else
vxtime.hpet_address = 0;
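/*
 * vxtime_hz is the HPET frequency in Hz: 10^15 fs/s divided by the
 * period in fs, rounded to nearest.  With hpet_period ~= 69841279 fs
 * this gives ~14318180 Hz (the classic 14.31818 MHz).
 */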
vxtime_hz / 1000000, vxtime_hz % 1000000, timename, gtod);
printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
cpu_khz / 1000, cpu_khz % 1000);
- vxtime.quot = (USEC_PER_SEC << US_SCALE) / vxtime_hz;
- vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
+ vxtime.quot = (1000000L << 32) / vxtime_hz;
+ vxtime.tsc_quot = (1000L << 32) / cpu_khz;
vxtime.last_tsc = get_cycles_sync();
setup_irq(0, &irq0);
__cpuinit int unsynchronized_tsc(void)
{
#ifdef CONFIG_SMP
- if (apic_is_clustered_box())
+ if (oem_force_hpet_timer())
return 1;
-#endif
- /* Most intel systems have synchronized TSCs except for
- multi node systems */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
-#ifdef CONFIG_ACPI
- /* But TSC doesn't tick in C3 so don't use it there */
- if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 1000)
- return 1;
-#endif
+ /* Intel systems are normally all synchronized. Exceptions
+ are handled in the OEM check above. */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
return 0;
- }
-
+#endif
/* Assume multi-socket systems are not synchronized */
return num_present_cpus() > 1;
}
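/*
 * Net effect: the TSC is trusted on uniprocessor boxes and on Intel
 * SMP systems not caught by the OEM check; any other multi-CPU system
 * is assumed unsynchronized.
 */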