* "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
-#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/profile.h>
-#include <asm/segment.h>
#include <asm/io.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/8xx_immap.h>
#include <asm/machdep.h>
+#include <asm/irq_regs.h>
#include <asm/time.h>
-/* XXX false sharing with below? */
-u64 jiffies_64 = INITIAL_JIFFIES;
-
-EXPORT_SYMBOL(jiffies_64);
-
unsigned long disarm_decr[NR_CPUS];
extern struct timezone sys_tz;
unsigned tb_last_stamp;
unsigned long tb_to_ns_scale;
-extern unsigned long wall_jiffies;
-
-static long time_offset;
+/* timezone offset in seconds, as returned by ppc_md.time_init() */
+static long timezone_offset;
-spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
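The SPIN_LOCK_UNLOCKED initializer is deprecated in favour of DEFINE_SPINLOCK(), which declares and statically initializes the lock in one step and stays correct when spinlock debugging options change the layout of spinlock_t. A minimal sketch of the idiom (the lock and function names below are illustrative, not from this file):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* was: spinlock_t example_lock = SPIN_LOCK_UNLOCKED; */

static void example_update_shared_state(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... modify data shared with interrupt context ... */
	spin_unlock_irqrestore(&example_lock, flags);
}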
return delta;
}
-extern char _stext;
-
-static inline void ppc_do_profile (struct pt_regs *regs)
+#ifdef CONFIG_SMP
+unsigned long profile_pc(struct pt_regs *regs)
{
- unsigned long nip;
- extern unsigned long prof_cpu_mask;
+ unsigned long pc = instruction_pointer(regs);
- profile_hook(regs);
+ if (in_lock_functions(pc))
+ return regs->link;
- if (user_mode(regs))
- return;
-
- if (!prof_buffer)
- return;
-
- nip = instruction_pointer(regs);
-
- /*
- * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
- * (default is all CPUs.)
- */
- if (!((1<<smp_processor_id()) & prof_cpu_mask))
- return;
+ return pc;
+}
+EXPORT_SYMBOL(profile_pc);
+#endif
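profile_pc() makes profiling samples that land inside the spinlock helpers get charged to the caller (via the link register) rather than to the lock code itself. in_lock_functions() is just a range check against the linker-provided bounds of the __lockfunc text section; a sketch of roughly how it is implemented in kernel/spinlock.c:

int in_lock_functions(unsigned long addr)
{
	/* The linker script collects all __lockfunc code between these symbols. */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	    && addr < (unsigned long)__lock_text_end;
}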
- nip -= (unsigned long) &_stext;
- nip >>= prof_shift;
- /*
- * Don't ignore out-of-bounds EIP values silently,
- * put them into the last histogram slot, so if
- * present, they will show up as a sharp peak.
+void wakeup_decrementer(void)
+{
+ set_dec(tb_ticks_per_jiffy);
+ /* No currently-supported powerbook has a 601,
+ * so use get_tbl, not native
*/
- if (nip > prof_len-1)
- nip = prof_len-1;
- atomic_inc((atomic_t *)&prof_buffer[nip]);
+ last_jiffy_stamp(0) = tb_last_stamp = get_tbl();
}
/*
*/
void timer_interrupt(struct pt_regs * regs)
{
+ struct pt_regs *old_regs;
int next_dec;
unsigned long cpu = smp_processor_id();
unsigned jiffy_stamp = last_jiffy_stamp(cpu);
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
+ old_regs = set_irq_regs(regs);
irq_enter();
- while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) < 0) {
+ while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) <= 0) {
jiffy_stamp += tb_ticks_per_jiffy;
- ppc_do_profile(regs);
+ profile_tick(CPU_PROFILING);
+ update_process_times(user_mode(regs));
if (smp_processor_id())
continue;
/* We are in an interrupt, no need to save/restore flags */
write_seqlock(&xtime_lock);
tb_last_stamp = jiffy_stamp;
- do_timer(regs);
+ do_timer(1);
/*
* update the rtc when needed, this should be performed on the
* We should have an rtc call that only sets the minutes and
* seconds like on Intel to avoid problems with non UTC clocks.
*/
- if ( ppc_md.set_rtc_time && (time_status & STA_UNSYNC) == 0 &&
+ if ( ppc_md.set_rtc_time && ntp_synced() &&
xtime.tv_sec - last_rtc_update >= 659 &&
- abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
- jiffies - wall_jiffies == 1) {
- if (ppc_md.set_rtc_time(xtime.tv_sec+1 + time_offset) == 0)
+ abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ) {
+ if (ppc_md.set_rtc_time(xtime.tv_sec+1 + timezone_offset) == 0)
last_rtc_update = xtime.tv_sec+1;
else
/* Try again one minute later */
set_dec(next_dec);
last_jiffy_stamp(cpu) = jiffy_stamp;
-#ifdef CONFIG_SMP
- smp_local_timer_interrupt(regs);
-#endif /* CONFIG_SMP */
-
if (ppc_md.heartbeat && !ppc_md.heartbeat_count--)
ppc_md.heartbeat();
irq_exit();
+ set_irq_regs(old_regs);
}
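Since do_timer(), profile_tick() and update_process_times() no longer take a pt_regs argument, the handler publishes the interrupted register state through the per-CPU pointer from <asm/irq_regs.h>, and callees fetch it with get_irq_regs(). A sketch of the pattern, using a hypothetical handler name:

#include <asm/irq_regs.h>
#include <linux/hardirq.h>

void example_timer_handler(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	/* Callees such as profile_tick(CPU_PROFILING) can call
	 * get_irq_regs() instead of receiving regs explicitly. */
	irq_exit();
	set_irq_regs(old_regs);	/* restore for nested interrupts */
}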
/*
{
unsigned long flags;
unsigned long seq;
- unsigned delta, lost_ticks, usec, sec;
+ unsigned delta, usec, sec;
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
if (!smp_tb_synchronized)
delta = 0;
#endif /* CONFIG_SMP */
- lost_ticks = jiffies - wall_jiffies;
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
- usec += mulhwu(tb_to_us, tb_ticks_per_jiffy * lost_ticks + delta);
+ usec += mulhwu(tb_to_us, delta);
while (usec >= 1000000) {
sec++;
usec -= 1000000;
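With wall_jiffies removed from the kernel, xtime is always current as of the last tick, so gettimeofday only adds the timebase ticks elapsed since that tick and the lost_ticks correction disappears. The tick-to-microsecond conversion uses mulhwu(), the PowerPC "multiply high word unsigned" helper, with tb_to_us acting as a 32-bit fixed-point scale: mulhwu(tb_to_us, delta) is the high 32 bits of the 64-bit product, i.e. delta * tb_to_us / 2^32. A sketch of the helper, roughly as found in the ppc <asm/time.h>:

static inline unsigned mulhwu(unsigned a, unsigned b)
{
	unsigned c;

	/* mulhwu: upper 32 bits of the unsigned 32x32 -> 64-bit product */
	__asm__ ("mulhwu %0,%1,%2" : "=r" (c) : "r" (a), "r" (b));
	return c;
}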
* still reasonable when gettimeofday resolution is 1 jiffy.
*/
tb_delta = tb_ticks_since(last_jiffy_stamp(smp_processor_id()));
- tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
new_nsec -= 1000 * mulhwu(tb_to_us, tb_delta);
*/
last_rtc_update = new_sec - 658;
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_state = TIME_ERROR; /* p. 24, (a) */
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irqrestore(&xtime_lock, flags);
clock_was_set();
return 0;
unsigned old_stamp, stamp, elapsed;
if (ppc_md.time_init != NULL)
- time_offset = ppc_md.time_init();
+ timezone_offset = ppc_md.time_init();
if (__USE_RTC()) {
/* 601 processor: dec counts down by 128 every 128ns */
set_dec(tb_ticks_per_jiffy);
/* If platform provided a timezone (pmac), we correct the time */
- if (time_offset) {
- sys_tz.tz_minuteswest = -time_offset / 60;
+ if (timezone_offset) {
+ sys_tz.tz_minuteswest = -timezone_offset / 60;
sys_tz.tz_dsttime = 0;
- xtime.tv_sec -= time_offset;
+ xtime.tv_sec -= timezone_offset;
}
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
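For example, assuming ppc_md.time_init() reports the zone as seconds east of UTC and the RTC runs on local time, a machine at UTC-5 returns timezone_offset = -18000, so:

	sys_tz.tz_minuteswest = -(-18000) / 60 = 300;	/* 300 minutes west of Greenwich */
	xtime.tv_sec -= -18000;				/* local RTC time + 5h = UTC */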