diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index d0c980a9b..13ff4da59 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -1,7 +1,7 @@
 /*
  * Copyright 2001 MontaVista Software Inc.
  * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- * Copyright (c) 2003 Maciej W. Rozycki
+ * Copyright (c) 2003, 2004 Maciej W. Rozycki
  *
  * Common time service routines for MIPS machines. See
  * Documentation/mips/time.README.
@@ -26,10 +26,11 @@
 #include 
 #include 
+#include 
+#include 
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 

@@ -44,22 +45,12 @@

 #define TICK_SIZE	(tick_nsec / 1000)

-u64 jiffies_64 = INITIAL_JIFFIES;
-
-EXPORT_SYMBOL(jiffies_64);
-
 /*
  * forward reference
  */
 extern volatile unsigned long wall_jiffies;

-spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
-
-/*
- * whether we emulate local_timer_interrupts for SMP machines.
- */
-int emulate_local_timer_interrupt;
-
+DEFINE_SPINLOCK(rtc_lock);

 /*
  * By default we provide the null RTC ops
@@ -74,16 +65,16 @@ static int null_rtc_set_time(unsigned long sec)
 	return 0;
 }

-unsigned long (*rtc_get_time)(void) = null_rtc_get_time;
-int (*rtc_set_time)(unsigned long) = null_rtc_set_time;
-int (*rtc_set_mmss)(unsigned long);
+unsigned long (*rtc_mips_get_time)(void) = null_rtc_get_time;
+int (*rtc_mips_set_time)(unsigned long) = null_rtc_set_time;
+int (*rtc_mips_set_mmss)(unsigned long);


 /* usecs per counter cycle, shifted left by 32 bits */
 static unsigned int sll32_usecs_per_cycle;

 /* how many counter cycles in a jiffy */
-static unsigned long cycles_per_jiffy;
+static unsigned long cycles_per_jiffy __read_mostly;

 /* Cycle counter value at the previous timer interrupt.. */
 static unsigned int timerhi, timerlo;
@@ -105,7 +96,10 @@ static unsigned int null_hpt_read(void)
 	return 0;
 }

-static void null_hpt_init(unsigned int count) { /* nothing */ }
+static void null_hpt_init(unsigned int count)
+{
+	/* nothing */
+}


 /*
@@ -115,13 +109,14 @@ static void c0_timer_ack(void)
 {
 	unsigned int count;

+#ifndef CONFIG_SOC_PNX8550	/* pnx8550 resets to zero */
 	/* Ack this timer interrupt and set the next one.  */
 	expirelo += cycles_per_jiffy;
+#endif
 	write_c0_compare(expirelo);

 	/* Check to see if we have missed any timer interrupts.  */
-	count = read_c0_count();
-	if ((count - expirelo) < 0x7fffffff) {
+	while (((count = read_c0_count()) - expirelo) < 0x7fffffff) {
 		/* missed_timer_count++; */
 		expirelo = count + cycles_per_jiffy;
 		write_c0_compare(expirelo);
@@ -167,7 +162,7 @@ void do_gettimeofday(struct timeval *tv)
 	unsigned long seq;
 	unsigned long lost;
 	unsigned long usec, sec;
-	unsigned long max_ntp_tick = tick_usec - tickadj;
+	unsigned long max_ntp_tick;

 	do {
 		seq = read_seqbegin(&xtime_lock);
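/*
 * A minimal userspace sketch of the catch-up logic that the c0_timer_ack()
 * hunk above turns from an "if" into a "while": Compare is pushed forward
 * until it is ahead of Count again, so several missed ticks in a row are
 * all recovered.  read_count()/write_compare() are hypothetical stand-ins
 * for the CP0 accessors, and cycles_per_jiffy is an illustrative value;
 * the wraparound-safe unsigned comparison is the same as in the kernel code.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t count;				/* simulated CP0 Count */
static uint32_t compare;			/* simulated CP0 Compare */
static const uint32_t cycles_per_jiffy = 1000;	/* illustrative value */

static uint32_t read_count(void) { return count; }
static void write_compare(uint32_t v) { compare = v; }

static void timer_ack(uint32_t *expirelo)
{
	uint32_t c;

	*expirelo += cycles_per_jiffy;		/* ack, program next tick */
	write_compare(*expirelo);

	/*
	 * If Count has already passed Compare, interrupts were missed:
	 * keep moving Compare ahead until it is in the future again.
	 * The subtraction wraps safely on unsigned 32-bit values.
	 */
	while (((c = read_count()) - *expirelo) < 0x7fffffff) {
		*expirelo = c + cycles_per_jiffy;
		write_compare(*expirelo);
	}
}

int main(void)
{
	uint32_t expirelo = 0;

	count = 3500;				/* pretend 3.5 ticks elapsed */
	timer_ack(&expirelo);
	printf("Compare reprogrammed to %u\n", compare);	/* 4500 */
	return 0;
}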
@@ -182,12 +177,13 @@ void do_gettimeofday(struct timeval *tv)
 		 * Better to lose some accuracy than have time go backwards..
 		 */
 		if (unlikely(time_adjust < 0)) {
+			max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
 			usec = min(usec, max_ntp_tick);

 			if (lost)
 				usec += lost * max_ntp_tick;
 		} else if (unlikely(lost))
-			usec += lost * tick_usec;
+			usec += lost * (USEC_PER_SEC / HZ);

 		sec = xtime.tv_sec;
 		usec += (xtime.tv_nsec / 1000);
@@ -230,11 +226,7 @@ int do_settimeofday(struct timespec *tv)
 	set_normalized_timespec(&xtime, sec, nsec);
 	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

-	time_adjust = 0;			/* stop active adjtime() */
-	time_status |= STA_UNSYNC;
-	time_maxerror = NTP_PHASE_LIMIT;
-	time_esterror = NTP_PHASE_LIMIT;
-
+	ntp_clear();
 	write_sequnlock_irq(&xtime_lock);
 	clock_was_set();
 	return 0;
@@ -278,7 +270,7 @@ static unsigned long fixed_rate_gettimeoffset(void)
 	__asm__("multu	%1,%2"
 		: "=h" (res)
 		: "r" (count), "r" (sll32_usecs_per_cycle)
-		: "lo", "accum");
+		: "lo", GCC_REG_ACCUM);

 	/*
 	 * Due to possible jiffies inconsistencies, we need to check
@@ -333,7 +325,7 @@ static unsigned long calibrate_div32_gettimeoffset(void)
 	__asm__("multu	%1,%2"
 		: "=h" (res)
 		: "r" (count), "r" (quotient)
-		: "lo", "accum");
+		: "lo", GCC_REG_ACCUM);

 	/*
 	 * Due to possible jiffies inconsistencies, we need to check
@@ -375,7 +367,7 @@ static unsigned long calibrate_div64_gettimeoffset(void)
 			: "r" (timerhi), "m" (timerlo),
 			  "r" (tmp), "r" (USECS_PER_JIFFY),
 			  "r" (USECS_PER_JIFFY_FRAC)
-			: "hi", "lo", "accum");
+			: "hi", "lo", GCC_REG_ACCUM);
 			cached_quotient = quotient;
 		}
 	}
@@ -389,7 +381,7 @@ static unsigned long calibrate_div64_gettimeoffset(void)
 	__asm__("multu	%1,%2"
 		: "=h" (res)
 		: "r" (count), "r" (quotient)
-		: "lo", "accum");
+		: "lo", GCC_REG_ACCUM);

 	/*
 	 * Due to possible jiffies inconsistencies, we need to check
@@ -417,27 +409,9 @@ static long last_rtc_update;
  */
 void local_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
-	if (!user_mode(regs)) {
-		if (prof_buffer && current->pid) {
-			unsigned long pc = regs->cp0_epc;
-
-			pc -= (unsigned long) _stext;
-			pc >>= prof_shift;
-			/*
-			 * Dont ignore out-of-bounds pc values silently,
-			 * put them into the last histogram slot, so if
-			 * present, they will show up as a sharp peak.
-			 */
-			if (pc > prof_len - 1)
-				pc = prof_len - 1;
-			atomic_inc((atomic_t *)&prof_buffer[pc]);
-		}
-	}
-
-#ifdef CONFIG_SMP
-	/* in UP mode, update_process_times() is invoked by do_timer() */
+	if (current->pid)
+		profile_tick(CPU_PROFILING, regs);
 	update_process_times(user_mode(regs));
-#endif
 }

 /*
@@ -449,6 +423,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	unsigned long j;
 	unsigned int count;

+	write_seqlock(&xtime_lock);
+
 	count = mips_hpt_read();
 	mips_timer_ack();

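/*
 * A userspace sketch of the fixed-point conversion behind
 * fixed_rate_gettimeoffset() above: sll32_usecs_per_cycle holds
 * microseconds-per-cycle scaled by 2^32, so the high 32 bits of a
 * 32x32->64 multiply (what the "multu"/"=h" asm reads back from the
 * HI register) are whole microseconds.  The 100 MHz counter frequency
 * and cycle count here are illustrative, not taken from the kernel.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hpt_frequency = 100000000;	/* example: 100 MHz counter */

	/* usecs per cycle, scaled by 2^32: (10^6 * 2^32) / frequency */
	uint32_t sll32_usecs_per_cycle =
		(uint32_t)(((uint64_t)1000000 << 32) / hpt_frequency);

	uint32_t count = 250000;		/* cycles since the last tick */
	uint32_t usec = (uint32_t)(((uint64_t)count *
				    sll32_usecs_per_cycle) >> 32);

	printf("%u cycles -> %u usec\n", count, usec);	/* ~2500 usec */
	return 0;
}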
@@ -463,22 +439,20 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)

 	/*
 	 * If we have an externally synchronized Linux clock, then update
-	 * CMOS clock accordingly every ~11 minutes. rtc_set_time() has to be
+	 * CMOS clock accordingly every ~11 minutes. rtc_mips_set_time() has to be
 	 * called as close as possible to 500 ms before the new second starts.
 	 */
-	write_seqlock(&xtime_lock);
-	if ((time_status & STA_UNSYNC) == 0 &&
+	if (ntp_synced() &&
 	    xtime.tv_sec > last_rtc_update + 660 &&
 	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
 	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
-		if (rtc_set_mmss(xtime.tv_sec) == 0) {
+		if (rtc_mips_set_mmss(xtime.tv_sec) == 0) {
 			last_rtc_update = xtime.tv_sec;
 		} else {
 			/* do it again in 60 s */
 			last_rtc_update = xtime.tv_sec - 600;
 		}
 	}
-	write_sequnlock(&xtime_lock);

 	/*
 	 * If jiffies has overflown in this timer_interrupt, we must
@@ -521,7 +495,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 		}
 	}

-#if !defined(CONFIG_SMP)
+	write_sequnlock(&xtime_lock);
+
 	/*
 	 * In UP mode, we call local_timer_interrupt() to do profiling
 	 * and process accounting.
@@ -531,32 +506,41 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	 */
 	local_timer_interrupt(irq, dev_id, regs);

-#else	/* CONFIG_SMP */
-
-	if (emulate_local_timer_interrupt) {
-		/*
-		 * this is the place where we send out inter-process
-		 * interrupts and let each CPU do its own profiling
-		 * and process accouting.
-		 *
-		 * Obviously we need to call local_timer_interrupt() for
-		 * the current CPU too.
-		 */
-		panic("Not implemented yet!!!");
-	}
-#endif	/* CONFIG_SMP */
-
 	return IRQ_HANDLED;
 }

+int null_perf_irq(struct pt_regs *regs)
+{
+	return 0;
+}
+
+int (*perf_irq)(struct pt_regs *regs) = null_perf_irq;
+
+EXPORT_SYMBOL(null_perf_irq);
+EXPORT_SYMBOL(perf_irq);
+
 asmlinkage void ll_timer_interrupt(int irq, struct pt_regs *regs)
 {
+	int r2 = cpu_has_mips_r2;
+
 	irq_enter();
 	kstat_this_cpu.irqs[irq]++;

+	/*
+	 * Suckage alert:
+	 * Before R2 of the architecture there was no way to see if a
+	 * performance counter interrupt was pending, so we have to run the
+	 * performance counter interrupt handler anyway.
+	 */
+	if (!r2 || (read_c0_cause() & (1 << 26)))
+		if (perf_irq(regs))
+			goto out;
+
 	/* we keep interrupt disabled all the time */
-	timer_interrupt(irq, NULL, regs);
+	if (!r2 || (read_c0_cause() & (1 << 30)))
+		timer_interrupt(irq, NULL, regs);

+out:
 	irq_exit();
 }

@@ -580,7 +564,7 @@ asmlinkage void ll_local_timer_interrupt(int irq, struct pt_regs *regs)
  *	b) (optional) calibrate and set the mips_hpt_frequency
  *	    (only needed if you intend to use fixed_rate_gettimeoffset
  *	     or use cpu counter as timer interrupt source)
- * 2) setup xtime based on rtc_get_time().
+ * 2) setup xtime based on rtc_mips_get_time().
  * 3) choose an appropriate gettimeoffset routine.
  * 4) calculate a couple of cached variables for later usage
  * 5) board_timer_setup() -
@@ -648,10 +632,10 @@ void __init time_init(void)
 	if (board_time_init)
 		board_time_init();

-	if (!rtc_set_mmss)
-		rtc_set_mmss = rtc_set_time;
+	if (!rtc_mips_set_mmss)
+		rtc_mips_set_mmss = rtc_mips_set_time;

-	xtime.tv_sec = rtc_get_time();
+	xtime.tv_sec = rtc_mips_get_time();
 	xtime.tv_nsec = 0;

 	set_normalized_timespec(&wall_to_monotonic,
@@ -670,9 +654,9 @@ void __init time_init(void)
 			mips_hpt_init = c0_hpt_init;
 	}

-	if ((current_cpu_data.isa_level == MIPS_CPU_ISA_M32) ||
-	    (current_cpu_data.isa_level == MIPS_CPU_ISA_I) ||
-	    (current_cpu_data.isa_level == MIPS_CPU_ISA_II))
+	if (cpu_has_mips32r1 || cpu_has_mips32r2 ||
+	    (current_cpu_data.isa_level == MIPS_CPU_ISA_I) ||
+	    (current_cpu_data.isa_level == MIPS_CPU_ISA_II))
 		/*
 		 * We need to calibrate the counter but we don't have
 		 * 64-bit division.
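/*
 * A sketch of the dispatch logic added to ll_timer_interrupt() above:
 * on MIPS32/64 R2 cores the Cause register flags which source raised the
 * shared CPU interrupt (bit 26 = performance counter, bit 30 = timer);
 * pre-R2 cores give no such indication, so both handlers must run
 * unconditionally.  cpu_is_r2 and the two handlers are stand-ins for
 * the kernel's cpu_has_mips_r2, perf_irq() and timer_interrupt().
 */
#include <stdint.h>
#include <stdio.h>

#define CAUSE_PCI	(1u << 26)	/* perf counter interrupt pending */
#define CAUSE_TI	(1u << 30)	/* timer interrupt pending */

static int perf_handler(void)
{
	puts("perf counter checked");
	return 0;			/* 0: not ours, keep dispatching */
}

static void timer_handler(void)
{
	puts("timer tick handled");
}

static void dispatch(int cpu_is_r2, uint32_t cause)
{
	/* Pre-R2 there is no pending bit, so always poll the perf handler. */
	if (!cpu_is_r2 || (cause & CAUSE_PCI))
		if (perf_handler())
			return;		/* perf irq consumed the interrupt */

	if (!cpu_is_r2 || (cause & CAUSE_TI))
		timer_handler();
}

int main(void)
{
	dispatch(1, CAUSE_TI);	/* R2, timer only: perf handler skipped */
	dispatch(0, 0);		/* pre-R2: both paths polled regardless */
	return 0;
}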
@@ -787,5 +771,10 @@ void to_tm(unsigned long tim, struct rtc_time *tm) EXPORT_SYMBOL(rtc_lock); EXPORT_SYMBOL(to_tm); -EXPORT_SYMBOL(rtc_set_time); -EXPORT_SYMBOL(rtc_get_time); +EXPORT_SYMBOL(rtc_mips_set_time); +EXPORT_SYMBOL(rtc_mips_get_time); + +unsigned long long sched_clock(void) +{ + return (unsigned long long)jiffies*(1000000000/HZ); +}
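/*
 * A sketch of what the jiffies-based sched_clock() added above provides:
 * timestamps advance in whole-jiffy steps of (10^9 / HZ) ns, so the
 * resolution is one scheduler tick (10 ms at HZ=100), far coarser than a
 * cycle-counter based clock.  The HZ value here is illustrative only.
 */
#include <stdio.h>

#define HZ 100

static unsigned long jiffies;		/* simulated tick counter */

static unsigned long long sched_clock(void)
{
	return (unsigned long long)jiffies * (1000000000 / HZ);
}

int main(void)
{
	for (jiffies = 0; jiffies < 3; jiffies++)
		printf("jiffy %lu -> %llu ns\n", jiffies, sched_clock());
	/* prints 0, 10000000, 20000000: 10 ms granularity */
	return 0;
}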