X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fpowerpc%2Fkernel%2Ftime.c;fp=arch%2Fpowerpc%2Fkernel%2Ftime.c;h=f6f0c6b07c4cf335e41357def28b7388bf07d793;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=86f7e3d154d8a11065e16064d4dc083fa99d115c;hpb=76828883507a47dae78837ab5dec5a5b4513c667;p=linux-2.6.git diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 86f7e3d15..f6f0c6b07 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -32,7 +32,6 @@ * 2 of the License, or (at your option) any later version. */ -#include #include #include #include @@ -51,6 +50,8 @@ #include #include #include +#include +#include #include #include @@ -75,7 +76,6 @@ /* keep track of when we need to update the rtc */ time_t last_rtc_update; -extern int piranha_simulator; #ifdef CONFIG_PPC_ISERIES unsigned long iSeries_recal_titan = 0; unsigned long iSeries_recal_tb = 0; @@ -98,10 +98,11 @@ unsigned long tb_ticks_per_jiffy; unsigned long tb_ticks_per_usec = 100; /* sane default */ EXPORT_SYMBOL(tb_ticks_per_usec); unsigned long tb_ticks_per_sec; +EXPORT_SYMBOL(tb_ticks_per_sec); /* for cputime_t conversions */ u64 tb_to_xs; unsigned tb_to_us; -#define TICKLEN_SCALE (SHIFT_SCALE - 10) +#define TICKLEN_SCALE TICK_LENGTH_SHIFT u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */ u64 ticklen_to_xs; /* 0.64 fraction */ @@ -117,23 +118,201 @@ unsigned tb_to_ns_shift; struct gettimeofday_struct do_gtod; -extern unsigned long wall_jiffies; - extern struct timezone sys_tz; static long timezone_offset; unsigned long ppc_proc_freq; unsigned long ppc_tb_freq; -u64 tb_last_jiffy __cacheline_aligned_in_smp; -unsigned long tb_last_stamp; +static u64 tb_last_jiffy __cacheline_aligned_in_smp; +static DEFINE_PER_CPU(u64, last_jiffy); + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING +/* + * Factors for converting from cputime_t (timebase ticks) to + * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds). + * These are all stored as 0.64 fixed-point binary fractions. + */ +u64 __cputime_jiffies_factor; +EXPORT_SYMBOL(__cputime_jiffies_factor); +u64 __cputime_msec_factor; +EXPORT_SYMBOL(__cputime_msec_factor); +u64 __cputime_sec_factor; +EXPORT_SYMBOL(__cputime_sec_factor); +u64 __cputime_clockt_factor; +EXPORT_SYMBOL(__cputime_clockt_factor); + +static void calc_cputime_factors(void) +{ + struct div_result res; + + div128_by_32(HZ, 0, tb_ticks_per_sec, &res); + __cputime_jiffies_factor = res.result_low; + div128_by_32(1000, 0, tb_ticks_per_sec, &res); + __cputime_msec_factor = res.result_low; + div128_by_32(1, 0, tb_ticks_per_sec, &res); + __cputime_sec_factor = res.result_low; + div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res); + __cputime_clockt_factor = res.result_low; +} + +/* + * Read the PURR on systems that have it, otherwise the timebase. + */ +static u64 read_purr(void) +{ + if (cpu_has_feature(CPU_FTR_PURR)) + return mfspr(SPRN_PURR); + return mftb(); +} + +/* + * Account time for a transition between system, hard irq + * or soft irq state. + */ +void account_system_vtime(struct task_struct *tsk) +{ + u64 now, delta; + unsigned long flags; + + local_irq_save(flags); + now = read_purr(); + delta = now - get_paca()->startpurr; + get_paca()->startpurr = now; + if (!in_interrupt()) { + delta += get_paca()->system_time; + get_paca()->system_time = 0; + } + account_system_time(tsk, 0, delta); + local_irq_restore(flags); +} /* - * Note that on ppc32 this only stores the bottom 32 bits of - * the timebase value, but that's enough to tell when a jiffy - * has passed. 
+ * Transfer the user and system times accumulated in the paca + * by the exception entry and exit code to the generic process + * user and system time records. + * Must be called with interrupts disabled. */ -DEFINE_PER_CPU(unsigned long, last_jiffy); +void account_process_vtime(struct task_struct *tsk) +{ + cputime_t utime; + + utime = get_paca()->user_time; + get_paca()->user_time = 0; + account_user_time(tsk, utime); +} + +static void account_process_time(struct pt_regs *regs) +{ + int cpu = smp_processor_id(); + + account_process_vtime(current); + run_local_timers(); + if (rcu_pending(cpu)) + rcu_check_callbacks(cpu, user_mode(regs)); + scheduler_tick(); + run_posix_cpu_timers(current); +} + +#ifdef CONFIG_PPC_SPLPAR +/* + * Stuff for accounting stolen time. + */ +struct cpu_purr_data { + int initialized; /* thread is running */ + u64 tb; /* last TB value read */ + u64 purr; /* last PURR value read */ + spinlock_t lock; +}; + +static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data); + +static void snapshot_tb_and_purr(void *data) +{ + struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data); + + p->tb = mftb(); + p->purr = mfspr(SPRN_PURR); + wmb(); + p->initialized = 1; +} + +/* + * Called during boot when all cpus have come up. + */ +void snapshot_timebases(void) +{ + int cpu; + + if (!cpu_has_feature(CPU_FTR_PURR)) + return; + for_each_possible_cpu(cpu) + spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock); + on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1); +} + +void calculate_steal_time(void) +{ + u64 tb, purr; + s64 stolen; + struct cpu_purr_data *pme; + + if (!cpu_has_feature(CPU_FTR_PURR)) + return; + pme = &per_cpu(cpu_purr_data, smp_processor_id()); + if (!pme->initialized) + return; /* this can happen in early boot */ + spin_lock(&pme->lock); + tb = mftb(); + purr = mfspr(SPRN_PURR); + stolen = (tb - pme->tb) - (purr - pme->purr); + if (stolen > 0) + account_steal_time(current, stolen); + pme->tb = tb; + pme->purr = purr; + spin_unlock(&pme->lock); +} + +/* + * Must be called before the cpu is added to the online map when + * a cpu is being brought up at runtime. + */ +static void snapshot_purr(void) +{ + struct cpu_purr_data *pme; + unsigned long flags; + + if (!cpu_has_feature(CPU_FTR_PURR)) + return; + pme = &per_cpu(cpu_purr_data, smp_processor_id()); + spin_lock_irqsave(&pme->lock, flags); + pme->tb = mftb(); + pme->purr = mfspr(SPRN_PURR); + pme->initialized = 1; + spin_unlock_irqrestore(&pme->lock, flags); +} + +#endif /* CONFIG_PPC_SPLPAR */ + +#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ +#define calc_cputime_factors() +#define account_process_time(regs) update_process_times(user_mode(regs)) +#define calculate_steal_time() do { } while (0) +#endif + +#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)) +#define snapshot_purr() do { } while (0) +#endif + +/* + * Called when a cpu comes up after the system has finished booting, + * i.e. as a result of a hotplug cpu action. + */ +void snapshot_timebase(void) +{ + __get_cpu_var(last_jiffy) = get_tb(); + snapshot_purr(); +} void __delay(unsigned long loops) { @@ -199,7 +378,7 @@ static __inline__ void timer_check_rtc(void) /* * This version of gettimeofday has microsecond resolution. 
*/ -static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val) +static inline void __do_gettimeofday(struct timeval *tv) { unsigned long sec, usec; u64 tb_ticks, xsec; @@ -213,7 +392,12 @@ static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val) * without a divide (and in fact, without a multiply) */ temp_varp = do_gtod.varp; - tb_ticks = tb_val - temp_varp->tb_orig_stamp; + + /* Sampling the time base must be done after loading + * do_gtod.varp in order to avoid racing with update_gtod. + */ + data_barrier(temp_varp); + tb_ticks = get_tb() - temp_varp->tb_orig_stamp; temp_tb_to_xs = temp_varp->tb_to_xs; temp_stamp_xsec = temp_varp->stamp_xsec; xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs); @@ -235,7 +419,7 @@ void do_gettimeofday(struct timeval *tv) do { seq = read_seqbegin_irqsave(&xtime_lock, flags); sec = xtime.tv_sec; - nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp); + nsec = xtime.tv_nsec + tb_ticks_since(tb_last_jiffy); } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); usec = nsec / 1000; while (usec >= 1000000) { @@ -246,7 +430,7 @@ void do_gettimeofday(struct timeval *tv) tv->tv_usec = usec; return; } - __do_gettimeofday(tv, get_tb()); + __do_gettimeofday(tv); } EXPORT_SYMBOL(do_gettimeofday); @@ -392,6 +576,7 @@ static void iSeries_tb_recal(void) new_tb_ticks_per_jiffy, sign, tick_diff ); tb_ticks_per_jiffy = new_tb_ticks_per_jiffy; tb_ticks_per_sec = new_tb_ticks_per_sec; + calc_cputime_factors(); div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres ); do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; tb_to_xs = divres.result_low; @@ -428,21 +613,26 @@ static void iSeries_tb_recal(void) */ void timer_interrupt(struct pt_regs * regs) { + struct pt_regs *old_regs; int next_dec; int cpu = smp_processor_id(); unsigned long ticks; + u64 tb_next_jiffy; #ifdef CONFIG_PPC32 if (atomic_read(&ppc_n_lost_interrupts) != 0) do_IRQ(regs); #endif + old_regs = set_irq_regs(regs); irq_enter(); - profile_tick(CPU_PROFILING, regs); + profile_tick(CPU_PROFILING); + calculate_steal_time(); #ifdef CONFIG_PPC_ISERIES - get_lppaca()->int_dword.fields.decr_int = 0; + if (firmware_has_feature(FW_FEATURE_ISERIES)) + get_lppaca()->int_dword.fields.decr_int = 0; #endif while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu))) @@ -461,7 +651,7 @@ void timer_interrupt(struct pt_regs * regs) * is the case. 
*/ if (!cpu_is_offline(cpu)) - update_process_times(user_mode(regs)); + account_process_time(regs); /* * No need to check whether cpu is offline here; boot_cpuid @@ -471,11 +661,13 @@ void timer_interrupt(struct pt_regs * regs) continue; write_seqlock(&xtime_lock); - tb_last_jiffy += tb_ticks_per_jiffy; - tb_last_stamp = per_cpu(last_jiffy, cpu); - do_timer(regs); - timer_recalc_offset(tb_last_jiffy); - timer_check_rtc(); + tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy; + if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) { + tb_last_jiffy = tb_next_jiffy; + do_timer(1); + timer_recalc_offset(tb_last_jiffy); + timer_check_rtc(); + } write_sequnlock(&xtime_lock); } @@ -483,8 +675,8 @@ void timer_interrupt(struct pt_regs * regs) set_dec(next_dec); #ifdef CONFIG_PPC_ISERIES - if (hvlpevent_is_pending()) - process_hvlpevents(regs); + if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending()) + process_hvlpevents(); #endif #ifdef CONFIG_PPC64 @@ -496,6 +688,7 @@ void timer_interrupt(struct pt_regs * regs) #endif irq_exit(); + set_irq_regs(old_regs); } void wakeup_decrementer(void) @@ -518,13 +711,27 @@ void wakeup_decrementer(void) void __init smp_space_timers(unsigned int max_cpus) { int i; + unsigned long half = tb_ticks_per_jiffy / 2; unsigned long offset = tb_ticks_per_jiffy / max_cpus; - unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid); + u64 previous_tb = per_cpu(last_jiffy, boot_cpuid); /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */ previous_tb -= tb_ticks_per_jiffy; - for_each_cpu(i) { - if (i != boot_cpuid) { + /* + * The stolen time calculation for POWER5 shared-processor LPAR + * systems works better if the two threads' timebase interrupts + * are staggered by half a jiffy with respect to each other. + */ + for_each_possible_cpu(i) { + if (i == boot_cpuid) + continue; + if (i == (boot_cpuid ^ 1)) + per_cpu(last_jiffy, i) = + per_cpu(last_jiffy, boot_cpuid) - half; + else if (i & 1) + per_cpu(last_jiffy, i) = + per_cpu(last_jiffy, i ^ 1) + half; + else { previous_tb += offset; per_cpu(last_jiffy, i) = previous_tb; } @@ -568,7 +775,7 @@ int do_settimeofday(struct timespec *tv) * settimeofday to perform this operation. */ #ifdef CONFIG_PPC_ISERIES - if (first_settimeofday) { + if (firmware_has_feature(FW_FEATURE_ISERIES) && first_settimeofday) { iSeries_tb_recal(); first_settimeofday = 0; } @@ -581,13 +788,8 @@ int do_settimeofday(struct timespec *tv) /* * Subtract off the number of nanoseconds since the * beginning of the last tick. - * Note that since we don't increment jiffies_64 anywhere other - * than in do_timer (since we don't have a lost tick problem), - * wall_jiffies will always be the same as jiffies, - * and therefore the (jiffies - wall_jiffies) computation - * has been removed. */ - tb_delta = tb_ticks_since(tb_last_stamp); + tb_delta = tb_ticks_since(tb_last_jiffy); tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */ new_nsec -= SCALE_XSEC(tb_delta, 1000000000); @@ -622,42 +824,48 @@ int do_settimeofday(struct timespec *tv) EXPORT_SYMBOL(do_settimeofday); -void __init generic_calibrate_decr(void) +static int __init get_freq(char *name, int cells, unsigned long *val) { struct device_node *cpu; - unsigned int *fp; - int node_found; + const unsigned int *fp; + int found = 0; - /* - * The cpu node should have a timebase-frequency property - * to tell us the rate at which the decrementer counts. 
- */ + /* The cpu node should have timebase and clock frequency properties */ cpu = of_find_node_by_type(NULL, "cpu"); - ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */ - node_found = 0; if (cpu) { - fp = (unsigned int *)get_property(cpu, "timebase-frequency", - NULL); + fp = get_property(cpu, name, NULL); if (fp) { - node_found = 1; - ppc_tb_freq = *fp; + found = 1; + *val = of_read_ulong(fp, cells); } + + of_node_put(cpu); } - if (!node_found) + + return found; +} + +void __init generic_calibrate_decr(void) +{ + ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */ + + if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) && + !get_freq("timebase-frequency", 1, &ppc_tb_freq)) { + printk(KERN_ERR "WARNING: Estimating decrementer frequency " "(not found)\n"); + } - ppc_proc_freq = DEFAULT_PROC_FREQ; - node_found = 0; - if (cpu) { - fp = (unsigned int *)get_property(cpu, "clock-frequency", - NULL); - if (fp) { - node_found = 1; - ppc_proc_freq = *fp; - } + ppc_proc_freq = DEFAULT_PROC_FREQ; /* hardcoded default */ + + if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) && + !get_freq("clock-frequency", 1, &ppc_proc_freq)) { + + printk(KERN_ERR "WARNING: Estimating processor frequency " + "(not found)\n"); } + #ifdef CONFIG_BOOKE /* Set the time base to zero */ mtspr(SPRN_TBWL, 0); @@ -669,11 +877,6 @@ void __init generic_calibrate_decr(void) /* Enable decrementer interrupt */ mtspr(SPRN_TCR, TCR_DIE); #endif - if (!node_found) - printk(KERN_ERR "WARNING: Estimating processor frequency " - "(not found)\n"); - - of_node_put(cpu); } unsigned long get_boot_time(void) @@ -704,22 +907,22 @@ void __init time_init(void) if (__USE_RTC()) { /* 601 processor: dec counts down by 128 every 128ns */ ppc_tb_freq = 1000000000; - tb_last_stamp = get_rtcl(); - tb_last_jiffy = tb_last_stamp; + tb_last_jiffy = get_rtcl(); } else { /* Normal PowerPC with timebase register */ ppc_md.calibrate_decr(); - printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n", + printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n", ppc_tb_freq / 1000000, ppc_tb_freq % 1000000); - printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n", + printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n", ppc_proc_freq / 1000000, ppc_proc_freq % 1000000); - tb_last_stamp = tb_last_jiffy = get_tb(); + tb_last_jiffy = get_tb(); } tb_ticks_per_jiffy = ppc_tb_freq / HZ; tb_ticks_per_sec = ppc_tb_freq; tb_ticks_per_usec = ppc_tb_freq / 1000000; tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); + calc_cputime_factors(); /* * Calculate the length of each tick in ns. It will not be @@ -773,10 +976,7 @@ void __init time_init(void) tb_to_ns_scale = scale; tb_to_ns_shift = shift; -#ifdef CONFIG_PPC_ISERIES - if (!piranha_simulator) -#endif - tm = get_boot_time(); + tm = get_boot_time(); write_seqlock_irqsave(&xtime_lock, flags); @@ -792,7 +992,7 @@ void __init time_init(void) do_gtod.varp = &do_gtod.vars[0]; do_gtod.var_idx = 0; do_gtod.varp->tb_orig_stamp = tb_last_jiffy; - __get_cpu_var(last_jiffy) = tb_last_stamp; + __get_cpu_var(last_jiffy) = tb_last_jiffy; do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC; do_gtod.tb_ticks_per_sec = tb_ticks_per_sec; do_gtod.varp->tb_to_xs = tb_to_xs;
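
The __cputime_*_factor values introduced in this patch are 0.64 fixed-point binary fractions: each factor is units-per-second scaled by 2^64 and divided by tb_ticks_per_sec, which is what the div128_by_32() calls in calc_cputime_factors() compute, and a tick count is converted by taking the high 64 bits of a 64x64-bit multiply, which is what mulhdu() does on the kernel side. Keeping the factors in this form means the hot accounting paths never divide by the timebase frequency. A minimal userspace sketch of the same arithmetic, not kernel code; the timebase rate, HZ value and tick count are made up, and it relies on GCC/Clang's unsigned __int128 on a 64-bit target:

#include <stdint.h>
#include <stdio.h>

/* 0.64 fixed-point factor: units_per_sec * 2^64 / tb_ticks_per_sec,
 * the userspace analogue of what calc_cputime_factors() derives with
 * div128_by_32(). */
static uint64_t make_factor(uint64_t units_per_sec, uint64_t tb_ticks_per_sec)
{
	return (uint64_t)(((unsigned __int128)units_per_sec << 64) /
			  tb_ticks_per_sec);
}

/* High 64 bits of a 64x64->128 multiply, i.e. what mulhdu() returns. */
static uint64_t mulhdu(uint64_t a, uint64_t b)
{
	return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

int main(void)
{
	/* Assumed timebase rate; a power of two (2^29 ticks/s) keeps the
	 * factors exact here, real frequencies truncate very slightly. */
	uint64_t tb_ticks_per_sec = 536870912;
	uint64_t msec_factor = make_factor(1000, tb_ticks_per_sec);
	uint64_t jiffies_factor = make_factor(100, tb_ticks_per_sec); /* assumed HZ=100 */
	uint64_t ticks = 1342177280;	/* 2.5 seconds worth of ticks */

	/* Prints "2500 ms, 250 jiffies". */
	printf("%llu ms, %llu jiffies\n",
	       (unsigned long long)mulhdu(ticks, msec_factor),
	       (unsigned long long)mulhdu(ticks, jiffies_factor));
	return 0;
}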
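
calculate_steal_time() in the CONFIG_PPC_SPLPAR hunk charges as stolen time the amount by which the always-running timebase gets ahead of the PURR, since on a shared-processor LPAR the PURR only advances while this virtual processor is actually dispatched. A sketch of that delta calculation with made-up snapshot values standing in for the mftb()/mfspr(SPRN_PURR) reads:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Two successive snapshots of the timebase (always running) and the
	 * PURR (only runs while dispatched); values are hypothetical. */
	uint64_t tb_prev = 1000000, tb_now = 1512000;
	uint64_t purr_prev = 800000, purr_now = 1200000;

	/* Same formula as calculate_steal_time():
	 * stolen = (timebase delta) - (PURR delta). */
	int64_t stolen = (int64_t)(tb_now - tb_prev) -
			 (int64_t)(purr_now - purr_prev);

	if (stolen > 0)
		printf("stolen: %lld timebase ticks\n", (long long)stolen);
	return 0;
}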
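
The reworked smp_space_timers() spreads the primary (even-numbered) threads' initial decrementer expiries across a jiffy and puts each sibling thread half a jiffy away from its partner, so the two threads of a core do not take their timer interrupts back to back; per the comment in the patch, that keeps the PURR-based stolen-time estimate better behaved. A sketch of the resulting spacing for a hypothetical 4-CPU box (2 cores x 2 threads), with an assumed timebase frequency, HZ and boot-time timebase value:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

int main(void)
{
	uint64_t tb_ticks_per_jiffy = 5120000;	/* assumed: 512 MHz timebase, HZ=100 */
	uint64_t half = tb_ticks_per_jiffy / 2;
	uint64_t offset = tb_ticks_per_jiffy / NR_CPUS;
	uint64_t last_jiffy[NR_CPUS];
	uint64_t previous_tb;
	int boot_cpuid = 0;
	int i;

	last_jiffy[boot_cpuid] = 1000000000ULL;	/* hypothetical boot-cpu timebase */
	/* make sure tb > last_jiffy[cpu] for all cpus always */
	previous_tb = last_jiffy[boot_cpuid] - tb_ticks_per_jiffy;

	/* Same spacing rules as the reworked smp_space_timers(): primary
	 * threads are spread out by 'offset', each sibling thread sits half
	 * a jiffy away from its partner. */
	for (i = 0; i < NR_CPUS; i++) {
		if (i == boot_cpuid)
			continue;
		if (i == (boot_cpuid ^ 1))
			last_jiffy[i] = last_jiffy[boot_cpuid] - half;
		else if (i & 1)
			last_jiffy[i] = last_jiffy[i ^ 1] + half;
		else {
			previous_tb += offset;
			last_jiffy[i] = previous_tb;
		}
	}

	for (i = 0; i < NR_CPUS; i++)
		printf("cpu %d: last_jiffy = %llu\n", i,
		       (unsigned long long)last_jiffy[i]);
	return 0;
}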