X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fs390%2Fkernel%2Ftime.c;h=2a6c6efb68653db8880853d09648d86f21af60b9;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=8f279421646f6da5809c9e7fd6b32c076b6e4551;hpb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;p=linux-2.6.git

diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 8f2794216..2a6c6efb6 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -49,10 +49,6 @@
 #define TICK_SIZE tick
 
-u64 jiffies_64 = INITIAL_JIFFIES;
-
-EXPORT_SYMBOL(jiffies_64);
-
 static ext_int_info_t ext_int_info_cc;
 static u64 init_timer_cc;
 static u64 jiffies_timer_cc;
@@ -65,8 +61,17 @@ extern unsigned long wall_jiffies;
  */
 unsigned long long sched_clock(void)
 {
-	return ((get_clock() - jiffies_timer_cc) * 1000) >> 12;
+	return ((get_clock() - jiffies_timer_cc) * 125) >> 9;
+}
+
+/*
+ * Monotonic_clock - returns # of nanoseconds passed since time_init()
+ */
+unsigned long long monotonic_clock(void)
+{
+	return sched_clock();
 }
+EXPORT_SYMBOL(monotonic_clock);
 
 void tod_to_timeval(__u64 todval, struct timespec *xtime)
 {
@@ -139,10 +144,7 @@ int do_settimeofday(struct timespec *tv)
 	set_normalized_timespec(&xtime, sec, nsec);
 	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
 
-	time_adjust = 0;		/* stop active adjtime() */
-	time_status |= STA_UNSYNC;
-	time_maxerror = NTP_PHASE_LIMIT;
-	time_esterror = NTP_PHASE_LIMIT;
+	ntp_clear();
 	write_sequnlock_irq(&xtime_lock);
 	clock_was_set();
 	return 0;
@@ -150,28 +152,6 @@ int do_settimeofday(struct timespec *tv)
 
 EXPORT_SYMBOL(do_settimeofday);
 
-#ifndef CONFIG_ARCH_S390X
-
-static inline __u32
-__calculate_ticks(__u64 elapsed)
-{
-	register_pair rp;
-
-	rp.pair = elapsed >> 1;
-	asm ("dr %0,%1" : "+d" (rp) : "d" (CLK_TICKS_PER_JIFFY >> 1));
-	return rp.subreg.odd;
-}
-
-#else /* CONFIG_ARCH_S390X */
-
-static inline __u32
-__calculate_ticks(__u64 elapsed)
-{
-	return elapsed / CLK_TICKS_PER_JIFFY;
-}
-
-#endif /* CONFIG_ARCH_S390X */
-
 #ifdef CONFIG_PROFILING
 #define s390_do_profile(regs)	profile_tick(CPU_PROFILING, regs)
@@ -187,14 +167,23 @@ __calculate_ticks(__u64 elapsed)
 void account_ticks(struct pt_regs *regs)
 {
 	__u64 tmp;
-	__u32 ticks;
+	__u32 ticks, xticks;
 
 	/* Calculate how many ticks have passed. */
-	if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer)
+	if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer) {
+		/*
+		 * We have to program the clock comparator even if
+		 * no tick has passed. That happens if e.g. an i/o
+		 * interrupt wakes up an idle processor that has
+		 * switched off its hz timer.
+		 */
+		tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
+		asm volatile ("SCKC %0" : : "m" (tmp));
 		return;
+	}
 	tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer;
 	if (tmp >= 2*CLK_TICKS_PER_JIFFY) {  /* more than two ticks ? */
-		ticks = __calculate_ticks(tmp) + 1;
+		ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
 		S390_lowcore.jiffy_timer +=
 			CLK_TICKS_PER_JIFFY * (__u64) ticks;
 	} else if (tmp >= CLK_TICKS_PER_JIFFY) {
@@ -216,11 +205,9 @@ void account_ticks(struct pt_regs *regs)
 	 */
 	write_seqlock(&xtime_lock);
 	if (S390_lowcore.jiffy_timer > xtime_cc) {
-		__u32 xticks;
-
 		tmp = S390_lowcore.jiffy_timer - xtime_cc;
 		if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
-			xticks = __calculate_ticks(tmp);
+			xticks = __div(tmp, CLK_TICKS_PER_JIFFY);
 			xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
 		} else {
 			xticks = 1;
@@ -230,12 +217,18 @@ void account_ticks(struct pt_regs *regs)
 		do_timer(regs);
 	}
 	write_sequnlock(&xtime_lock);
-	while (ticks--)
-		update_process_times(user_mode(regs));
 #else
-	while (ticks--)
+	for (xticks = ticks; xticks > 0; xticks--)
 		do_timer(regs);
 #endif
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	account_tick_vtime(current);
+#else
+	while (ticks--)
+		update_process_times(user_mode(regs));
+#endif
+
 	s390_do_profile(regs);
 }
 
@@ -253,26 +246,42 @@ int sysctl_hz_timer = 1;
  */
 static inline void stop_hz_timer(void)
 {
-	__u64 timer;
+	unsigned long flags;
+	unsigned long seq, next;
+	__u64 timer, todval;
+	int cpu = smp_processor_id();
 
 	if (sysctl_hz_timer != 0)
 		return;
+	cpu_set(cpu, nohz_cpu_mask);
+
 	/*
 	 * Leave the clock comparator set up for the next timer
 	 * tick if either rcu or a softirq is pending.
 	 */
-	if (rcu_pending(smp_processor_id()) || local_softirq_pending())
+	if (rcu_needs_cpu(cpu) || local_softirq_pending()) {
+		cpu_clear(cpu, nohz_cpu_mask);
 		return;
+	}
 	/*
 	 * This cpu is going really idle. Set up the clock comparator
 	 * for the next event.
 	 */
-	cpu_set(smp_processor_id(), nohz_cpu_mask);
-	timer = (__u64) (next_timer_interrupt() - jiffies) + jiffies_64;
-	timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
-	asm volatile ("SCKC %0" : : "m" (timer));
+	next = next_timer_interrupt();
+	do {
+		seq = read_seqbegin_irqsave(&xtime_lock, flags);
+		timer = ((__u64) next) - ((__u64) jiffies) + jiffies_64;
+	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+	todval = -1ULL;
+	/* Be careful about overflows. */
+	if (timer < (-1ULL / CLK_TICKS_PER_JIFFY)) {
+		timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
+		if (timer >= jiffies_timer_cc)
+			todval = timer;
+	}
+	asm volatile ("SCKC %0" : : "m" (todval));
 }
 
 /*
@@ -283,7 +292,7 @@ static inline void start_hz_timer(void)
 {
 	if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
 		return;
-	account_ticks(__KSTK_PTREGS(current));
+	account_ticks(task_pt_regs(current));
 	cpu_clear(smp_processor_id(), nohz_cpu_mask);
 }
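
Note on the sched_clock() hunk above: the s390 TOD clock advances 4096 times per microsecond, so converting a TOD delta to nanoseconds means scaling by 1000/4096, which reduces to 125/512. Replacing "* 1000 >> 12" with "* 125 >> 9" therefore yields the same result wherever neither multiply overflows, but the smaller multiplier leaves three extra bits of headroom before the 64-bit product wraps (on the order of 400 days of clock delta instead of roughly 52). The following stand-alone user-space C sketch, with illustrative values only and not taken from the patch, demonstrates the equivalence.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustration only (not kernel code): the TOD clock ticks 4096 times per
 * microsecond, so ns = tod_delta * 1000 / 4096 = tod_delta * 125 / 512.
 */
static uint64_t tod_to_ns_old(uint64_t tod_delta)
{
	return (tod_delta * 1000) >> 12;	/* pre-patch scaling */
}

static uint64_t tod_to_ns_new(uint64_t tod_delta)
{
	return (tod_delta * 125) >> 9;		/* patched scaling */
}

int main(void)
{
	/* One hour of TOD ticks: 3600 s * 1000000 us/s * 4096 ticks/us. */
	uint64_t delta = 3600ULL * 1000000 * 4096;

	/* Both lines print 3600000000000 ns while no multiply overflows. */
	printf("old: %llu ns\n", (unsigned long long)tod_to_ns_old(delta));
	printf("new: %llu ns\n", (unsigned long long)tod_to_ns_new(delta));
	return 0;
}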
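
A second note, on the overflow guard added in stop_hz_timer(): before programming the clock comparator for a long idle period, the new code checks that the pending jiffies count can be multiplied by CLK_TICKS_PER_JIFFY without exceeding 64 bits, and that adding the base TOD value did not wrap, otherwise it falls back to -1ULL, a comparator value that effectively never fires. The sketch below shows the same guard pattern in plain user-space C; the CLK_TICKS_PER_JIFFY constant is an assumed HZ=100 value and next_wakeup_tod() is a hypothetical helper, not a kernel function.

#include <stdint.h>
#include <stdio.h>

/* Assumed value for illustration: 10000 us per jiffy * 4096 TOD ticks per us. */
#define CLK_TICKS_PER_JIFFY (10000ULL * 4096)

/*
 * Hypothetical helper mirroring the guarded conversion in stop_hz_timer():
 * turn "wake me up in this many jiffies" into a TOD clock-comparator value,
 * keeping the maximum (a wakeup that never fires) if the arithmetic would wrap.
 */
static uint64_t next_wakeup_tod(uint64_t base_cc, uint64_t jiffies_ahead)
{
	uint64_t todval = UINT64_MAX;
	uint64_t t;

	/* Only multiply if the product fits in 64 bits. */
	if (jiffies_ahead < UINT64_MAX / CLK_TICKS_PER_JIFFY) {
		t = base_cc + jiffies_ahead * CLK_TICKS_PER_JIFFY;
		/* Keep the fallback if the addition wrapped around. */
		if (t >= base_cc)
			todval = t;
	}
	return todval;
}

int main(void)
{
	uint64_t base = 1ULL << 40;

	/* A short idle period converts normally... */
	printf("250 jiffies ahead -> %llu\n",
	       (unsigned long long)next_wakeup_tod(base, 250));
	/* ...while an absurdly large offset falls back to "never". */
	printf("huge offset       -> %llu\n",
	       (unsigned long long)next_wakeup_tod(base, UINT64_MAX / 2));
	return 0;
}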