#define TICK_SIZE tick
-u64 jiffies_64 = INITIAL_JIFFIES;
-
-EXPORT_SYMBOL(jiffies_64);
-
static ext_int_info_t ext_int_info_cc;
static u64 init_timer_cc;
static u64 jiffies_timer_cc;
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
- return ((get_clock() - jiffies_timer_cc) * 1000) >> 12;
+ return ((get_clock() - jiffies_timer_cc) * 125) >> 9;
+}
+
+/*
+ * monotonic_clock - returns # of nanoseconds passed since time_init()
+ */
+unsigned long long monotonic_clock(void)
+{
+ return sched_clock();
}
+EXPORT_SYMBOL(monotonic_clock);
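
The 1000/4096 scaling and the 125/512 scaling are the same rational factor (both convert TOD-clock units, 4096 per microsecond, to nanoseconds), but the smaller multiplier pushes the 64-bit overflow of the intermediate product eight times further out. A standalone check of the arithmetic, outside the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical TOD-clock delta; 4096 TOD units per microsecond */
	uint64_t delta = 123456789ULL;

	uint64_t old_ns = (delta * 1000) >> 12;	/* delta * 1000 / 4096 */
	uint64_t new_ns = (delta * 125) >> 9;	/* delta *  125 /  512 */

	printf("old scaling: %llu ns\n", (unsigned long long) old_ns);
	printf("new scaling: %llu ns\n", (unsigned long long) new_ns);

	/* the intermediate product wraps 8x later with the new multiplier */
	printf("old overflows at delta > %llu\n",
	       (unsigned long long) (UINT64_MAX / 1000));
	printf("new overflows at delta > %llu\n",
	       (unsigned long long) (UINT64_MAX / 125));
	return 0;
}
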
void tod_to_timeval(__u64 todval, struct timespec *xtime)
{
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
- time_adjust = 0; /* stop active adjtime() */
- time_status |= STA_UNSYNC;
- time_maxerror = NTP_PHASE_LIMIT;
- time_esterror = NTP_PHASE_LIMIT;
+ ntp_clear();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
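
ntp_clear() replaces the four open-coded resets; the helper was added to kernel/time/ntp.c so that every architecture's do_settimeofday() stops duplicating this block. A sketch of its body from that era, not verbatim:

/* Sketch: reset NTP state after the wall clock was set by hand. */
void ntp_clear(void)
{
	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;	/* clock is no longer synchronized */
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
}
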
__u32 ticks, xticks;
/* Calculate how many ticks have passed. */
- if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer)
+ if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer) {
+ /*
+ * We have to program the clock comparator even if
+ * no tick has passed. That happens when, e.g., an I/O
+ * interrupt wakes up an idle processor that has
+ * switched off its HZ timer.
+ */
+ tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
+ asm volatile ("SCKC %0" : : "m" (tmp));
return;
+ }
tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer;
if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */
ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
- account_user_vtime(current);
+ account_tick_vtime(current);
#else
while (ticks--)
update_process_times(user_mode(regs));
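
Why the new early return still executes SCKC: SET CLOCK COMPARATOR loads a per-CPU register, and the machine raises a timer external interrupt once the TOD clock passes that value. A CPU woken from idle by an unrelated I/O interrupt has its HZ timer switched off, so failing to reprogram the comparator here would leave it without a next tick. The instruction is commonly wrapped like this (a sketch; the patch inlines the asm):

/* Sketch of a wrapper around SET CLOCK COMPARATOR: a timer external
 * interrupt is raised when the TOD clock exceeds the value written. */
static inline void set_clock_comparator(__u64 time)
{
	asm volatile ("SCKC %0" : : "m" (time));
}
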
/*
 * Stop the HZ tick on the current CPU.
 * Only cpu_idle may call this function.
 */
static inline void stop_hz_timer(void)
{
- __u64 timer;
+ unsigned long flags;
+ unsigned long seq, next;
+ __u64 timer, todval;
+ int cpu = smp_processor_id();
if (sysctl_hz_timer != 0)
return;
- cpu_set(smp_processor_id(), nohz_cpu_mask);
+ cpu_set(cpu, nohz_cpu_mask);
/*
* Leave the clock comparator set up for the next timer
* tick if either rcu or a softirq is pending.
*/
- if (rcu_pending(smp_processor_id()) || local_softirq_pending()) {
- cpu_clear(smp_processor_id(), nohz_cpu_mask);
+ if (rcu_needs_cpu(cpu) || local_softirq_pending()) {
+ cpu_clear(cpu, nohz_cpu_mask);
return;
}
/*
 * This cpu is going really idle. Set up the clock comparator
* for the next event.
*/
- timer = (__u64) (next_timer_interrupt() - jiffies) + jiffies_64;
- timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
- asm volatile ("SCKC %0" : : "m" (timer));
+ next = next_timer_interrupt();
+ do {
+ seq = read_seqbegin_irqsave(&xtime_lock, flags);
+ timer = ((__u64) next) - ((__u64) jiffies) + jiffies_64;
+ } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+ todval = -1ULL;
+ /* Be careful about overflows. */
+ if (timer < (-1ULL / CLK_TICKS_PER_JIFFY)) {
+ timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
+ if (timer >= jiffies_timer_cc)
+ todval = timer;
+ }
+ asm volatile ("SCKC %0" : : "m" (todval));
}
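
Two details in the rewritten stop_hz_timer() are worth spelling out. First, jiffies and jiffies_64 are updated under xtime_lock, so the sampled value is wrapped in a seqlock retry loop: if a writer touched the tick state in between, read_seqretry_irqrestore() reports it and the read is redone. Second, timer * CLK_TICKS_PER_JIFFY can wrap 64 bits when next_timer_interrupt() is far in the future, so the comparator falls back to -1ULL, effectively "never". The guard in isolation (standalone C; the helper name next_comparator is mine, for illustration; HZ=100 assumed, giving 4096000000/100 TOD units per jiffy):

#include <stdio.h>
#include <stdint.h>

#define CLK_TICKS_PER_JIFFY (4096000000ULL / 100) /* TOD units per jiffy, HZ=100 */

/* Mirrors the overflow guard in stop_hz_timer(): scale jiffies to TOD
 * units only if the product fits in 64 bits, and also catch a wrap in
 * the subsequent addition; otherwise return -1ULL, i.e. "never fire". */
static uint64_t next_comparator(uint64_t jiffies_timer_cc, uint64_t timer)
{
	uint64_t todval = ~0ULL;

	if (timer < ~0ULL / CLK_TICKS_PER_JIFFY) {
		uint64_t t = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
		if (t >= jiffies_timer_cc)
			todval = t;
	}
	return todval;
}

int main(void)
{
	printf("near timer: %llu\n",
	       (unsigned long long) next_comparator(1000, 5));
	printf("huge timer: %llu\n",
	       (unsigned long long) next_comparator(1000, ~0ULL / 2));
	return 0;
}
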
/*
 * Start the HZ tick on the current CPU.
 * Only cpu_idle may call this function.
 */
static inline void start_hz_timer(void)
{
if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
return;
- account_ticks(__KSTK_PTREGS(current));
+ account_ticks(task_pt_regs(current));
cpu_clear(smp_processor_id(), nohz_cpu_mask);
}
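
start_hz_timer() is the wakeup-side counterpart: account_ticks() replays any ticks missed while the comparator was parked, and the CPU is cleared from nohz_cpu_mask. The intended pairing in the idle loop is roughly as follows (a hypothetical sketch; the real caller is s390's cpu_idle()):

/* Hypothetical shape of the caller pairing the two helpers. */
static void idle_loop_sketch(void)
{
	for (;;) {
		stop_hz_timer();	/* may enter nohz; comparator armed */
		/* ... enabled wait until an interrupt arrives ... */
		start_hz_timer();	/* replay missed ticks, leave nohz */
	}
}
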