/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */
#include <linux/config.h>

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
extern unsigned long wall_jiffies;

u64 jiffies_64 = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

#define TIME_KEEPER_ID	0	/* smp_processor_id() of time-keeper */
#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;
EXPORT_SYMBOL(last_cli_ip);

#endif

unsigned long long
sched_clock (void)
{
	unsigned long offset = ia64_get_itc();

	return (offset * local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
}
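
/*
 * Note on the conversion above: nsec_per_cyc is a fixed-point scale factor
 * (nanoseconds per cycle, scaled by 2^IA64_NSEC_PER_CYC_SHIFT; computed in
 * ia64_init_itm() below), so cycles become nanoseconds with one multiply
 * and one shift and no division on this fast path.
 */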

static void
itc_reset (void)
{
}

/*
 * Adjust for the fact that xtime has been advanced by delta_nsec (may be negative and/or
 * larger than NSEC_PER_SEC).
 */
static void
itc_update (long delta_nsec)
{
}
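
/*
 * Both hooks above are intentionally empty: the ITC itself is never
 * adjusted; itc_get_offset() below recomputes the interpolation offset
 * from the time-keeper's itm_next on every call.
 */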

/*
 * Return the number of nano-seconds that elapsed since the last
 * update to jiffies.  It is quite possible that the timer interrupt
 * will interrupt this and result in a race for any of jiffies,
 * wall_jiffies or itm_next.  Thus, the xtime_lock must be at least
 * read synchronised when calling this routine (see do_gettimeofday()
 * below for an example).
 */
static unsigned long
itc_get_offset (void)
{
	unsigned long elapsed_cycles, lost = jiffies - wall_jiffies;
	unsigned long now = ia64_get_itc(), last_tick;

	last_tick = (cpu_data(TIME_KEEPER_ID)->itm_next
		     - (lost + 1)*cpu_data(TIME_KEEPER_ID)->itm_delta);

	elapsed_cycles = now - last_tick;
	return (elapsed_cycles*local_cpu_data->nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
}
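
/*
 * Why (lost + 1): itm_next holds the ITC value of the *next* tick, so the
 * tick that last advanced xtime fired at itm_next - itm_delta, and every
 * tick not yet folded into xtime (jiffies ahead of wall_jiffies) moves
 * that reference point back by one more itm_delta.
 */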

static struct time_interpolator itc_interpolator = {
	.get_offset =	itc_get_offset,
	.update =	itc_update,
	.reset =	itc_reset,
};
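
/*
 * The interpolator is registered from ia64_init_itm() below, and only when
 * SAL does not flag the ITC as subject to drift
 * (IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT).
 */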

int
do_settimeofday (struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	{
		/*
		 * This is revolting.  We need to set "xtime" correctly.  However, the
		 * value in this location is the value at the most recent update of wall
		 * time.  Discover what correction gettimeofday would have done, and then
		 * undo it!
		 */
		nsec -= time_interpolator_get_offset();

		wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
		wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

		set_normalized_timespec(&xtime, sec, nsec);
		set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

		time_adjust = 0;		/* stop active adjtime() */
		time_status |= STA_UNSYNC;
		time_maxerror = NTP_PHASE_LIMIT;
		time_esterror = NTP_PHASE_LIMIT;
		time_interpolator_reset();
	}
	write_sequnlock_irq(&xtime_lock);
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
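
/*
 * Example of the "undo" above: if the interpolator currently reports a
 * 3 ms offset, any reader is about to see xtime + 3 ms, so storing
 * tv - 3 ms makes a gettimeofday() issued right after this call return
 * (almost exactly) the requested time.
 */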

void
do_gettimeofday (struct timeval *tv)
{
	unsigned long seq, nsec, usec, sec, old, offset;

	while (1) {
		seq = read_seqbegin(&xtime_lock);
		{
			old = last_nsec_offset;
			offset = time_interpolator_get_offset();
			sec = xtime.tv_sec;
			nsec = xtime.tv_nsec;
		}
		if (unlikely(read_seqretry(&xtime_lock, seq)))
			continue;
		/*
		 * Ensure that for any pair of causally ordered gettimeofday() calls, time
		 * never goes backwards (even when ITC on different CPUs are not perfectly
		 * synchronized).  (A pair of concurrent calls to gettimeofday() is by
		 * definition non-causal and hence it makes no sense to talk about
		 * time-continuity for such calls.)
		 *
		 * Doing this in a lock-free and race-free manner is tricky.  Here is why
		 * it works (most of the time): read_seqretry() just succeeded, which
		 * implies we calculated a consistent (valid) value for "offset".  If the
		 * cmpxchg() below succeeds, we further know that last_nsec_offset still
		 * has the same value as at the beginning of the loop, so there was
		 * presumably no timer-tick or other updates to last_nsec_offset in the
		 * meantime.  This isn't 100% true though: there _is_ a possibility of a
		 * timer-tick occurring right after read_seqretry() and then getting
		 * zero or more other readers which will set last_nsec_offset to the same
		 * value as the one we read at the beginning of the loop.  If this
		 * happens, we'll end up returning a slightly newer time than we ought to
		 * (the jump forward is at most "offset" nano-seconds).  There is no
		 * danger of causing time to go backwards, though, so we are safe in that
		 * sense.  We could make the probability of this unlucky case occurring
		 * arbitrarily small by encoding a version number in last_nsec_offset, but
		 * even without versioning, the probability of this unlucky case should be
		 * so small that we won't worry about it.
		 */
		if (offset <= old) {
			offset = old;
			break;
		} else if (likely(cmpxchg(&last_nsec_offset, old, offset) == old))
			break;

		/* someone else beat us to updating last_nsec_offset; try again */
	}

	usec = (nsec + offset) / 1000;
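
	/*
	 * nsec is below NSEC_PER_SEC here, but offset may span several lost
	 * ticks, so the sum can exceed one second more than once; hence the
	 * loop below rather than a single conditional.
	 */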
	while (unlikely(usec >= USEC_PER_SEC)) {
		usec -= USEC_PER_SEC;
		++sec;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);

/*
 * The profiling function is SMP safe.  (Nothing can mess
 * around with "current", and the profiling counters are
 * updated with atomic operations.)  This is especially
 * useful with a profiling multiplier != 1.
 */
static inline void
ia64_do_profile (struct pt_regs * regs)
{
	unsigned long ip, slot;
	extern cpumask_t prof_cpu_mask;

	profile_hook(regs);

	if (user_mode(regs))
		return;

	if (!prof_buffer)
		return;

	ip = instruction_pointer(regs);
	/* Conserve space in histogram by encoding slot bits in address
	 * bits 2 and 3 rather than bits 0 and 1.
	 */
	slot = ip & 3;
	ip = (ip & ~3UL) + 4*slot;
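	/*
	 * Example: ia64 bundles are 16 bytes with three instruction slots,
	 * so slot 2 of the bundle at 0x...a000 arrives as ip 0x...a002 and
	 * is re-encoded by the line above as 0x...a008.
	 */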

	/*
	 * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
	 * (default is all CPUs.)
	 */
	if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
		return;

	ip -= (unsigned long) &_stext;
	ip >>= prof_shift;
	/*
	 * Don't ignore out-of-bounds IP values silently,
	 * put them into the last histogram slot, so if
	 * present, they will show up as a sharp peak.
	 */
	if (ip > prof_len-1)
		ip = prof_len-1;
	atomic_inc((atomic_t *)&prof_buffer[ip]);
}

static irqreturn_t
timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long new_itm;

	platform_timer_interrupt(irq, dev_id, regs);

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	ia64_do_profile(regs);

	while (1) {
#ifdef CONFIG_SMP
		/*
		 * For UP, this is done in do_timer().  Weird, but
		 * fixing that would require updates to all
		 * platforms.
		 */
		update_process_times(user_mode(regs));
#endif
		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == TIME_KEEPER_ID) {
			/*
			 * Here we are in the timer irq handler.  We have irqs locally
			 * disabled, but we don't know if the timer_bh is running on
			 * another CPU.  We need to avoid an SMP race by acquiring the
			 * xtime_lock.
			 */
			write_seqlock(&xtime_lock);
			do_timer(regs);
			local_cpu_data->itm_next = new_itm;
			write_sequnlock(&xtime_lock);
		} else
			local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;
	}

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s).  We do NOT
		 * update itm.next because that would force us to call
		 * do_timer() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing monotonicity of time).
		 */
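		/*
		 * For scale (hypothetical numbers; the real values come from
		 * ia64_init_itm() below): a 400 MHz ITC with HZ=1024 gives an
		 * itm_delta of about 390,625 cycles, so the itm_delta/2 margin
		 * below is roughly half a millisecond.
		 */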
		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}

/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
	int cpu = smp_processor_id();
	unsigned long shift = 0, delta;

	/* arrange for the cycle counter to generate a timer interrupt: */
	ia64_set_itv(IA64_TIMER_VECTOR);

	delta = local_cpu_data->itm_delta;
	/*
	 * Stagger the timer tick for each CPU so they don't occur all at (almost) the
	 * same time:
	 */
	if (cpu) {
		unsigned long hi = 1UL << ia64_fls(cpu);
		shift = (2*(cpu - hi) + 1) * delta/hi/2;
	}
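	/*
	 * The shift above walks the odd multiples of delta/(2*hi): CPU 1
	 * ticks delta/2 after CPU 0, CPUs 2 and 3 at delta/4 and 3*delta/4,
	 * CPUs 4-7 at delta/8, 3*delta/8, 5*delta/8 and 7*delta/8, and so on.
	 */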
	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
	ia64_set_itm(local_cpu_data->itm_next);
}

void __init
ia64_init_itm (void)
{
	unsigned long platform_base_freq, itc_freq;
	struct pal_freq_ratio itc_ratio, proc_ratio;
	long status, platform_base_drift, itc_drift;

	/*
	 * According to SAL v2.6, we need to use a SAL call to determine the platform base
	 * frequency and then a PAL call to determine the frequency ratio between the ITC
	 * and the base frequency.
	 */
	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
				    &platform_base_freq, &platform_base_drift);
	if (status != 0) {
		printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
	} else {
		status = ia64_pal_freq_ratios(&proc_ratio, 0, &itc_ratio);
		if (status != 0)
			printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
	}
	if (status != 0) {
		/* invent "random" values */
		printk(KERN_ERR
		       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
		platform_base_freq = 100000000;
		platform_base_drift = -1;	/* no drift info */
		itc_ratio.num = 3;
		itc_ratio.den = 1;
	}
	if (platform_base_freq < 40000000) {
		printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
		       platform_base_freq);
		platform_base_freq = 75000000;
		platform_base_drift = -1;
	}
	if (!proc_ratio.den)
		proc_ratio.den = 1;	/* avoid division by zero */
	if (!itc_ratio.den)
		itc_ratio.den = 1;	/* avoid division by zero */

	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
	if (platform_base_drift != -1)
		itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
	else
		itc_drift = -1;

	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
	printk(KERN_INFO "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, "
	       "ITC freq=%lu.%03luMHz+/-%ldppm\n", smp_processor_id(),
	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000,
	       itc_drift);

	local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
	local_cpu_data->itc_freq = itc_freq;
	local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
	local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
					+ itc_freq/2)/itc_freq;
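	/*
	 * Worked example (hypothetical numbers): assuming
	 * IA64_NSEC_PER_CYC_SHIFT is 30 and itc_freq is 1 GHz, nsec_per_cyc
	 * becomes ((10^9 << 30) + 5*10^8)/10^9 = 2^30, i.e. exactly
	 * 1.0 ns/cycle in fixed point.  Note that itm_delta, cyc_per_usec
	 * and nsec_per_cyc all round to nearest rather than truncate.
	 */
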
	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
		itc_interpolator.frequency = local_cpu_data->itc_freq;
		itc_interpolator.drift = itc_drift;
		register_time_interpolator(&itc_interpolator);
	}

	/* Setup the CPU local timer tick */
	ia64_cpu_local_tick();
}

static struct irqaction timer_irqaction = {
	.handler =	timer_interrupt,
	.flags =	SA_INTERRUPT,
	.name =		"timer"
};

void __init
time_init (void)
{
	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
	efi_gettimeofday(&xtime);
	ia64_init_itm();

	/*
	 * Initialize wall_to_monotonic such that adding it to xtime will yield zero;
	 * the tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
	 */
	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
}
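
/*
 * Example of the normalization above (hypothetical boot time): if xtime is
 * { .tv_sec = 1000, .tv_nsec = 500000000 }, set_normalized_timespec() turns
 * { -1000, -500000000 } into { -1001, 500000000 }, and adding that to xtime
 * indeed yields zero.
 */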