/*
 *  linux/arch/parisc/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
 *  Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King
 *  Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
 *
 * 1994-07-02 Alan Modra
 *             fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
 * 1998-12-20  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>

#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/pdc.h>	/* pdc_tod_read(), PAGE0 */
#include <asm/led.h>	/* led_tasklet */

#include <linux/timex.h>
u64 jiffies_64 = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);
/* xtime and wall_jiffies keep wall-clock time */
extern unsigned long wall_jiffies;

static long clocktick;	/* timer cycles per tick */
static long halftick;	/* half of clocktick */

#ifdef CONFIG_SMP
extern void smp_do_timer(struct pt_regs *regs);
#endif
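/* The tick source is the PA-RISC interval timer, control register CR16:
 * mfctl(16) reads the free-running cycle counter and mtctl(..., 16) sets
 * the comparison value that raises the next timer interrupt, so "timer
 * cycles" above means CR16 cycles.
 */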
irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	long now;
	long next_tick;
	int nticks;
	int cpu = smp_processor_id();

	profile_tick(CPU_PROFILING, regs);

	now = mfctl(16);

	/* initialize next_tick to time at last clocktick */
	next_tick = cpu_data[cpu].it_value;
	/* since time passes between the interrupt and the mfctl()
	 * above, it is never true that last_tick + clocktick == now.  If we
	 * never miss a clocktick, we could set next_tick = last_tick + clocktick
	 * but maybe we'll miss ticks, hence the loop.
	 *
	 * Variables are *signed*.
	 */
	nticks = 0;
	while ((next_tick - now) < halftick) {
		next_tick += clocktick;
		nticks++;
	}
	mtctl(next_tick, 16);
	cpu_data[cpu].it_value = next_tick;
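	/* Illustration (hypothetical figures, not in the original): with
	 * HZ == 100 and PAGE0->mem_10msec == 2500000 (a 250 MHz CR16),
	 * clocktick is 2500000 and halftick 1250000.  If interrupts were
	 * held off long enough for two extra ticks to pass, the loop above
	 * advances next_tick by 3 * clocktick and leaves nticks == 3, so
	 * the loop below accounts for every missed tick.
	 */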
	while (nticks--) {
#ifdef CONFIG_SMP
		smp_do_timer(regs);
#else
		update_process_times(user_mode(regs));
#endif
		if (cpu == 0) {
			write_seqlock(&xtime_lock);
			do_timer(regs);
			write_sequnlock(&xtime_lock);
		}
	}
#ifdef CONFIG_CHASSIS_LCD_LED
	/* Only schedule the led tasklet on cpu 0, and only if it
	 * is enabled.
	 */
	if (cpu == 0 && !atomic_read(&led_tasklet.count))
		tasklet_schedule(&led_tasklet);
#endif
	/* check soft power switch status */
	if (cpu == 0 && !atomic_read(&power_tasklet.count))
		tasklet_schedule(&power_tasklet);

	return IRQ_HANDLED;
}
/*** converted from ia64 ***/
/*
 * Return the number of micro-seconds that elapsed since the last
 * update to wall time (aka xtime aka wall_jiffies).  The xtime_lock
 * must be at least read-locked when calling this routine.
 */
static inline unsigned long
gettimeoffset (void)
{
#ifndef CONFIG_SMP
	/*
	 * FIXME: This won't work on smp because jiffies are updated by cpu 0.
	 *    Once parisc-linux learns the cr16 difference between processors,
	 *    this could be made to work.
	 */
	long last_tick;
	long elapsed_cycles;
	/* it_value is the intended time of the next tick */
	last_tick = cpu_data[smp_processor_id()].it_value;

	/* Subtract one tick and account for possible difference between
	 * when we expected the tick and when it actually arrived.
	 */
	last_tick -= clocktick * (jiffies - wall_jiffies + 1);
	elapsed_cycles = mfctl(16) - last_tick;

	/* the precision of this math could be improved */
	return elapsed_cycles / (PAGE0->mem_10msec / 10000);
#else
	return 0;
#endif
}
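/* Example for the conversion in gettimeoffset() (assumed figures):
 * PAGE0->mem_10msec holds CR16 cycles per 10 ms, so mem_10msec / 10000
 * is cycles per microsecond.  With a hypothetical 250 MHz CR16 the
 * divisor is 250, and 500000 elapsed cycles yield 2000 microseconds.
 */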
void
do_gettimeofday (struct timeval *tv)
{
	unsigned long flags, seq, usec, sec;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		usec = gettimeoffset();
		sec = xtime.tv_sec;
		usec += (xtime.tv_nsec / 1000);
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	while (usec >= 1000000) {
		usec -= 1000000;
		++sec;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);
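/* The do/while in do_gettimeofday() is the usual seqlock read pattern:
 * sample xtime plus the CR16 offset, then retry if the timer interrupt
 * updated xtime in between; the carry loop folds microseconds that
 * crossed a second boundary into tv_sec.
 */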
int
do_settimeofday (struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	{
		/*
		 * This is revolting. We need to set "xtime"
		 * correctly. However, the value in this location is
		 * the value at the most recent update of wall time.
		 * Discover what correction gettimeofday would have
		 * done, and then undo it!
		 */
		nsec -= gettimeoffset() * 1000;

		wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
		wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

		set_normalized_timespec(&xtime, sec, nsec);
		set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

		time_adjust = 0;		/* stop active adjtime() */
		time_status |= STA_UNSYNC;
		time_maxerror = NTP_PHASE_LIMIT;
		time_esterror = NTP_PHASE_LIMIT;
	}
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
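/* Illustration of the "undo" in do_settimeofday() (hypothetical figures):
 * if 3 ms of CR16 time have elapsed since the last tick, gettimeofday()
 * would add 3 ms to xtime, so storing the requested time minus 3 ms means
 * a subsequent gettimeofday() reports the value that was asked for.
 */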
/*
 * XXX: We can do better than this.
 * Returns nanoseconds
 */
unsigned long long sched_clock(void)
{
	return (unsigned long long)jiffies * (1000000000 / HZ);
}
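/* With the usual parisc HZ of 100, 1000000000 / HZ is 10000000, so
 * sched_clock() advances in 10 ms steps -- jiffy resolution only, as the
 * XXX above concedes.
 */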
void __init time_init(void)
{
	unsigned long next_tick;
	static struct pdc_tod tod_data;

	clocktick = (100 * PAGE0->mem_10msec) / HZ;
	halftick = clocktick / 2;
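	/* Worked example (assumed figures): PAGE0->mem_10msec is CR16 cycles
	 * per 10 ms, so 100 * mem_10msec is cycles per second and dividing
	 * by HZ gives cycles per tick.  With a hypothetical 250 MHz CR16 and
	 * HZ == 100, clocktick == 2500000 and halftick == 1250000.
	 */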
	/* Setup clock interrupt timing */

	next_tick = mfctl(16);
	next_tick += clocktick;
	cpu_data[smp_processor_id()].it_value = next_tick;

	/* kick off Itimer (CR16) */
	mtctl(next_tick, 16);
	if (pdc_tod_read(&tod_data) == 0) {
		write_seqlock_irq(&xtime_lock);
		xtime.tv_sec = tod_data.tod_sec;
		xtime.tv_nsec = tod_data.tod_usec * 1000;
		set_normalized_timespec(&wall_to_monotonic,
		                        -xtime.tv_sec, -xtime.tv_nsec);
		write_sequnlock_irq(&xtime_lock);
	} else {
		printk(KERN_ERR "Error reading tod clock\n");
	}
}