/*
 *  arch/s390/kernel/time.c
 *
 *  Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Hartmut Penner (hp@de.ibm.com),
 *             Martin Schwidefsky (schwidefsky@de.ibm.com),
 *             Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *
 *  Derived from "arch/i386/kernel/time.c"
 *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/profile.h>
#include <linux/timex.h>
#include <linux/config.h>

#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/div64.h>

#ifdef CONFIG_VIRT_TIMER
#include <asm/timer.h>
#endif

/* change this if you have some constant time drift */
#define USECS_PER_JIFFY     ((unsigned long) 1000000/HZ)
#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
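/*
 * Bit 51 of the s390 TOD clock ticks once per microsecond, so a
 * microsecond count shifted left by 12 yields TOD clock units;
 * CLK_TICKS_PER_JIFFY is therefore the length of one jiffy on the
 * TOD clock.
 */
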
/*
 * Create a small time difference between the timer interrupts
 * on the different cpus to avoid lock contention.
 */
#define CPU_DEVIATION       (smp_processor_id() << 12)

#define TICK_SIZE tick

u64 jiffies_64 = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

static ext_int_info_t ext_int_info_cc;
static u64 init_timer_cc;
static u64 jiffies_timer_cc;
static u64 xtime_cc;

extern unsigned long wall_jiffies;

#ifdef CONFIG_VIRT_TIMER
#define VTIMER_MAGIC (0x4b87ad6e + 1)
static ext_int_info_t ext_int_info_timer;
DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
	return ((get_clock() - jiffies_timer_cc) * 1000) >> 12;
}

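/*
 * Convert a TOD clock value into seconds and nanoseconds:
 * todval >> 12 yields microseconds, which are split into full
 * seconds and a sub-second remainder scaled up to nanoseconds.
 */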
void tod_to_timeval(__u64 todval, struct timespec *xtime)
{
	unsigned long long sec;

	sec = todval >> 12;
	do_div(sec, 1000000);
	xtime->tv_sec = sec;
	todval -= (sec * 1000000) << 12;
	xtime->tv_nsec = ((todval * 1000) >> 12);
}

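/*
 * Return the number of microseconds that have elapsed since the
 * last update of xtime (as recorded in wall_jiffies).
 */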
static inline unsigned long do_gettimeoffset(void)
{
	__u64 now;

	now = (get_clock() - jiffies_timer_cc) >> 12;
	/* We require the offset from the latest update of xtime */
	now -= (__u64) wall_jiffies*USECS_PER_JIFFY;
	return (unsigned long) now;
}

/*
 * This version of gettimeofday has microsecond resolution.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long seq;
	unsigned long usec, sec;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		sec = xtime.tv_sec;
		usec = xtime.tv_nsec / 1000 + do_gettimeoffset();
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	while (usec >= 1000000) {
		usec -= 1000000;
		sec++;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);

int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/* This is revolting. We need to set xtime.tv_nsec
	 * correctly. However, the value in this location is
	 * the value at the last tick.
	 * Discover what correction gettimeofday
	 * would have done, and then undo it!
	 */
	nsec -= do_gettimeoffset() * 1000;

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

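/*
 * Convert an elapsed TOD clock delta into a number of ticks.  On
 * 31-bit kernels there is no 64-bit divide instruction; "dr" divides
 * the 64-bit value in an even/odd register pair by a 32-bit divisor.
 * Dividend and divisor are both shifted right by one so that the
 * signed division stays positive and the quotient fits in 32 bits.
 */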
#ifndef CONFIG_ARCH_S390X

static inline __u32
__calculate_ticks(__u64 elapsed)
{
	register_pair rp;

	rp.pair = elapsed >> 1;
	asm ("dr %0,%1" : "+d" (rp) : "d" (CLK_TICKS_PER_JIFFY >> 1));
	return rp.subreg.odd;
}

#else /* CONFIG_ARCH_S390X */

static inline __u32
__calculate_ticks(__u64 elapsed)
{
	return elapsed / CLK_TICKS_PER_JIFFY;
}

#endif /* CONFIG_ARCH_S390X */

#ifdef CONFIG_PROFILING
extern char _stext, _etext;

/*
 * The profiling function is SMP safe. (nothing can mess
 * around with "current", and the profiling counters are
 * updated with atomic operations). This is especially
 * useful with a profiling multiplier != 1
 */
static inline void s390_do_profile(struct pt_regs * regs)
{
	unsigned long eip;
	extern cpumask_t prof_cpu_mask;

	profile_hook(regs);

	if (user_mode(regs))
		return;

	if (!prof_buffer)
		return;

	eip = instruction_pointer(regs);

	/*
	 * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
	 * (default is all CPUs.)
	 */
	if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
		return;

	eip -= (unsigned long) &_stext;
	eip >>= prof_shift;
	/*
	 * Don't ignore out-of-bounds EIP values silently,
	 * put them into the last histogram slot, so if
	 * present, they will show up as a sharp peak.
	 */
	if (eip > prof_len-1)
		eip = prof_len-1;
	atomic_inc((atomic_t *)&prof_buffer[eip]);
}
#else
#define s390_do_profile(regs)  do { ; } while(0)
#endif /* CONFIG_PROFILING */

/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
void account_ticks(struct pt_regs *regs)
{
	__u64 tmp;
	__u32 ticks;

	/* Calculate how many ticks have passed. */
	tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer;
	if (tmp >= 2*CLK_TICKS_PER_JIFFY) {  /* more than two ticks ? */
		ticks = __calculate_ticks(tmp) + 1;
		S390_lowcore.jiffy_timer +=
			CLK_TICKS_PER_JIFFY * (__u64) ticks;
	} else if (tmp >= CLK_TICKS_PER_JIFFY) {
		ticks = 2;
		S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY;
	} else {
		ticks = 1;
		S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
	}

	/* set clock comparator for next tick */
	tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
	asm volatile ("SCKC %0" : : "m" (tmp));

#ifdef CONFIG_SMP
	/*
	 * Do not rely on the boot cpu to do the calls to do_timer.
	 * Spread it over all cpus instead.
	 */
	write_seqlock(&xtime_lock);
	if (S390_lowcore.jiffy_timer > xtime_cc) {
		__u32 xticks;

		tmp = S390_lowcore.jiffy_timer - xtime_cc;
		if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
			xticks = __calculate_ticks(tmp);
			xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
		} else {
			xticks = 1;
			xtime_cc += CLK_TICKS_PER_JIFFY;
		}
		while (xticks--)
			do_timer(regs);
	}
	write_sequnlock(&xtime_lock);
	while (ticks--)
		update_process_times(user_mode(regs));
#else
	while (ticks--)
		do_timer(regs);
#endif
	s390_do_profile(regs);
}

#ifdef CONFIG_VIRT_TIMER
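/*
 * Bookkeeping for the virtual CPU timer around the idle state:
 * stop_cpu_timer() parks the timer before the CPU goes idle,
 * start_cpu_timer() restores the saved expiry value afterwards.
 */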
void start_cpu_timer(void)
{
	struct vtimer_queue *vt_list;

	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
	set_vtimer(vt_list->idle);
}

int stop_cpu_timer(void)
{
	__u64 done;
	struct vtimer_queue *vt_list;

	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());

	/* nothing to do */
	if (list_empty(&vt_list->list)) {
		vt_list->idle = VTIMER_MAX_SLICE;
		goto fire;
	}

	asm volatile ("STPT %0" : "=m" (done));

	/*
	 * If done is negative we do not stop the CPU timer
	 * because we will get instantly an interrupt that
	 * will start the CPU timer again.
	 */
	if (done & 1LL << 63)
		return 1;

	vt_list->offset += vt_list->to_expire - done;

	/* save the actual expire value */
	vt_list->idle = done;

	/*
	 * We cannot halt the CPU timer, we just write a value that
	 * nearly never expires (only after 71 years) and re-write
	 * the stored expire value if we continue the timer
	 */
 fire:
	set_vtimer(VTIMER_MAX_SLICE);
	return 0;
}

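/*
 * Program the CPU timer (SPT) and remember the programmed value.
 */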
void set_vtimer(__u64 expires)
{
	asm volatile ("SPT %0" : : "m" (expires));

	/* store expire time for this CPU timer */
	per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
}

/*
 * Sorted add to a list. List is linear searched until first bigger
 * element is found.
 */
void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *event;

	list_for_each_entry(event, head, entry) {
		if (event->expires > timer->expires) {
			list_add_tail(&timer->entry, &event->entry);
			return;
		}
	}
	list_add_tail(&timer->entry, head);
}

/*
 * Do the callback functions of expired vtimer events.
 * Called from within the interrupt handler.
 */
static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
{
	struct vtimer_queue *vt_list;
	struct vtimer_list *event, *tmp;
	void (*fn)(unsigned long, struct pt_regs*);
	unsigned long data;

	if (list_empty(cb_list))
		return;

	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());

	list_for_each_entry_safe(event, tmp, cb_list, entry) {
		fn = event->function;
		data = event->data;
		fn(data, regs);

		if (!event->interval)
			/* delete one shot timer */
			list_del_init(&event->entry);
		else {
			/* move interval timer back to list */
			spin_lock(&vt_list->lock);
			list_del_init(&event->entry);
			list_add_sorted(event, &vt_list->list);
			spin_unlock(&vt_list->lock);
		}
	}
}

/*
 * Handler for the virtual CPU timer.
 */
static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code)
{
	int cpu;
	__u64 next, delta;
	struct vtimer_queue *vt_list;
	struct vtimer_list *event, *tmp;
	struct list_head *ptr;
	/* the callback queue */
	struct list_head cb_list;

	INIT_LIST_HEAD(&cb_list);
	cpu = smp_processor_id();
	vt_list = &per_cpu(virt_cpu_timer, cpu);

	/* walk timer list, fire all expired events */
	spin_lock(&vt_list->lock);

	if (vt_list->to_expire < VTIMER_MAX_SLICE)
		vt_list->offset += vt_list->to_expire;

	list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
		if (event->expires > vt_list->offset)
			/* found first unexpired event, leave */
			break;

		/* re-charge interval timer, we have to add the offset */
		if (event->interval)
			event->expires = event->interval + vt_list->offset;

		/* move expired timer to the callback queue */
		list_move_tail(&event->entry, &cb_list);
	}
	spin_unlock(&vt_list->lock);
	do_callbacks(&cb_list, regs);

	/* next event is first in list */
	spin_lock(&vt_list->lock);
	if (!list_empty(&vt_list->list)) {
		ptr = vt_list->list.next;
		event = list_entry(ptr, struct vtimer_list, entry);
		next = event->expires - vt_list->offset;

		/* add the expired time from this interrupt handler
		 * and the callback functions
		 */
		asm volatile ("STPT %0" : "=m" (delta));
		delta = 0xffffffffffffffffLL - delta + 1;
		vt_list->offset += delta;
		next -= delta;
	} else {
		vt_list->offset = 0;
		next = VTIMER_MAX_SLICE;
	}
	spin_unlock(&vt_list->lock);
	set_vtimer(next);
}

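/*
 * sysctl_hz_timer controls whether the HZ tick keeps running on idle
 * CPUs: non-zero leaves the tick on, zero allows stop_hz_timer() to
 * switch it off.
 */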
#ifdef CONFIG_NO_IDLE_HZ

#ifdef CONFIG_NO_IDLE_HZ_INIT
int sysctl_hz_timer = 0;
#else
int sysctl_hz_timer = 1;
#endif

/*
 * Start the HZ tick on the current CPU.
 * Only cpu_idle may call this function.
 */
void start_hz_timer(struct pt_regs *regs)
{
	__u64 tmp;
	__u32 ticks;

	if (!cpu_isset(smp_processor_id(), idle_cpu_mask))
		return;

	/* Calculate how many ticks have passed */
	asm volatile ("STCK 0(%0)" : : "a" (&tmp) : "memory", "cc");
	tmp = tmp + CLK_TICKS_PER_JIFFY - S390_lowcore.jiffy_timer;
	ticks = __calculate_ticks(tmp);
	S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY * (__u64) ticks;

	/* Set the clock comparator to the next tick. */
	tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
	asm volatile ("SCKC %0" : : "m" (tmp));

	/* Charge the ticks. */
	if (ticks > 0) {
#ifdef CONFIG_SMP
		/*
		 * Do not rely on the boot cpu to do the calls to do_timer.
		 * Spread it over all cpus instead.
		 */
		write_seqlock(&xtime_lock);
		if (S390_lowcore.jiffy_timer > xtime_cc) {
			__u32 xticks;

			tmp = S390_lowcore.jiffy_timer - xtime_cc;
			if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
				xticks = __calculate_ticks(tmp);
				xtime_cc += (__u64) xticks*CLK_TICKS_PER_JIFFY;
			} else {
				xticks = 1;
				xtime_cc += CLK_TICKS_PER_JIFFY;
			}
			while (xticks--)
				do_timer(regs);
		}
		write_sequnlock(&xtime_lock);
		while (ticks--)
			update_process_times(user_mode(regs));
#else
		while (ticks--)
			do_timer(regs);
#endif
	}
	cpu_clear(smp_processor_id(), idle_cpu_mask);
}

/*
 * Stop the HZ tick on the current CPU.
 * Only cpu_idle may call this function.
 */
int stop_hz_timer(void)
{
	__u64 timer;

	if (sysctl_hz_timer != 0)
		return 0;

	/*
	 * Leave the clock comparator set up for the next timer
	 * tick if either rcu or a softirq is pending.
	 */
	if (rcu_pending(smp_processor_id()) || local_softirq_pending())
		return 0;

	/*
	 * This cpu is going really idle. Set up the clock comparator
	 * for the next event.
	 */
	cpu_set(smp_processor_id(), idle_cpu_mask);
	timer = (__u64) (next_timer_interrupt() - jiffies) + jiffies_64;
	timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
	asm volatile ("SCKC %0" : : "m" (timer));

	return 0;
}
#endif

#if defined(CONFIG_VIRT_TIMER) || defined(CONFIG_NO_IDLE_HZ)

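/*
 * The idle loop uses a monitor call (class 0) to signal that a
 * sleeping CPU is running again; do_monitor_call() then restarts the
 * timers that stop_timers() shut off and disables the monitor class.
 */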
void do_monitor_call(struct pt_regs *regs, long interruption_code)
{
	/* disable monitor call class 0 */
	__ctl_clear_bit(8, 15);

#ifdef CONFIG_VIRT_TIMER
	start_cpu_timer();
#endif
#ifdef CONFIG_NO_IDLE_HZ
	start_hz_timer(regs);
#endif
}

/*
 * called from cpu_idle to stop any timers
 * returns 1 if CPU should not be stopped
 */
int stop_timers(void)
{
#ifdef CONFIG_VIRT_TIMER
	if (stop_cpu_timer())
		return 1;
#endif

#ifdef CONFIG_NO_IDLE_HZ
	if (stop_hz_timer())
		return 1;
#endif

	/* enable monitor call class 0 */
	__ctl_set_bit(8, 15);

	return 0;
}
#endif

/*
 * Start the clock comparator and the virtual CPU timer
 * on the current CPU.
 */
void init_cpu_timer(void)
{
	unsigned long cr0;
	__u64 timer;

#ifdef CONFIG_VIRT_TIMER
	struct vtimer_queue *vt_list;
#endif

	timer = jiffies_timer_cc + jiffies_64 * CLK_TICKS_PER_JIFFY;
	S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY;
	timer += CLK_TICKS_PER_JIFFY + CPU_DEVIATION;
	asm volatile ("SCKC %0" : : "m" (timer));
	/* allow clock comparator timer interrupt */
	__ctl_store(cr0, 0, 0);
	cr0 |= 0x800;
	__ctl_load(cr0, 0, 0);

#ifdef CONFIG_VIRT_TIMER
	/* kick the virtual timer */
	timer = VTIMER_MAX_SLICE;
	asm volatile ("SPT %0" : : "m" (timer));
	__ctl_store(cr0, 0, 0);
	cr0 |= 0x400;
	__ctl_load(cr0, 0, 0);

	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
	INIT_LIST_HEAD(&vt_list->list);
	spin_lock_init(&vt_list->lock);
	vt_list->to_expire = 0;
	vt_list->offset = 0;
	vt_list->idle = 0;
#endif
}

/*
 * Initialize the TOD clock and the CPU timer of
 * the boot cpu.
 */
void __init time_init(void)
{
	__u64 set_time_cc;
	int cc;

	/* kick the TOD clock */
	asm volatile ("STCK 0(%1)\n\t"
		      "IPM %0\n\t"
		      "SRL %0,28" : "=r" (cc) : "a" (&init_timer_cc)
		      : "memory", "cc");
	switch (cc) {
	case 0: /* clock in set state: all is fine */
		break;
	case 1: /* clock in non-set state: FIXME */
		printk("time_init: TOD clock in non-set state\n");
		break;
	case 2: /* clock in error state: FIXME */
		printk("time_init: TOD clock in error state\n");
		break;
	case 3: /* clock in stopped or not-operational state: FIXME */
		printk("time_init: TOD clock stopped/non-operational\n");
		break;
	}
	jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;

	/* set xtime */
	xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
	set_time_cc = init_timer_cc - 0x8126d60e46000000LL +
		(0x3c26700LL*1000000*4096);
	tod_to_timeval(set_time_cc, &xtime);
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	/* request the clock comparator external interrupt */
	if (register_early_external_interrupt(0x1004, 0,
					      &ext_int_info_cc) != 0)
		panic("Couldn't request external interrupt 0x1004");

#ifdef CONFIG_VIRT_TIMER
	/* request the cpu timer external interrupt */
	if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
					      &ext_int_info_timer) != 0)
		panic("Couldn't request external interrupt 0x1005");
#endif

	init_cpu_timer();
}

#ifdef CONFIG_VIRT_TIMER
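/*
 * Public interface for virtual CPU timers: a vtimer_list must be
 * set up with init_virt_timer() before it can be added.
 */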
void init_virt_timer(struct vtimer_list *timer)
{
	timer->magic = VTIMER_MAGIC;
	timer->function = NULL;
	INIT_LIST_HEAD(&timer->entry);
	spin_lock_init(&timer->lock);
}

static inline int check_vtimer(struct vtimer_list *timer)
{
	if (timer->magic != VTIMER_MAGIC)
		return -EINVAL;
	return 0;
}

static inline int vtimer_pending(struct vtimer_list *timer)
{
	return (!list_empty(&timer->entry));
}

/*
 * this function should only run on the specified CPU
 */
static void internal_add_vtimer(struct vtimer_list *timer)
{
	unsigned long flags;
	__u64 done;
	struct vtimer_list *event;
	struct vtimer_queue *vt_list;

	vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vt_list->lock, flags);

	if (timer->cpu != smp_processor_id())
		printk("internal_add_vtimer: BUG, running on wrong CPU");

	/* if list is empty we only have to set the timer */
	if (list_empty(&vt_list->list)) {
		/* reset the offset, this may happen if the last timer was
		 * just deleted by mod_virt_timer and the interrupt
		 * didn't happen until here
		 */
		vt_list->offset = 0;
		goto fire;
	}

	/* save progress */
	asm volatile ("STPT %0" : "=m" (done));

	/* calculate completed work */
	done = vt_list->to_expire - done + vt_list->offset;
	vt_list->offset = 0;

	list_for_each_entry(event, &vt_list->list, entry)
		event->expires -= done;

 fire:
	list_add_sorted(timer, &vt_list->list);

	/* get first element, which is the next vtimer slice */
	event = list_entry(vt_list->list.next, struct vtimer_list, entry);

	set_vtimer(event->expires);
	spin_unlock_irqrestore(&vt_list->lock, flags);
	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
	put_cpu();
}

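/*
 * Validate a vtimer before it is added: it must be initialized, have
 * a callback function and a sane expiry value, and must not already
 * be pending.  On success the CPU is pinned with get_cpu() until
 * internal_add_vtimer() releases it again.
 */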
static inline int prepare_vtimer(struct vtimer_list *timer)
{
	if (check_vtimer(timer) || !timer->function) {
		printk("add_virt_timer: uninitialized timer\n");
		return -EINVAL;
	}

	if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
		printk("add_virt_timer: invalid timer expire value!\n");
		return -EINVAL;
	}

	if (vtimer_pending(timer)) {
		printk("add_virt_timer: timer pending\n");
		return -EBUSY;
	}

	timer->cpu = get_cpu();
	return 0;
}

/*
 * add_virt_timer - add an oneshot virtual CPU timer
 */
void add_virt_timer(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;

	if (prepare_vtimer(timer) < 0)
		return;

	timer->interval = 0;
	internal_add_vtimer(timer);
}

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(void *new)
{
	struct vtimer_list *timer;

	timer = (struct vtimer_list *)new;

	if (prepare_vtimer(timer) < 0)
		return;

	timer->interval = timer->expires;
	internal_add_vtimer(timer);
}

/*
 * If we change a pending timer the function must be called on the CPU
 * where the timer runs, e.g. by smp_call_function_on()
 *
 * The original mod_timer adds the timer if it is not pending. For compatibility
 * we do the same. The timer will be added on the current CPU as a oneshot timer.
 *
 * returns whether it has modified a pending timer (1) or not (0)
 */
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
	struct vtimer_queue *vt_list;
	unsigned long flags;
	int cpu;

	if (check_vtimer(timer) || !timer->function) {
		printk("mod_virt_timer: uninitialized timer\n");
		return -EINVAL;
	}

	if (!expires || expires > VTIMER_MAX_SLICE) {
		printk("mod_virt_timer: invalid expire range\n");
		return -EINVAL;
	}

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && vtimer_pending(timer))
		return 1;

	cpu = get_cpu();
	vt_list = &per_cpu(virt_cpu_timer, cpu);

	/* disable interrupts before test if timer is pending */
	spin_lock_irqsave(&vt_list->lock, flags);

	/* if timer isn't pending add it on the current CPU */
	if (!vtimer_pending(timer)) {
		spin_unlock_irqrestore(&vt_list->lock, flags);
		/* we do not activate an interval timer with mod_virt_timer */
		timer->interval = 0;
		timer->expires = expires;
		timer->cpu = cpu;
		internal_add_vtimer(timer);
		return 0;
	}

	/* check if we run on the right CPU */
	if (timer->cpu != cpu) {
		printk("mod_virt_timer: running on wrong CPU, check your code\n");
		spin_unlock_irqrestore(&vt_list->lock, flags);
		put_cpu();
		return -EINVAL;
	}

	list_del_init(&timer->entry);
	timer->expires = expires;

	/* also change the interval if we have an interval timer */
	if (timer->interval)
		timer->interval = expires;

	/* the timer can't expire anymore so we can release the lock */
	spin_unlock_irqrestore(&vt_list->lock, flags);
	internal_add_vtimer(timer);
	return 1;
}

/*
 * delete a virtual timer
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
int del_virt_timer(struct vtimer_list *timer)
{
	unsigned long flags;
	struct vtimer_queue *vt_list;

	if (check_vtimer(timer)) {
		printk("del_virt_timer: timer not initialized\n");
		return -EINVAL;
	}

	/* check if timer is pending */
	if (!vtimer_pending(timer))
		return 0;

	if (!cpu_online(timer->cpu)) {
		printk("del_virt_timer: CPU not present!\n");
		return -1;
	}

	vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
	spin_lock_irqsave(&vt_list->lock, flags);

	/* we don't interrupt a running timer, just let it expire! */
	list_del_init(&timer->entry);

	/* last timer removed */
	if (list_empty(&vt_list->list)) {
		vt_list->to_expire = 0;
		vt_list->offset = 0;
	}

	spin_unlock_irqrestore(&vt_list->lock, flags);
	return 1;
}