/*
 * linux/kernel/timer.c
 *
 * Kernel internal timers, kernel timekeeping, basic process system calls
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
 *
 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
 *            "A Kernel Model for Precision Timekeeping" by Dave Mills
 * 1998-12-24 Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *            serialize accesses to xtime/lost_ticks).
 *            Copyright (C) 1998 Andrea Arcangeli
 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
 *            Copyright (C) 2000, 2001, 2002 Ingo Molnar
 *            Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/vs_base.h>
#include <linux/vs_cvirt.h>
#include <linux/vserver/sched.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>
#ifdef CONFIG_TIME_INTERPOLATION
static void time_interpolator_update(long delta_nsec);
#else
#define time_interpolator_update(x)
#endif

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
typedef struct tvec_s {
	struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
	struct list_head vec[TVR_SIZE];
} tvec_root_t;

struct tvec_t_base_s {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	tvec_root_t tv1;
	tvec_t tv2;
	tvec_t tv3;
	tvec_t tv4;
	tvec_t tv5;
} ____cacheline_aligned_in_smp;

typedef struct tvec_t_base_s tvec_base_t;

tvec_base_t boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;

static inline void set_running_timer(tvec_base_t *base,
					struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->running_timer = timer;
#endif
}
static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
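/*
 * Illustrative sketch (not part of the original source): how an expiry
 * maps to a wheel slot with the !CONFIG_BASE_SMALL geometry, TVR_BITS=8
 * and TVN_BITS=6. tv1 resolves the next 256 jiffies exactly; each outer
 * wheel adds 6 more bits, so a tv2 bucket spans 256 jiffies and a tv5
 * bucket spans 2^26 jiffies. All values below are hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_wheel_slot(void)
{
	unsigned long timer_jiffies = 1000;		/* wheel's idea of "now" */
	unsigned long expires = timer_jiffies + 300;	/* 300 ticks out */
	unsigned long idx = expires - timer_jiffies;

	/* 300 >= TVR_SIZE but < 1 << (TVR_BITS + TVN_BITS), so this
	 * timer goes to tv2, slot (1300 >> 8) & 63 == 5, and will be
	 * cascaded back into tv1 when timer_jiffies reaches 1280. */
	BUG_ON(!(idx >= TVR_SIZE && idx < (1UL << (TVR_BITS + TVN_BITS))));
}
#endif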
/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
}
EXPORT_SYMBOL(init_timer);
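/*
 * Usage sketch (illustrative only): a timer must go through init_timer()
 * before any other timer function touches it. Handler name and cookie
 * below are hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_handler(unsigned long data)
{
	printk(KERN_DEBUG "timer fired, cookie=%lu\n", data);
}

static void example_start_timer(struct timer_list *t)
{
	init_timer(t);
	t->function = example_handler;
	t->data = 0;
	t->expires = jiffies + HZ;	/* roughly one second from now */
	add_timer(t);
}
#endif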
static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static tvec_base_t *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
{
	tvec_base_t *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

int __mod_timer(struct timer_list *timer, unsigned long expires)
{
	tvec_base_t *base, *new_base;
	unsigned long flags;
	int ret = 0;

	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		ret = 1;
	}

	new_base = __get_cpu_var(tvec_bases);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not finished yet. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->base = NULL;
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer->base = base;
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

EXPORT_SYMBOL(__mod_timer);
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	tvec_base_t *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer->base = base;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}
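/*
 * Illustrative sketch: pinning a timer to a particular CPU with
 * add_timer_on(); the timer must not already be pending. The handler
 * name is hypothetical.
 */
#if 0	/* example only, never compiled */
static void example_add_timer_on(struct timer_list *t)
{
	init_timer(t);
	t->function = example_handler;		/* hypothetical handler */
	t->expires = jiffies + 10 * HZ;
	add_timer_on(t, 0);			/* queue on CPU 0 */
}
#endif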
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated).
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	BUG_ON(!timer->function);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);
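/*
 * Usage sketch (illustrative only): re-arming with mod_timer() is the
 * safe way to move a deadline when several unserialized users may touch
 * the same timer.
 */
#if 0	/* example only, never compiled */
static void example_push_deadline(struct timer_list *t)
{
	/* activates t if inactive, otherwise just moves the expiry */
	mod_timer(t, jiffies + 5 * HZ);
}
#endif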
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	tvec_base_t *base;
	unsigned long flags;
	int ret = 0;

	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}

EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/*
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	tvec_base_t *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}

EXPORT_SYMBOL(del_timer_sync);
#endif
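/*
 * Teardown sketch (illustrative only; struct my_object is hypothetical):
 * before freeing an object that embeds a timer, stop the timer and wait
 * for a running handler, having first made sure nothing can re-arm it.
 */
#if 0	/* example only, never compiled */
struct my_object {
	struct timer_list timer;
	/* ... other fields ... */
};

static void example_teardown(struct my_object *obj)
{
	del_timer_sync(&obj->timer);	/* handler no longer running */
	kfree(obj);			/* now safe to free */
}
#endif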
static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(timer->base != base);
		internal_add_timer(base, timer);
	}

	return index;
}

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

static inline void __run_timers(tvec_base_t *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_entry(head->next, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;

			set_running_timer(base, timer);
			detach_timer(timer, 1);
			spin_unlock_irq(&base->lock);
			{
				int preempt_count = preempt_count();
				fn(data);
				if (preempt_count != preempt_count()) {
					printk(KERN_WARNING "huh, entered %p "
						"with preempt_count %08x, exited"
						" with %08x?\n",
						fn, preempt_count,
						preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}
#ifdef CONFIG_NO_IDLE_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
unsigned long next_timer_interrupt(void)
{
	tvec_base_t *base;
	struct list_head *list;
	struct timer_list *nte;
	unsigned long expires;
	unsigned long hr_expires = MAX_JIFFY_OFFSET;
	ktime_t hr_delta;
	tvec_t *varray[4];
	int i, j;

	hr_delta = hrtimer_get_next_event();
	if (hr_delta.tv64 != KTIME_MAX) {
		struct timespec tsdelta;
		tsdelta = ktime_to_timespec(hr_delta);
		hr_expires = timespec_to_jiffies(&tsdelta);
		if (hr_expires < 3)
			return hr_expires + jiffies;
	}
	hr_expires += jiffies;

	base = __get_cpu_var(tvec_bases);
	spin_lock(&base->lock);
	expires = base->timer_jiffies + (LONG_MAX >> 1);
	list = NULL;

	/* Look for timer events in tv1. */
	j = base->timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + j, entry) {
			expires = nte->expires;
			if (j < (base->timer_jiffies & TVR_MASK))
				list = base->tv2.vec + (INDEX(0));
			goto found;
		}
		j = (j + 1) & TVR_MASK;
	} while (j != (base->timer_jiffies & TVR_MASK));

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;
	for (i = 0; i < 4; i++) {
		j = INDEX(i);
		do {
			if (list_empty(varray[i]->vec + j)) {
				j = (j + 1) & TVN_MASK;
				continue;
			}
			list_for_each_entry(nte, varray[i]->vec + j, entry)
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			if (j < (INDEX(i)) && i < 3)
				list = varray[i + 1]->vec + (INDEX(i + 1));
			goto found;
		} while (j != (INDEX(i)));
	}
found:
	if (list) {
		/*
		 * The search wrapped. We need to look at the next list
		 * from next tv element that would cascade into tv element
		 * where we found the timer element.
		 */
		list_for_each_entry(nte, list, entry) {
			if (time_before(nte->expires, expires))
				expires = nte->expires;
		}
	}
	spin_unlock(&base->lock);

	/*
	 * It can happen that other CPUs service timer IRQs and increment
	 * jiffies, but we have not yet got a local timer tick to process
	 * the timer wheels. In that case, the expiry time can be before
	 * jiffies, but since the high-resolution timer here is relative to
	 * jiffies, the default expression when high-resolution timers are
	 * not active,
	 *
	 *   time_before(MAX_JIFFY_OFFSET + jiffies, expires)
	 *
	 * would falsely evaluate to true. If that is the case, just
	 * return jiffies so that we can immediately fire the local timer
	 * hardware.
	 */
	if (time_before(expires, jiffies))
		return jiffies;

	if (time_before(hr_expires, expires))
		return hr_expires;

	return expires;
}
#endif
/******************************************************************/

/*
 * Timekeeping variables
 */
unsigned long tick_usec = TICK_USEC;		/* USER_HZ period (usec) */
unsigned long tick_nsec = TICK_NSEC;		/* ACTHZ period (nsec) */

/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffy times) to get to monotonic time. Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));

EXPORT_SYMBOL(xtime);

/* Don't completely fail for HZ > 500. */
int tickadj = 500/HZ ? : 1;		/* microsecs */

/*
 * phase-lock loop variables
 */
/* TIME_ERROR prevents overwriting the CMOS clock */
int time_state = TIME_OK;		/* clock synchronization status	*/
int time_status = STA_UNSYNC;		/* clock status bits		*/
long time_offset;			/* time adjustment (us)		*/
long time_constant = 2;			/* pll time constant		*/
long time_tolerance = MAXFREQ;		/* frequency tolerance (ppm)	*/
long time_precision = 1;		/* clock precision (us)		*/
long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us)		*/
long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us)		*/
long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
					/* frequency offset (scaled ppm)*/
static long time_adj;			/* tick adjust (scaled 1 / HZ)	*/
long time_reftime;			/* time at last adjustment (s)	*/
long time_adjust;
long time_next_adjust;

/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 */
static void second_overflow(void)
{
	long ltemp;

	/* Bump the maxerror field */
	time_maxerror += time_tolerance >> SHIFT_USEC;
	if (time_maxerror > NTP_PHASE_LIMIT) {
		time_maxerror = NTP_PHASE_LIMIT;
		time_status |= STA_UNSYNC;
	}

	/*
	 * Leap second processing. If in leap-insert state at the end of the
	 * day, the system clock is set back one second; if in leap-delete
	 * state, the system clock is set ahead one second. The microtime()
	 * routine or external clock driver will ensure that reported time is
	 * always monotonic. The ugly divides should be replaced.
	 */
	switch (time_state) {
	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;
	case TIME_INS:
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			wall_to_monotonic.tv_sec++;
			/*
			 * The timer interpolator will make time change
			 * gradually instead of an immediate jump by one second
			 */
			time_interpolator_update(-NSEC_PER_SEC);
			time_state = TIME_OOP;
			clock_was_set();
			printk(KERN_NOTICE "Clock: inserting leap second "
					"23:59:60 UTC\n");
		}
		break;
	case TIME_DEL:
		if ((xtime.tv_sec + 1) % 86400 == 0) {
			xtime.tv_sec++;
			wall_to_monotonic.tv_sec--;
			/*
			 * Use of time interpolator for a gradual change of
			 * time
			 */
			time_interpolator_update(NSEC_PER_SEC);
			time_state = TIME_WAIT;
			clock_was_set();
			printk(KERN_NOTICE "Clock: deleting leap second "
					"23:59:59 UTC\n");
		}
		break;
	case TIME_OOP:
		time_state = TIME_WAIT;
		break;
	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Compute the phase adjustment for the next second. In PLL mode, the
	 * offset is reduced by a fixed factor times the time constant. In FLL
	 * mode the offset is used directly. In either mode, the maximum phase
	 * adjustment for each second is clamped so as to spread the adjustment
	 * over not more than the number of seconds between updates.
	 */
	ltemp = time_offset;
	if (!(time_status & STA_FLL))
		ltemp = shift_right(ltemp, SHIFT_KG + time_constant);
	ltemp = min(ltemp, (MAXPHASE / MINSEC) << SHIFT_UPDATE);
	ltemp = max(ltemp, -(MAXPHASE / MINSEC) << SHIFT_UPDATE);
	time_offset -= ltemp;
	time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);

	/*
	 * Compute the frequency estimate and additional phase adjustment due
	 * to frequency error for the next second.
	 */
	ltemp = time_freq;
	time_adj += shift_right(ltemp, (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE));

#if HZ == 100
	/*
	 * Compensate for (HZ==100) != (1 << SHIFT_HZ). Add 25% and 3.125% to
	 * get 128.125; => only 0.125% error (p. 14)
	 */
	time_adj += shift_right(time_adj, 2) + shift_right(time_adj, 5);
#endif
#if HZ == 250
	/*
	 * Compensate for (HZ==250) != (1 << SHIFT_HZ). Add 1.5625% and
	 * 0.78125% to get 255.85938; => only 0.05% error (p. 14)
	 */
	time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
#if HZ == 1000
	/*
	 * Compensate for (HZ==1000) != (1 << SHIFT_HZ). Add 1.5625% and
	 * 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
	 */
	time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
}
/*
 * Returns how many microseconds we need to add to xtime this tick
 * in doing an adjustment requested with adjtime.
 */
static long adjtime_adjustment(void)
{
	long time_adjust_step;

	time_adjust_step = time_adjust;
	if (time_adjust_step) {
		/*
		 * We are doing an adjtime thing. Prepare time_adjust_step to
		 * be within bounds. Note that a positive time_adjust means we
		 * want the clock to run faster.
		 *
		 * Limit the amount of the step to be in the range
		 * -tickadj .. +tickadj
		 */
		time_adjust_step = min(time_adjust_step, (long)tickadj);
		time_adjust_step = max(time_adjust_step, (long)-tickadj);
	}
	return time_adjust_step;
}

/* in the NTP reference this is called "hardclock()" */
static void update_ntp_one_tick(void)
{
	long time_adjust_step;

	time_adjust_step = adjtime_adjustment();
	if (time_adjust_step)
		/* Reduce by this step the amount of time left */
		time_adjust -= time_adjust_step;

	/* Changes by adjtime() do not take effect till next tick. */
	if (time_next_adjust != 0) {
		time_adjust = time_next_adjust;
		time_next_adjust = 0;
	}
}

/*
 * Return how long ticks are at the moment, that is, how much time
 * update_wall_time_one_tick will add to xtime next time we call it
 * (assuming no calls to do_adjtimex in the meantime).
 * The return value is in fixed-point nanoseconds shifted by the
 * specified number of bits to the right of the binary point.
 * This function has no side-effects.
 */
u64 current_tick_length(void)
{
	long delta_nsec;
	u64 ret;

	/* calculate the finest interval NTP will allow.
	 *    ie: nanosecond value shifted by (SHIFT_SCALE - 10)
	 */
	delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
	ret = (u64)delta_nsec << TICK_LENGTH_SHIFT;
	ret += (s64)time_adj << (TICK_LENGTH_SHIFT - (SHIFT_SCALE - 10));

	return ret;
}
/* XXX - all of this timekeeping code should be later moved to time.c */
#include <linux/clocksource.h>
static struct clocksource *clock; /* pointer to current clocksource */

#ifdef CONFIG_GENERIC_TIME
/**
 * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
 *
 * private function, must hold xtime_lock lock when being
 * called. Returns the number of nanoseconds since the
 * last call to update_wall_time() (adjusted by NTP scaling)
 */
static inline s64 __get_nsec_offset(void)
{
	cycle_t cycle_now, cycle_delta;
	s64 ns_offset;

	/* read clocksource: */
	cycle_now = clocksource_read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert to nanoseconds: */
	ns_offset = cyc2ns(clock, cycle_delta);

	return ns_offset;
}

/**
 * __get_realtime_clock_ts - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec. Used by
 * do_gettimeofday() and get_realtime_clock_ts().
 */
static inline void __get_realtime_clock_ts(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;
		nsecs = __get_nsec_offset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	__get_realtime_clock_ts(ts);
}

EXPORT_SYMBOL(getnstimeofday);

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv: pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using get_realtime_clock_ts()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	__get_realtime_clock_ts(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}

EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv: pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(struct timespec *tv)
{
	unsigned long flags;
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	nsec -= __get_nsec_offset();

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}

EXPORT_SYMBOL(do_settimeofday);

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void)
{
	struct clocksource *new;
	cycle_t now;
	u64 nsec;
	new = clocksource_get_next();
	if (clock != new) {
		now = clocksource_read(new);
		nsec = __get_nsec_offset();
		timespec_add_ns(&xtime, nsec);

		clock = new;
		clock->cycle_last = now;
		printk(KERN_INFO "Time: %s clocksource has been installed.\n",
					clock->name);
		return 1;
	} else if (clock->update_callback) {
		return clock->update_callback();
	}
	return 0;
}
#else
#define change_clocksource() (0)
#endif

/**
 * timekeeping_is_continuous - check to see if timekeeping is free running
 */
int timekeeping_is_continuous(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = clock->is_continuous;

	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	unsigned long flags;

	write_seqlock_irqsave(&xtime_lock, flags);
	clock = clocksource_get_next();
	clocksource_calculate_interval(clock, tick_nsec);
	clock->cycle_last = clocksource_read(clock);

	write_sequnlock_irqrestore(&xtime_lock, flags);
}

static int timekeeping_suspended;

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev: unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/wall_jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;

	write_seqlock_irqsave(&xtime_lock, flags);
	/* restart the last cycle value */
	clock->cycle_last = clocksource_read(clock);
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
	return 0;
}

static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	write_seqlock_irqsave(&xtime_lock, flags);
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);
	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
	set_kset_name("timekeeping"),
};

static struct sys_device device_timer = {
	.id	= 0,
	.cls	= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(timekeeping_init_device);
/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int clocksource_bigadjust(s64 error, s64 *interval, s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error. The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here. This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = current_tick_length() >> (TICK_LENGTH_SHIFT - clock->shift + 1);
	tick_error -= clock->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void clocksource_adjust(struct clocksource *clock, s64 offset)
{
	s64 error, interval = clock->cycle_interval;
	int adj;

	error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else
		return;

	clock->mult += adj;
	clock->xtime_interval += interval;
	clock->xtime_nsec -= offset;
	clock->error -= (interval - offset) << (TICK_LENGTH_SHIFT - clock->shift);
}
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
static void update_wall_time(void)
{
	cycle_t offset;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

#ifdef CONFIG_GENERIC_TIME
	offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
#else
	offset = clock->cycle_interval;
#endif
	clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;

	/* normally this loop will run just once, however in the
	 * case of lost or late ticks, it will accumulate correctly.
	 */
	while (offset >= clock->cycle_interval) {
		/* accumulate one interval */
		clock->xtime_nsec += clock->xtime_interval;
		clock->cycle_last += clock->cycle_interval;
		offset -= clock->cycle_interval;

		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
			xtime.tv_sec++;
			second_overflow();
		}

		/* interpolator bits */
		time_interpolator_update(clock->xtime_interval
						>> clock->shift);
		/* increment the NTP state machine */
		update_ntp_one_tick();

		/* accumulate error between NTP and clock interval */
		clock->error += current_tick_length();
		clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
	}

	/* correct the clock when NTP error is too big */
	clocksource_adjust(clock, offset);

	/* store full nanoseconds into xtime */
	xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;

	/* check to see if there is a new clocksource to use */
	if (change_clocksource()) {
		clock->error = 0;
		clock->xtime_nsec = 0;
		clocksource_calculate_interval(clock, tick_nsec);
	}
}
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	if (user_tick)
		account_user_time(p, jiffies_to_cputime(1));
	else
		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	scheduler_tick();
	run_posix_cpu_timers(p);
}
/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	return nr_active() * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (count < 0) {
		count += LOAD_FREQ;
		active_tasks = count_active_tasks();
		CALC_LOAD(avenrun[0], EXP_1, active_tasks);
		CALC_LOAD(avenrun[1], EXP_5, active_tasks);
		CALC_LOAD(avenrun[2], EXP_15, active_tasks);
	}
}
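/*
 * Illustrative sketch (not part of the original source): avenrun[] is
 * fixed point with FSHIFT fractional bits, so with FSHIFT == 11 a raw
 * value of 3072 is 3072 / 2048 = 1.50, which /proc/loadavg prints as
 * "1.50".
 */
#if 0	/* example only, never compiled */
static void example_decode_loadavg(void)
{
	unsigned long raw = avenrun[0];		/* 1-minute average */
	unsigned long ipart = raw >> FSHIFT;
	unsigned long fpart = ((raw & (FIXED_1 - 1)) * 100) >> FSHIFT;

	printk(KERN_DEBUG "load: %lu.%02lu\n", ipart, fpart);
}
#endif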
/* jiffies at the most recent update of wall time */
unsigned long wall_jiffies = INITIAL_JIFFIES;

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
#ifndef ARCH_HAVE_XTIME_LOCK
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);

EXPORT_SYMBOL(xtime_lock);
#endif

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	tvec_base_t *base = __get_cpu_var(tvec_bases);

	hrtimer_run_queues();
	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	raise_softirq(TIMER_SOFTIRQ);
	softlockup_tick();
}
/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(void)
{
	unsigned long ticks;

	ticks = jiffies - wall_jiffies;
	wall_jiffies += ticks;
	update_wall_time();
	calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(struct pt_regs *regs)
{
	jiffies_64++;
	/* prevent loading jiffies before storing new jiffies_64 value. */
	barrier();
	update_times();
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
	return alarm_setitimer(seconds);
}

#endif
/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
	return vx_map_tgid(current->tgid);
}

/*
 * Accessing ->parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
asmlinkage long sys_getppid(void)
{
	int pid;

	rcu_read_lock();
	pid = rcu_dereference(current->parent)->tgid;
	rcu_read_unlock();
	return vx_map_pid(pid);
}

#ifdef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.
 */

asmlinkage long do_getxpid(long *ppid)
{
	*ppid = sys_getppid();
	return sys_getpid();
}

#else /* _alpha_ */

asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current->egid;
}

#endif
static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>= 0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx from %p\n", timeout,
				__builtin_return_address(0));
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire);
	schedule();
	del_singleshot_timer_sync(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
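/*
 * Usage sketch (illustrative only): the task state must be set before
 * calling schedule_timeout(), otherwise it returns immediately.
 */
#if 0	/* example only, never compiled */
static signed long example_wait_one_second(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* 0 on timeout; remaining jiffies if a signal woke us early */
	return schedule_timeout(HZ);
}
#endif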
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
	return current->pid;
}
/**
 * sys_sysinfo - fill in sysinfo struct
 */
asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
	struct sysinfo val;
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	unsigned long seq;

	memset((char *)&val, 0, sizeof(struct sysinfo));

	do {
		struct timespec tp;
		seq = read_seqbegin(&xtime_lock);

		/*
		 * This is annoying. The below is the same thing
		 * posix_get_clock_monotonic() does, but it wants to
		 * take the lock which we want to cover the loads stuff
		 * too.
		 */

		getnstimeofday(&tp);
		tp.tv_sec += wall_to_monotonic.tv_sec;
		tp.tv_nsec += wall_to_monotonic.tv_nsec;
		if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
			tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
			tp.tv_sec++;
		}
		if (vx_flags(VXF_VIRT_UPTIME, 0))
			vx_vsi_uptime(&tp, NULL);
		val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

		val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
		val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
		val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

		val.procs = nr_threads;
	} while (read_seqretry(&xtime_lock, seq));

	si_meminfo(&val);
	si_swapinfo(&val);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels. If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = val.totalram + val.totalswap;
	if (mem_total < val.totalram || mem_total < val.totalswap)
		goto out;
	bitcount = 0;
	mem_unit = val.mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * val.mem_unit and set it to 1. This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	val.mem_unit = 1;
	val.totalram <<= bitcount;
	val.freeram <<= bitcount;
	val.sharedram <<= bitcount;
	val.bufferram <<= bitcount;
	val.totalswap <<= bitcount;
	val.freeswap <<= bitcount;
	val.totalhigh <<= bitcount;
	val.freehigh <<= bitcount;

 out:
	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
/*
 * lockdep: we want to track each per-CPU base as a separate lock-class,
 * but timer-bases are kmalloc()-ed, so we need to attach separate
 * keys to them:
 */
static struct lock_class_key base_lock_keys[NR_CPUS];

static int __devinit init_timers_cpu(int cpu)
{
	int j;
	tvec_base_t *base;
	static char __devinitdata tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kmalloc_node(sizeof(*base), GFP_KERNEL,
						cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;
			memset(base, 0, sizeof(*base));
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	spin_lock_init(&base->lock);
	lockdep_set_class(&base->lock, base_lock_keys + cpu);

	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_entry(head->next, struct timer_list, entry);
		detach_timer(timer, 0);
		timer->base = new_base;
		internal_add_timer(new_base, timer);
	}
}

static void __devinit migrate_timers(int cpu)
{
	tvec_base_t *old_base;
	tvec_base_t *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);

	local_irq_disable();
	spin_lock(&new_base->lock);
	spin_lock(&old_base->lock);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);
	local_irq_enable();
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	switch (action) {
	case CPU_UP_PREPARE:
		if (init_timers_cpu(cpu) < 0)
			return NOTIFY_BAD;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};


void __init init_timers(void)
{
	timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}
#ifdef CONFIG_TIME_INTERPOLATION

struct time_interpolator *time_interpolator __read_mostly;
static struct time_interpolator *time_interpolator_list __read_mostly;
static DEFINE_SPINLOCK(time_interpolator_lock);

static inline u64 time_interpolator_get_cycles(unsigned int src)
{
	unsigned long (*x)(void);

	switch (src) {
	case TIME_SOURCE_FUNCTION:
		x = time_interpolator->addr;
		return x();

	case TIME_SOURCE_MMIO64:
		return readq_relaxed((void __iomem *)time_interpolator->addr);

	case TIME_SOURCE_MMIO32:
		return readl_relaxed((void __iomem *)time_interpolator->addr);

	default:
		return get_cycles();
	}
}

static inline u64 time_interpolator_get_counter(int writelock)
{
	unsigned int src = time_interpolator->source;

	if (time_interpolator->jitter) {
		u64 lcycle;
		u64 now;

		do {
			lcycle = time_interpolator->last_cycle;
			now = time_interpolator_get_cycles(src);
			if (lcycle && time_after(lcycle, now))
				return lcycle;

			/* When holding the xtime write lock, there's no need
			 * to add the overhead of the cmpxchg. Readers are
			 * forced to retry until the write lock is released.
			 */
			if (writelock) {
				time_interpolator->last_cycle = now;
				return now;
			}
			/* Keep track of the last timer value returned. The
			 * use of cmpxchg here will cause contention in an
			 * SMP environment.
			 */
		} while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
		return now;
	} else
		return time_interpolator_get_cycles(src);
}
void time_interpolator_reset(void)
{
	time_interpolator->offset = 0;
	time_interpolator->last_counter = time_interpolator_get_counter(1);
}

#define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)

unsigned long time_interpolator_get_offset(void)
{
	/* If we do not have a time interpolator set up then just return zero */
	if (!time_interpolator)
		return 0;

	return time_interpolator->offset +
		GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
}

#define INTERPOLATOR_ADJUST 65536
#define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST

static void time_interpolator_update(long delta_nsec)
{
	u64 counter;
	unsigned long offset;

	/* If there is no time interpolator set up then do nothing */
	if (!time_interpolator)
		return;

	/*
	 * The interpolator compensates for late ticks by accumulating the late
	 * time in time_interpolator->offset. A tick earlier than expected will
	 * lead to a reset of the offset and a corresponding jump of the clock
	 * forward. Again this only works if the interpolator clock is running
	 * slightly slower than the regular clock and the tuning logic ensures
	 * that.
	 */

	counter = time_interpolator_get_counter(1);
	offset = time_interpolator->offset +
			GET_TI_NSECS(counter, time_interpolator);

	if (delta_nsec < 0 || (unsigned long)delta_nsec < offset)
		time_interpolator->offset = offset - delta_nsec;
	else {
		time_interpolator->skips++;
		time_interpolator->ns_skipped += delta_nsec - offset;
		time_interpolator->offset = 0;
	}
	time_interpolator->last_counter = counter;

	/* Tuning logic for time interpolator invoked every minute or so.
	 * Decrease interpolator clock speed if no skips occurred and an
	 * offset is carried.
	 * Increase interpolator clock speed if we skip too much time.
	 */
	if (jiffies % INTERPOLATOR_ADJUST == 0) {
		if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
			time_interpolator->nsec_per_cyc--;
		if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
			time_interpolator->nsec_per_cyc++;
		time_interpolator->skips = 0;
		time_interpolator->ns_skipped = 0;
	}
}
static inline int
is_better_time_interpolator(struct time_interpolator *new)
{
	if (!time_interpolator)
		return 1;
	return new->frequency > 2*time_interpolator->frequency ||
	    (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
}

void
register_time_interpolator(struct time_interpolator *ti)
{
	unsigned long flags;

	/* Sanity check */
	BUG_ON(ti->frequency == 0 || ti->mask == 0);

	ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
	spin_lock(&time_interpolator_lock);
	write_seqlock_irqsave(&xtime_lock, flags);
	if (is_better_time_interpolator(ti)) {
		time_interpolator = ti;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);

	ti->next = time_interpolator_list;
	time_interpolator_list = ti;
	spin_unlock(&time_interpolator_lock);
}

void
unregister_time_interpolator(struct time_interpolator *ti)
{
	struct time_interpolator *curr, **prev;
	unsigned long flags;

	spin_lock(&time_interpolator_lock);
	prev = &time_interpolator_list;
	for (curr = *prev; curr; curr = curr->next) {
		if (curr == ti) {
			*prev = curr->next;
			break;
		}
		prev = &curr->next;
	}

	write_seqlock_irqsave(&xtime_lock, flags);
	if (ti == time_interpolator) {
		/* we lost the best time-interpolator: */
		time_interpolator = NULL;
		/* find the next-best interpolator */
		for (curr = time_interpolator_list; curr; curr = curr->next)
			if (is_better_time_interpolator(curr))
				time_interpolator = curr;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);
	spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);
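/*
 * Usage sketch (illustrative only): msleep() for delays that must run to
 * completion, msleep_interruptible() when a signal may cut the sleep
 * short.
 */
#if 0	/* example only, never compiled */
static void example_sleep(void)
{
	msleep(50);			/* at least ~50ms, signals ignored */

	if (msleep_interruptible(1000))
		printk(KERN_DEBUG "woken early by a signal\n");
}
#endif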