4 * Kernel internal timers, kernel timekeeping, basic process system calls
6 * Copyright (C) 1991, 1992 Linus Torvalds
8 * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better.
10 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
11 * "A Kernel Model for Precision Timekeeping" by Dave Mills
12 * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
13 * serialize accesses to xtime/lost_ticks).
14 * Copyright (C) 1998 Andrea Arcangeli
15 * 1999-03-10 Improved NTP compatibility by Ulrich Windl
16 * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
17 * 2000-10-05 Implemented scalable SMP per-CPU timer handling.
18 * Copyright (C) 2000, 2001, 2002 Ingo Molnar
19 * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
22 #include <linux/kernel_stat.h>
23 #include <linux/module.h>
24 #include <linux/interrupt.h>
25 #include <linux/percpu.h>
26 #include <linux/init.h>
28 #include <linux/swap.h>
29 #include <linux/notifier.h>
30 #include <linux/thread_info.h>
31 #include <linux/time.h>
32 #include <linux/jiffies.h>
33 #include <linux/posix-timers.h>
34 #include <linux/cpu.h>
35 #include <linux/syscalls.h>
36 #include <linux/delay.h>
37 #include <linux/vs_base.h>
38 #include <linux/vs_cvirt.h>
39 #include <linux/vs_pid.h>
40 #include <linux/vserver/sched.h>
42 #include <asm/uaccess.h>
43 #include <asm/unistd.h>
44 #include <asm/div64.h>
45 #include <asm/timex.h>
48 u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
50 EXPORT_SYMBOL(jiffies_64);
53 * per-CPU timer vector definitions:
55 #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
56 #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
57 #define TVN_SIZE (1 << TVN_BITS)
58 #define TVR_SIZE (1 << TVR_BITS)
59 #define TVN_MASK (TVN_SIZE - 1)
60 #define TVR_MASK (TVR_SIZE - 1)
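/*
 * A rough sketch of the resulting wheel geometry (with CONFIG_BASE_SMALL=0):
 * tv1 has 256 one-jiffy slots, and each of tv2..tv5 has 64 slots whose
 * granularity is 256, 2^14, 2^20 and 2^26 jiffies respectively, so the five
 * arrays together cover the full 32-bit jiffies range (256 * 64^4 == 2^32)
 * before a timeout has to be capped in internal_add_timer().
 */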
62 typedef struct tvec_s {
63 struct list_head vec[TVN_SIZE];
66 typedef struct tvec_root_s {
67 struct list_head vec[TVR_SIZE];
70 struct tvec_t_base_s {
72 struct timer_list *running_timer;
73 unsigned long timer_jiffies;
79 } ____cacheline_aligned_in_smp;
81 typedef struct tvec_t_base_s tvec_base_t;
83 tvec_base_t boot_tvec_bases;
84 EXPORT_SYMBOL(boot_tvec_bases);
85 static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
88 * __round_jiffies - function to round jiffies to a full second
89 * @j: the time in (absolute) jiffies that should be rounded
90 * @cpu: the processor number on which the timeout will happen
92 * __round_jiffies rounds an absolute time in the future (in jiffies)
93 * up or down to (approximately) full seconds. This is useful for timers
94 * for which the exact time they fire does not matter too much, as long as
95 * they fire approximately every X seconds.
97 * By rounding these timers to whole seconds, all such timers will fire
98 * at the same time, rather than at various times spread out. The goal
99 * of this is to have the CPU wake up less, which saves power.
101 * The exact rounding is skewed for each processor to avoid all
102 * processors firing at the exact same time, which could lead
103 * to lock contention or spurious cache line bouncing.
105 * The return value is the rounded version of the "j" parameter.
107 unsigned long __round_jiffies(unsigned long j, int cpu)
110 unsigned long original = j;
113 * We don't want all cpus firing their timers at once hitting the
114 * same lock or cachelines, so we skew each extra cpu with an extra
115 * 3 jiffies. This 3 jiffies came originally from the mm/ code which already did this.
117 * The skew is done by adding 3*cpunr, then round, then subtract this
118 * extra offset again.
125 * If the target jiffie is just after a whole second (which can happen
126 * due to delays of the timer irq, long irq off times etc etc) then
127 * we should round down to the whole second, not up. Use 1/4th second
128 * as cutoff for this rounding as an extreme upper bound for this.
130 if (rem < HZ/4) /* round down */
135 /* now that we have rounded, subtract the extra skew again */
138 if (j <= jiffies) /* rounding ate our timeout entirely; */
142 EXPORT_SYMBOL_GPL(__round_jiffies);
145 * __round_jiffies_relative - function to round jiffies to a full second
146 * @j: the time in (relative) jiffies that should be rounded
147 * @cpu: the processor number on which the timeout will happen
149 * __round_jiffies_relative rounds a time delta in the future (in jiffies)
150 * up or down to (approximately) full seconds. This is useful for timers
151 * for which the exact time they fire does not matter too much, as long as
152 * they fire approximately every X seconds.
154 * By rounding these timers to whole seconds, all such timers will fire
155 * at the same time, rather than at various times spread out. The goal
156 * of this is to have the CPU wake up less, which saves power.
158 * The exact rounding is skewed for each processor to avoid all
159 * processors firing at the exact same time, which could lead
160 * to lock contention or spurious cache line bouncing.
162 * The return value is the rounded version of the "j" parameter.
164 unsigned long __round_jiffies_relative(unsigned long j, int cpu)
167 * In theory the following code can skip a jiffy in case jiffies
168 * increments right between the addition and the later subtraction.
169 * However since the entire point of this function is to use approximate
170 * timeouts, it's entirely ok to not handle that.
172 return __round_jiffies(j + jiffies, cpu) - jiffies;
174 EXPORT_SYMBOL_GPL(__round_jiffies_relative);
177 * round_jiffies - function to round jiffies to a full second
178 * @j: the time in (absolute) jiffies that should be rounded
180 * round_jiffies rounds an absolute time in the future (in jiffies)
181 * up or down to (approximately) full seconds. This is useful for timers
182 * for which the exact time they fire does not matter too much, as long as
183 * they fire approximately every X seconds.
185 * By rounding these timers to whole seconds, all such timers will fire
186 * at the same time, rather than at various times spread out. The goal
187 * of this is to have the CPU wake up less, which saves power.
189 * The return value is the rounded version of the "j" parameter.
191 unsigned long round_jiffies(unsigned long j)
193 return __round_jiffies(j, raw_smp_processor_id());
195 EXPORT_SYMBOL_GPL(round_jiffies);
198 * round_jiffies_relative - function to round jiffies to a full second
199 * @j: the time in (relative) jiffies that should be rounded
201 * round_jiffies_relative rounds a time delta in the future (in jiffies)
202 * up or down to (approximately) full seconds. This is useful for timers
203 * for which the exact time they fire does not matter too much, as long as
204 * they fire approximately every X seconds.
206 * By rounding these timers to whole seconds, all such timers will fire
207 * at the same time, rather than at various times spread out. The goal
208 * of this is to have the CPU wake up less, which saves power.
210 * The return value is the rounded version of the "j" parameter.
212 unsigned long round_jiffies_relative(unsigned long j)
214 return __round_jiffies_relative(j, raw_smp_processor_id());
216 EXPORT_SYMBOL_GPL(round_jiffies_relative);
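/*
 * Illustrative sketch (not part of the original file): a driver re-arming a
 * low-priority housekeeping timer roughly every 10 seconds, where the exact
 * expiry does not matter, can batch its wakeups with other such timers:
 *
 *	mod_timer(&my_timer, round_jiffies(jiffies + 10 * HZ));
 *
 * or, with a relative timeout:
 *
 *	mod_timer(&my_timer, jiffies + round_jiffies_relative(10 * HZ));
 *
 * "my_timer" is a hypothetical struct timer_list used only for illustration.
 */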
219 static inline void set_running_timer(tvec_base_t *base,
220 struct timer_list *timer)
223 base->running_timer = timer;
227 static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
229 unsigned long expires = timer->expires;
230 unsigned long idx = expires - base->timer_jiffies;
231 struct list_head *vec;
233 if (idx < TVR_SIZE) {
234 int i = expires & TVR_MASK;
235 vec = base->tv1.vec + i;
236 } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
237 int i = (expires >> TVR_BITS) & TVN_MASK;
238 vec = base->tv2.vec + i;
239 } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
240 int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
241 vec = base->tv3.vec + i;
242 } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
243 int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
244 vec = base->tv4.vec + i;
245 } else if ((signed long) idx < 0) {
247 * Can happen if you add a timer with expires == jiffies,
248 * or you set a timer to go off in the past
250 vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
253 /* If the timeout is larger than 0xffffffff on 64-bit
254 * architectures then we use the maximum timeout:
256 if (idx > 0xffffffffUL) {
258 expires = idx + base->timer_jiffies;
260 i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
261 vec = base->tv5.vec + i;
266 list_add_tail(&timer->entry, vec);
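/*
 * Worked example of the bucketing above (TVR_BITS=8, TVN_BITS=6): with
 * base->timer_jiffies == 1000 and timer->expires == 6000, idx is 5000,
 * which is >= TVR_SIZE but < 1 << (TVR_BITS + TVN_BITS), so the timer is
 * queued in tv2 slot (6000 >> 8) & 63 == 23 and is cascaded back into tv1
 * once timer_jiffies reaches that slot's 256-jiffy window.
 */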
270 * init_timer - initialize a timer.
271 * @timer: the timer to be initialized
273 * init_timer() must be done to a timer prior to calling *any* of the
274 * other timer functions.
276 void fastcall init_timer(struct timer_list *timer)
278 timer->entry.next = NULL;
279 timer->base = __raw_get_cpu_var(tvec_bases);
281 EXPORT_SYMBOL(init_timer);
283 static inline void detach_timer(struct timer_list *timer,
286 struct list_head *entry = &timer->entry;
288 __list_del(entry->prev, entry->next);
291 entry->prev = LIST_POISON2;
295 * We are using hashed locking: holding per_cpu(tvec_bases).lock
296 * means that all timers which are tied to this base via timer->base are
297 * locked, and the base itself is locked too.
299 * So __run_timers/migrate_timers can safely modify all timers which could
300 * be found on ->tvX lists.
302 * When the timer's base is locked, and the timer removed from list, it is
303 * possible to set timer->base = NULL and drop the lock: the timer remains locked.
306 static tvec_base_t *lock_timer_base(struct timer_list *timer,
307 unsigned long *flags)
308 __acquires(timer->base->lock)
314 if (likely(base != NULL)) {
315 spin_lock_irqsave(&base->lock, *flags);
316 if (likely(base == timer->base))
318 /* The timer has migrated to another CPU */
319 spin_unlock_irqrestore(&base->lock, *flags);
325 int __mod_timer(struct timer_list *timer, unsigned long expires)
327 tvec_base_t *base, *new_base;
331 BUG_ON(!timer->function);
333 base = lock_timer_base(timer, &flags);
335 if (timer_pending(timer)) {
336 detach_timer(timer, 0);
340 new_base = __get_cpu_var(tvec_bases);
342 if (base != new_base) {
344 * We are trying to schedule the timer on the local CPU.
345 * However we can't change the timer's base while it is running,
346 * otherwise del_timer_sync() can't detect that the timer's
347 * handler has not yet finished. This also guarantees that
348 * the timer is serialized wrt itself.
350 if (likely(base->running_timer != timer)) {
351 /* See the comment in lock_timer_base() */
353 spin_unlock(&base->lock);
355 spin_lock(&base->lock);
360 timer->expires = expires;
361 internal_add_timer(base, timer);
362 spin_unlock_irqrestore(&base->lock, flags);
367 EXPORT_SYMBOL(__mod_timer);
370 * add_timer_on - start a timer on a particular CPU
371 * @timer: the timer to be added
372 * @cpu: the CPU to start it on
374 * This is not very scalable on SMP. Double adds are not possible.
376 void add_timer_on(struct timer_list *timer, int cpu)
378 tvec_base_t *base = per_cpu(tvec_bases, cpu);
381 BUG_ON(timer_pending(timer) || !timer->function);
382 spin_lock_irqsave(&base->lock, flags);
384 internal_add_timer(base, timer);
385 spin_unlock_irqrestore(&base->lock, flags);
390 * mod_timer - modify a timer's timeout
391 * @timer: the timer to be modified
392 * @expires: new timeout in jiffies
394 * mod_timer is a more efficient way to update the expire field of an
395 * active timer (if the timer is inactive it will be activated)
397 * mod_timer(timer, expires) is equivalent to:
399 * del_timer(timer); timer->expires = expires; add_timer(timer);
401 * Note that if there are multiple unserialized concurrent users of the
402 * same timer, then mod_timer() is the only safe way to modify the timeout,
403 * since add_timer() cannot modify an already running timer.
405 * The function returns whether it has modified a pending timer or not.
406 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
407 * active timer returns 1.)
409 int mod_timer(struct timer_list *timer, unsigned long expires)
411 BUG_ON(!timer->function);
414 * This is a common optimization triggered by the
415 * networking code - if the timer is re-modified
416 * to be the same thing then just return:
418 if (timer->expires == expires && timer_pending(timer))
421 return __mod_timer(timer, expires);
424 EXPORT_SYMBOL(mod_timer);
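/*
 * Illustrative sketch (not part of the original file): the common pattern
 * of (re)starting a 100ms timeout from process or softirq context is just
 *
 *	mod_timer(&my_timer, jiffies + msecs_to_jiffies(100));
 *
 * which works whether or not "my_timer" (hypothetical) is already pending.
 */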
427 * del_timer - deactivate a timer.
428 * @timer: the timer to be deactivated
430 * del_timer() deactivates a timer - this works on both active and inactive timers.
433 * The function returns whether it has deactivated a pending timer or not.
434 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
435 * active timer returns 1.)
437 int del_timer(struct timer_list *timer)
443 if (timer_pending(timer)) {
444 base = lock_timer_base(timer, &flags);
445 if (timer_pending(timer)) {
446 detach_timer(timer, 1);
449 spin_unlock_irqrestore(&base->lock, flags);
455 EXPORT_SYMBOL(del_timer);
459 * try_to_del_timer_sync - Try to deactivate a timer
460 * @timer: the timer to deactivate
462 * This function tries to deactivate a timer. Upon successful (ret >= 0)
463 * exit the timer is not queued and the handler is not running on any CPU.
465 * It must not be called from interrupt contexts.
467 int try_to_del_timer_sync(struct timer_list *timer)
473 base = lock_timer_base(timer, &flags);
475 if (base->running_timer == timer)
479 if (timer_pending(timer)) {
480 detach_timer(timer, 1);
484 spin_unlock_irqrestore(&base->lock, flags);
490 * del_timer_sync - deactivate a timer and wait for the handler to finish.
491 * @timer: the timer to be deactivated
493 * This function only differs from del_timer() on SMP: besides deactivating
494 * the timer it also makes sure the handler has finished executing on other CPUs.
497 * Synchronization rules: callers must prevent restarting of the timer,
498 * otherwise this function is meaningless. It must not be called from
499 * interrupt contexts. The caller must not hold locks which would prevent
500 * completion of the timer's handler. The timer's handler must not call
501 * add_timer_on(). Upon exit the timer is not queued and the handler is
502 * not running on any CPU.
504 * The function returns whether it has deactivated a pending timer or not.
506 int del_timer_sync(struct timer_list *timer)
509 int ret = try_to_del_timer_sync(timer);
516 EXPORT_SYMBOL(del_timer_sync);
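/*
 * Illustrative sketch (not part of the original file) of the teardown rule
 * documented above: first make sure nothing can re-arm the timer (stop the
 * interrupt source, clear a "shutting down" flag the handler checks, ...),
 * then call
 *
 *	del_timer_sync(&my_timer);	("my_timer" is hypothetical)
 *
 * After it returns, the handler is guaranteed not to be running anywhere,
 * so the structure containing the timer may safely be freed.
 */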
519 static int cascade(tvec_base_t *base, tvec_t *tv, int index)
521 /* cascade all the timers from tv up one level */
522 struct timer_list *timer, *tmp;
523 struct list_head tv_list;
525 list_replace_init(tv->vec + index, &tv_list);
528 * We are removing _all_ timers from the list, so we
529 * don't have to detach them individually.
531 list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
532 BUG_ON(timer->base != base);
533 internal_add_timer(base, timer);
539 #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
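/*
 * Sketch of how INDEX() drives the cascade in __run_timers() below: every
 * time the low TVR_BITS of timer_jiffies wrap to zero, the tv2 slot given
 * by INDEX(0) is emptied back into tv1; when INDEX(0) itself wraps to zero,
 * tv3 cascades into tv2 via INDEX(1), and so on up to tv5.
 */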
542 * __run_timers - run all expired timers (if any) on this CPU.
543 * @base: the timer vector to be processed.
545 * This function cascades all vectors and executes all expired timer vectors.
548 static inline void __run_timers(tvec_base_t *base)
550 struct timer_list *timer;
552 spin_lock_irq(&base->lock);
553 while (time_after_eq(jiffies, base->timer_jiffies)) {
554 struct list_head work_list;
555 struct list_head *head = &work_list;
556 int index = base->timer_jiffies & TVR_MASK;
562 (!cascade(base, &base->tv2, INDEX(0))) &&
563 (!cascade(base, &base->tv3, INDEX(1))) &&
564 !cascade(base, &base->tv4, INDEX(2)))
565 cascade(base, &base->tv5, INDEX(3));
566 ++base->timer_jiffies;
567 list_replace_init(base->tv1.vec + index, &work_list);
568 while (!list_empty(head)) {
569 void (*fn)(unsigned long);
572 timer = list_entry(head->next,struct timer_list,entry);
573 fn = timer->function;
576 set_running_timer(base, timer);
577 detach_timer(timer, 1);
578 spin_unlock_irq(&base->lock);
580 int preempt_count = preempt_count();
582 if (preempt_count != preempt_count()) {
583 printk(KERN_WARNING "huh, entered %p "
584 "with preempt_count %08x, exited"
591 spin_lock_irq(&base->lock);
594 set_running_timer(base, NULL);
595 spin_unlock_irq(&base->lock);
598 #ifdef CONFIG_NO_IDLE_HZ
600 * Find out when the next timer event is due to happen. This
601 * is used on S/390 to stop all activity when a cpu is idle.
602 * This function needs to be called with interrupts disabled.
604 unsigned long next_timer_interrupt(void)
607 struct list_head *list;
608 struct timer_list *nte;
609 unsigned long expires;
610 unsigned long hr_expires = MAX_JIFFY_OFFSET;
615 hr_delta = hrtimer_get_next_event();
616 if (hr_delta.tv64 != KTIME_MAX) {
617 struct timespec tsdelta;
618 tsdelta = ktime_to_timespec(hr_delta);
619 hr_expires = timespec_to_jiffies(&tsdelta);
621 return hr_expires + jiffies;
623 hr_expires += jiffies;
625 base = __get_cpu_var(tvec_bases);
626 spin_lock(&base->lock);
627 expires = base->timer_jiffies + (LONG_MAX >> 1);
630 /* Look for timer events in tv1. */
631 j = base->timer_jiffies & TVR_MASK;
633 list_for_each_entry(nte, base->tv1.vec + j, entry) {
634 expires = nte->expires;
635 if (j < (base->timer_jiffies & TVR_MASK))
636 list = base->tv2.vec + (INDEX(0));
639 j = (j + 1) & TVR_MASK;
640 } while (j != (base->timer_jiffies & TVR_MASK));
643 varray[0] = &base->tv2;
644 varray[1] = &base->tv3;
645 varray[2] = &base->tv4;
646 varray[3] = &base->tv5;
647 for (i = 0; i < 4; i++) {
650 if (list_empty(varray[i]->vec + j)) {
651 j = (j + 1) & TVN_MASK;
654 list_for_each_entry(nte, varray[i]->vec + j, entry)
655 if (time_before(nte->expires, expires))
656 expires = nte->expires;
657 if (j < (INDEX(i)) && i < 3)
658 list = varray[i + 1]->vec + (INDEX(i + 1));
660 } while (j != (INDEX(i)));
665 * The search wrapped. We need to look at the next list
666 * from the next tv element that would cascade into the tv element
667 * where we found the timer element.
669 list_for_each_entry(nte, list, entry) {
670 if (time_before(nte->expires, expires))
671 expires = nte->expires;
674 spin_unlock(&base->lock);
677 * It can happen that other CPUs service timer IRQs and increment
678 * jiffies, but we have not yet got a local timer tick to process
679 * the timer wheels. In that case, the expiry time can be before
680 * jiffies, but since the high-resolution timer here is relative to
681 * jiffies, the default expression when high-resolution timers are not active,
684 * time_before(MAX_JIFFY_OFFSET + jiffies, expires)
686 * would falsely evaluate to true. If that is the case, just
687 * return jiffies so that we can immediately fire the local timer
689 if (time_before(expires, jiffies))
692 if (time_before(hr_expires, expires))
699 /******************************************************************/
703 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
704 * for sub jiffie times) to get to monotonic time. Monotonic is pegged
705 * at zero at system boot time, so wall_to_monotonic will be negative,
706 * however, we will ALWAYS keep the tv_nsec part positive so we can use
707 * the usual normalization.
709 struct timespec xtime __attribute__ ((aligned (16)));
710 struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
712 EXPORT_SYMBOL(xtime);
715 /* XXX - all of this timekeeping code should be later moved to time.c */
716 #include <linux/clocksource.h>
717 static struct clocksource *clock; /* pointer to current clocksource */
719 #ifdef CONFIG_GENERIC_TIME
721 * __get_nsec_offset - Returns nanoseconds since last call to update_wall_time()
723 * private function, must hold xtime_lock lock when being
724 * called. Returns the number of nanoseconds since the
725 * last call to update_wall_time() (adjusted by NTP scaling)
727 static inline s64 __get_nsec_offset(void)
729 cycle_t cycle_now, cycle_delta;
732 /* read clocksource: */
733 cycle_now = clocksource_read(clock);
735 /* calculate the delta since the last update_wall_time: */
736 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
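/*
 * The & clock->mask above makes the subtraction wrap correctly for
 * clocksources whose counter is narrower than 64 bits, so a rollover
 * between reads still yields the right (small) delta.
 */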
738 /* convert to nanoseconds: */
739 ns_offset = cyc2ns(clock, cycle_delta);
745 * __get_realtime_clock_ts - Returns the time of day in a timespec
746 * @ts: pointer to the timespec to be set
748 * Returns the time of day in a timespec. Used by
749 * do_gettimeofday() and get_realtime_clock_ts().
751 static inline void __get_realtime_clock_ts(struct timespec *ts)
757 seq = read_seqbegin(&xtime_lock);
760 nsecs = __get_nsec_offset();
762 } while (read_seqretry(&xtime_lock, seq));
764 timespec_add_ns(ts, nsecs);
768 * getnstimeofday - Returns the time of day in a timespec
769 * @ts: pointer to the timespec to be set
771 * Returns the time of day in a timespec.
773 void getnstimeofday(struct timespec *ts)
775 __get_realtime_clock_ts(ts);
778 EXPORT_SYMBOL(getnstimeofday);
781 * do_gettimeofday - Returns the time of day in a timeval
782 * @tv: pointer to the timeval to be set
784 * NOTE: Users should be converted to using get_realtime_clock_ts()
786 void do_gettimeofday(struct timeval *tv)
790 __get_realtime_clock_ts(&now);
791 tv->tv_sec = now.tv_sec;
792 tv->tv_usec = now.tv_nsec/1000;
795 EXPORT_SYMBOL(do_gettimeofday);
797 * do_settimeofday - Sets the time of day
798 * @tv: pointer to the timespec variable containing the new time
800 * Sets the time of day to the new time and updates NTP and notifies hrtimers
802 int do_settimeofday(struct timespec *tv)
805 time_t wtm_sec, sec = tv->tv_sec;
806 long wtm_nsec, nsec = tv->tv_nsec;
808 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
811 write_seqlock_irqsave(&xtime_lock, flags);
813 nsec -= __get_nsec_offset();
815 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
816 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
818 set_normalized_timespec(&xtime, sec, nsec);
819 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
824 write_sequnlock_irqrestore(&xtime_lock, flags);
826 /* signal hrtimers about time change */
832 EXPORT_SYMBOL(do_settimeofday);
835 * change_clocksource - Swaps clocksources if a new one is available
837 * Accumulates current time interval and initializes new clocksource
839 static int change_clocksource(void)
841 struct clocksource *new;
844 new = clocksource_get_next();
846 now = clocksource_read(new);
847 nsec = __get_nsec_offset();
848 timespec_add_ns(&xtime, nsec);
851 clock->cycle_last = now;
852 printk(KERN_INFO "Time: %s clocksource has been installed.\n",
855 } else if (clock->update_callback) {
856 return clock->update_callback();
861 static inline int change_clocksource(void)
868 * timekeeping_is_continuous - check to see if timekeeping is free running
870 int timekeeping_is_continuous(void)
876 seq = read_seqbegin(&xtime_lock);
878 ret = clock->is_continuous;
880 } while (read_seqretry(&xtime_lock, seq));
886 * timekeeping_init - Initializes the clocksource and common timekeeping values
888 void __init timekeeping_init(void)
892 write_seqlock_irqsave(&xtime_lock, flags);
896 clock = clocksource_get_next();
897 clocksource_calculate_interval(clock, tick_nsec);
898 clock->cycle_last = clocksource_read(clock);
900 write_sequnlock_irqrestore(&xtime_lock, flags);
904 static int timekeeping_suspended;
906 * timekeeping_resume - Resumes the generic timekeeping subsystem.
909 * This is for the generic clocksource timekeeping.
910 * xtime/wall_to_monotonic/jiffies/etc are
911 * still managed by arch specific suspend/resume code.
913 static int timekeeping_resume(struct sys_device *dev)
917 write_seqlock_irqsave(&xtime_lock, flags);
918 /* restart the last cycle value */
919 clock->cycle_last = clocksource_read(clock);
921 timekeeping_suspended = 0;
922 write_sequnlock_irqrestore(&xtime_lock, flags);
926 static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
930 write_seqlock_irqsave(&xtime_lock, flags);
931 timekeeping_suspended = 1;
932 write_sequnlock_irqrestore(&xtime_lock, flags);
936 /* sysfs resume/suspend bits for timekeeping */
937 static struct sysdev_class timekeeping_sysclass = {
938 .resume = timekeeping_resume,
939 .suspend = timekeeping_suspend,
940 set_kset_name("timekeeping"),
943 static struct sys_device device_timer = {
945 .cls = &timekeeping_sysclass,
948 static int __init timekeeping_init_device(void)
950 int error = sysdev_class_register(&timekeeping_sysclass);
952 error = sysdev_register(&device_timer);
956 device_initcall(timekeeping_init_device);
959 * If the error is already larger, we look ahead even further
960 * to compensate for late or lost adjustments.
962 static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
970 * Use the current error value to determine how much to look ahead.
971 * The larger the error the slower we adjust for it to avoid problems
972 * with losing too many ticks, otherwise we would overadjust and
973 * produce an even larger error. The smaller the adjustment the
974 * faster we try to adjust for it, as lost ticks can do less harm
975 * here. This is tuned so that an error of about 1 msec is adjusted
976 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
978 error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
979 error2 = abs(error2);
980 for (look_ahead = 0; error2 > 0; look_ahead++)
984 * Now calculate the error in (1 << look_ahead) ticks, but first
985 * remove the single look ahead already included in the error.
987 tick_error = current_tick_length() >>
988 (TICK_LENGTH_SHIFT - clock->shift + 1);
989 tick_error -= clock->xtime_interval >> 1;
990 error = ((error - tick_error) >> look_ahead) + tick_error;
992 /* Finally calculate the adjustment shift value. */
997 *interval = -*interval;
1001 for (adj = 0; error > i; adj++)
1010 * Adjust the multiplier to reduce the error value;
1011 * this is optimized for the most common adjustments of -1, 0 and 1,
1012 * for other values we can do a bit more work.
1014 static void clocksource_adjust(struct clocksource *clock, s64 offset)
1016 s64 error, interval = clock->cycle_interval;
1019 error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
1020 if (error > interval) {
1022 if (likely(error <= interval))
1025 adj = clocksource_bigadjust(error, &interval, &offset);
1026 } else if (error < -interval) {
1028 if (likely(error >= -interval)) {
1030 interval = -interval;
1033 adj = clocksource_bigadjust(error, &interval, &offset);
1038 clock->xtime_interval += interval;
1039 clock->xtime_nsec -= offset;
1040 clock->error -= (interval - offset) <<
1041 (TICK_LENGTH_SHIFT - clock->shift);
1045 * update_wall_time - Uses the current clocksource to increment the wall time
1047 * Called from the timer interrupt, must hold a write on xtime_lock.
1049 static void update_wall_time(void)
1053 /* Make sure we're fully resumed: */
1054 if (unlikely(timekeeping_suspended))
1057 #ifdef CONFIG_GENERIC_TIME
1058 offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
1060 offset = clock->cycle_interval;
1062 clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
1064 /* normally this loop will run just once, however in the
1065 * case of lost or late ticks, it will accumulate correctly.
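* For example, if the timer interrupt was held off for about three ticks,
* offset is roughly four cycle_intervals and the loop below simply runs
* four times, accumulating xtime_nsec and advancing cycle_last each pass.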
1067 while (offset >= clock->cycle_interval) {
1068 /* accumulate one interval */
1069 clock->xtime_nsec += clock->xtime_interval;
1070 clock->cycle_last += clock->cycle_interval;
1071 offset -= clock->cycle_interval;
1073 if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
1074 clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
1079 /* interpolator bits */
1080 time_interpolator_update(clock->xtime_interval
1083 /* accumulate error between NTP and clock interval */
1084 clock->error += current_tick_length();
1085 clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
1088 /* correct the clock when NTP error is too big */
1089 clocksource_adjust(clock, offset);
1091 /* store full nanoseconds into xtime */
1092 xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
1093 clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
1095 /* check to see if there is a new clocksource to use */
1096 if (change_clocksource()) {
1098 clock->xtime_nsec = 0;
1099 clocksource_calculate_interval(clock, tick_nsec);
1104 * Called from the timer interrupt handler to charge one tick to the current
1105 * process. user_tick is 1 if the tick is user time, 0 for system.
1107 void update_process_times(int user_tick)
1109 struct task_struct *p = current;
1110 int cpu = smp_processor_id();
1112 /* Note: this timer irq context must be accounted for as well. */
1114 account_user_time(p, jiffies_to_cputime(1));
1116 account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
1118 if (rcu_pending(cpu))
1119 rcu_check_callbacks(cpu, user_tick);
1121 run_posix_cpu_timers(p);
1125 * Nr of active tasks - counted in fixed-point numbers
1127 static unsigned long count_active_tasks(void)
1129 return nr_active() * FIXED_1;
1133 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
1134 * imply that avenrun[] is the standard name for this kind of thing.
1135 * Nothing else seems to be standardized: the fractional size etc
1136 * all seem to differ on different machines.
1138 * Requires xtime_lock to access.
1140 unsigned long avenrun[3];
1142 EXPORT_SYMBOL(avenrun);
1145 * calc_load - given tick count, update the avenrun load estimates.
1146 * This is called while holding a write_lock on xtime_lock.
1148 static inline void calc_load(unsigned long ticks)
1150 unsigned long active_tasks; /* fixed-point */
1151 static int count = LOAD_FREQ;
1154 if (unlikely(count < 0)) {
1155 active_tasks = count_active_tasks();
1157 CALC_LOAD(avenrun[0], EXP_1, active_tasks);
1158 CALC_LOAD(avenrun[1], EXP_5, active_tasks);
1159 CALC_LOAD(avenrun[2], EXP_15, active_tasks);
1161 } while (count < 0);
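/*
 * A sketch of the fixed-point math above: FIXED_1 is 1 << FSHIFT (2048) and
 * CALC_LOAD(load, exp, n) computes an exponentially decaying average,
 * roughly load = (load * exp + n * (FIXED_1 - exp)) >> FSHIFT, re-evaluated
 * every LOAD_FREQ ticks with exp = FIXED_1 * e^(-5sec/period). That is what
 * turns avenrun[] into the familiar 1/5/15 minute load averages reported by
 * sys_sysinfo() and /proc/loadavg.
 */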
1166 * This seqlock protects us from races in SMP while
1167 * playing with xtime and avenrun.
1169 #ifndef ARCH_HAVE_XTIME_LOCK
1170 __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
1172 EXPORT_SYMBOL(xtime_lock);
1176 * This function runs timers and the timer-tq in bottom half context.
1178 static void run_timer_softirq(struct softirq_action *h)
1180 tvec_base_t *base = __get_cpu_var(tvec_bases);
1182 hrtimer_run_queues();
1183 if (time_after_eq(jiffies, base->timer_jiffies))
1188 * Called by the local, per-CPU timer interrupt on SMP.
1190 void run_local_timers(void)
1192 raise_softirq(TIMER_SOFTIRQ);
1197 * Called by the timer interrupt. xtime_lock must already be taken by the timer IRQ!
1200 static inline void update_times(unsigned long ticks)
1207 * The 64-bit jiffies value is not atomic - you MUST NOT read it
1208 * without sampling the sequence number in xtime_lock.
1209 * jiffies is defined in the linker script...
1212 void do_timer(unsigned long ticks)
1214 jiffies_64 += ticks;
1215 update_times(ticks);
1218 #ifdef __ARCH_WANT_SYS_ALARM
1221 * For backwards compatibility? This can be done in libc so Alpha
1222 * and all newer ports shouldn't need it.
1224 asmlinkage unsigned long sys_alarm(unsigned int seconds)
1226 return alarm_setitimer(seconds);
1233 * sys_getpid - return the thread group id of the current process
1235 * Note, despite the name, this returns the tgid not the pid. The tgid and
1236 * the pid are identical unless CLONE_THREAD was specified on clone() in
1237 * which case the tgid is the same in all threads of the same group.
1239 * This is SMP safe as current->tgid does not change.
1241 asmlinkage long sys_getpid(void)
1243 return vx_map_tgid(current->tgid);
1247 * Accessing ->parent is not SMP-safe, it could
1248 * change from under us. However, we can use a stale
1249 * value of ->parent under rcu_read_lock(), see
1250 * release_task()->call_rcu(delayed_put_task_struct).
1252 asmlinkage long sys_getppid(void)
1257 pid = rcu_dereference(current->parent)->tgid;
1259 return vx_map_pid(pid);
1265 * The Alpha uses getxpid, getxuid, and getxgid instead.
1268 asmlinkage long do_getxpid(long *ppid)
1270 *ppid = sys_getppid();
1271 return sys_getpid();
1276 asmlinkage long sys_getuid(void)
1278 /* Only we change this so SMP safe */
1279 return current->uid;
1282 asmlinkage long sys_geteuid(void)
1284 /* Only we change this so SMP safe */
1285 return current->euid;
1288 asmlinkage long sys_getgid(void)
1290 /* Only we change this so SMP safe */
1291 return current->gid;
1294 asmlinkage long sys_getegid(void)
1296 /* Only we change this so SMP safe */
1297 return current->egid;
1302 static void process_timeout(unsigned long __data)
1304 wake_up_process((struct task_struct *)__data);
1308 * schedule_timeout - sleep until timeout
1309 * @timeout: timeout value in jiffies
1311 * Make the current task sleep until @timeout jiffies have
1312 * elapsed. The routine will return immediately unless
1313 * the current task state has been set (see set_current_state()).
1315 * You can set the task state as follows -
1317 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
1318 * pass before the routine returns. The routine will return 0
1320 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
1321 * delivered to the current task. In this case the remaining time
1322 * in jiffies will be returned, or 0 if the timer expired in time
1324 * The current task state is guaranteed to be TASK_RUNNING when this routine returns.
1327 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
1328 * the CPU away without a bound on the timeout. In this case the return
1329 * value will be %MAX_SCHEDULE_TIMEOUT.
1331 * In all cases the return value is guaranteed to be non-negative.
1333 fastcall signed long __sched schedule_timeout(signed long timeout)
1335 struct timer_list timer;
1336 unsigned long expire;
1340 case MAX_SCHEDULE_TIMEOUT:
1342 * These two special cases are useful to be comfortable
1343 * in the caller. Nothing more. We could take
1344 * MAX_SCHEDULE_TIMEOUT from one of the negative values,
1345 * but I'd like to return a valid offset (>=0) to allow
1346 * the caller to do everything it wants with the retval.
1352 * Another bit of PARANOID. Note that the retval will be
1353 * 0 since no piece of kernel is supposed to do a check
1354 * for a negative retval of schedule_timeout() (since it
1355 * should never happen anyway). You just have the printk()
1356 * that will tell you if something has gone wrong and where.
1359 printk(KERN_ERR "schedule_timeout: wrong timeout "
1360 "value %lx\n", timeout);
1362 current->state = TASK_RUNNING;
1367 expire = timeout + jiffies;
1369 setup_timer(&timer, process_timeout, (unsigned long)current);
1370 __mod_timer(&timer, expire);
1372 del_singleshot_timer_sync(&timer);
1374 timeout = expire - jiffies;
1377 return timeout < 0 ? 0 : timeout;
1379 EXPORT_SYMBOL(schedule_timeout);
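/*
 * Illustrative sketch (not part of the original file): waiting up to one
 * second for some event, the canonical use of the API documented above is
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *
 * where a non-zero "remaining" (a hypothetical local variable) means the
 * task was woken before the timeout, e.g. by a signal or wake_up_process().
 */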
1382 * We can use __set_current_state() here because schedule_timeout() calls
1383 * schedule() unconditionally.
1385 signed long __sched schedule_timeout_interruptible(signed long timeout)
1387 __set_current_state(TASK_INTERRUPTIBLE);
1388 return schedule_timeout(timeout);
1390 EXPORT_SYMBOL(schedule_timeout_interruptible);
1392 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1394 __set_current_state(TASK_UNINTERRUPTIBLE);
1395 return schedule_timeout(timeout);
1397 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1399 /* Thread ID - the internal kernel "pid" */
1400 asmlinkage long sys_gettid(void)
1402 return current->pid;
1406 * sys_sysinfo - fill in sysinfo struct
1407 * @info: pointer to buffer to fill
1409 asmlinkage long sys_sysinfo(struct sysinfo __user *info)
1412 unsigned long mem_total, sav_total;
1413 unsigned int mem_unit, bitcount;
1416 memset((char *)&val, 0, sizeof(struct sysinfo));
1420 seq = read_seqbegin(&xtime_lock);
1423 * This is annoying. The below is the same thing
1424 * posix_get_clock_monotonic() does, but it wants to
1425 * take the lock which we want to cover the loads stuff
1429 getnstimeofday(&tp);
1430 tp.tv_sec += wall_to_monotonic.tv_sec;
1431 tp.tv_nsec += wall_to_monotonic.tv_nsec;
1432 if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
1433 tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
1436 if (vx_flags(VXF_VIRT_UPTIME, 0))
1437 vx_vsi_uptime(&tp, NULL);
1438 val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
1440 val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
1441 val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
1442 val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
1444 val.procs = nr_threads;
1445 } while (read_seqretry(&xtime_lock, seq));
1451 * If the sum of all the available memory (i.e. ram + swap)
1452 * is less than can be stored in a 32 bit unsigned long then
1453 * we can be binary compatible with 2.2.x kernels. If not,
1454 * well, in that case 2.2.x was broken anyways...
1456 * -Erik Andersen <andersee@debian.org>
1459 mem_total = val.totalram + val.totalswap;
1460 if (mem_total < val.totalram || mem_total < val.totalswap)
1463 mem_unit = val.mem_unit;
1464 while (mem_unit > 1) {
1467 sav_total = mem_total;
1469 if (mem_total < sav_total)
1474 * If mem_total did not overflow, multiply all memory values by
1475 * val.mem_unit and set it to 1. This leaves things compatible
1476 * with 2.2.x, and also retains compatibility with earlier 2.4.x kernels.
1481 val.totalram <<= bitcount;
1482 val.freeram <<= bitcount;
1483 val.sharedram <<= bitcount;
1484 val.bufferram <<= bitcount;
1485 val.totalswap <<= bitcount;
1486 val.freeswap <<= bitcount;
1487 val.totalhigh <<= bitcount;
1488 val.freehigh <<= bitcount;
1491 if (copy_to_user(info, &val, sizeof(struct sysinfo)))
1498 * lockdep: we want to track each per-CPU base as a separate lock-class,
1499 * but timer-bases are kmalloc()-ed, so we need to attach separate keys to them:
1502 static struct lock_class_key base_lock_keys[NR_CPUS];
1504 static int __devinit init_timers_cpu(int cpu)
1508 static char __devinitdata tvec_base_done[NR_CPUS];
1510 if (!tvec_base_done[cpu]) {
1511 static char boot_done;
1515 * The APs use this path later in boot
1517 base = kmalloc_node(sizeof(*base), GFP_KERNEL,
1521 memset(base, 0, sizeof(*base));
1522 per_cpu(tvec_bases, cpu) = base;
1525 * This is for the boot CPU - we use compile-time
1526 * static initialisation because per-cpu memory isn't
1527 * ready yet and because the memory allocators are not
1528 * initialised either.
1531 base = &boot_tvec_bases;
1533 tvec_base_done[cpu] = 1;
1535 base = per_cpu(tvec_bases, cpu);
1538 spin_lock_init(&base->lock);
1539 lockdep_set_class(&base->lock, base_lock_keys + cpu);
1541 for (j = 0; j < TVN_SIZE; j++) {
1542 INIT_LIST_HEAD(base->tv5.vec + j);
1543 INIT_LIST_HEAD(base->tv4.vec + j);
1544 INIT_LIST_HEAD(base->tv3.vec + j);
1545 INIT_LIST_HEAD(base->tv2.vec + j);
1547 for (j = 0; j < TVR_SIZE; j++)
1548 INIT_LIST_HEAD(base->tv1.vec + j);
1550 base->timer_jiffies = jiffies;
1554 #ifdef CONFIG_HOTPLUG_CPU
1555 static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
1557 struct timer_list *timer;
1559 while (!list_empty(head)) {
1560 timer = list_entry(head->next, struct timer_list, entry);
1561 detach_timer(timer, 0);
1562 timer->base = new_base;
1563 internal_add_timer(new_base, timer);
1567 static void __devinit migrate_timers(int cpu)
1569 tvec_base_t *old_base;
1570 tvec_base_t *new_base;
1573 BUG_ON(cpu_online(cpu));
1574 old_base = per_cpu(tvec_bases, cpu);
1575 new_base = get_cpu_var(tvec_bases);
1577 local_irq_disable();
1578 spin_lock(&new_base->lock);
1579 spin_lock(&old_base->lock);
1581 BUG_ON(old_base->running_timer);
1583 for (i = 0; i < TVR_SIZE; i++)
1584 migrate_timer_list(new_base, old_base->tv1.vec + i);
1585 for (i = 0; i < TVN_SIZE; i++) {
1586 migrate_timer_list(new_base, old_base->tv2.vec + i);
1587 migrate_timer_list(new_base, old_base->tv3.vec + i);
1588 migrate_timer_list(new_base, old_base->tv4.vec + i);
1589 migrate_timer_list(new_base, old_base->tv5.vec + i);
1592 spin_unlock(&old_base->lock);
1593 spin_unlock(&new_base->lock);
1595 put_cpu_var(tvec_bases);
1597 #endif /* CONFIG_HOTPLUG_CPU */
1599 static int __cpuinit timer_cpu_notify(struct notifier_block *self,
1600 unsigned long action, void *hcpu)
1602 long cpu = (long)hcpu;
1604 case CPU_UP_PREPARE:
1605 if (init_timers_cpu(cpu) < 0)
1608 #ifdef CONFIG_HOTPLUG_CPU
1610 migrate_timers(cpu);
1619 static struct notifier_block __cpuinitdata timers_nb = {
1620 .notifier_call = timer_cpu_notify,
1624 void __init init_timers(void)
1626 int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
1627 (void *)(long)smp_processor_id());
1629 BUG_ON(err == NOTIFY_BAD);
1630 register_cpu_notifier(&timers_nb);
1631 open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
1634 #ifdef CONFIG_TIME_INTERPOLATION
1636 struct time_interpolator *time_interpolator __read_mostly;
1637 static struct time_interpolator *time_interpolator_list __read_mostly;
1638 static DEFINE_SPINLOCK(time_interpolator_lock);
1640 static inline u64 time_interpolator_get_cycles(unsigned int src)
1642 unsigned long (*x)(void);
1646 case TIME_SOURCE_FUNCTION:
1647 x = time_interpolator->addr;
1650 case TIME_SOURCE_MMIO64 :
1651 return readq_relaxed((void __iomem *)time_interpolator->addr);
1653 case TIME_SOURCE_MMIO32 :
1654 return readl_relaxed((void __iomem *)time_interpolator->addr);
1656 default: return get_cycles();
1660 static inline u64 time_interpolator_get_counter(int writelock)
1662 unsigned int src = time_interpolator->source;
1664 if (time_interpolator->jitter)
1670 lcycle = time_interpolator->last_cycle;
1671 now = time_interpolator_get_cycles(src);
1672 if (lcycle && time_after(lcycle, now))
1675 /* When holding the xtime write lock, there's no need
1676 * to add the overhead of the cmpxchg. Readers are
1677 * forced to retry until the write lock is released.
1680 time_interpolator->last_cycle = now;
1683 /* Keep track of the last timer value returned. The use of cmpxchg here
1684 * will cause contention in an SMP environment.
1686 } while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
1690 return time_interpolator_get_cycles(src);
1693 void time_interpolator_reset(void)
1695 time_interpolator->offset = 0;
1696 time_interpolator->last_counter = time_interpolator_get_counter(1);
1699 #define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)
1701 unsigned long time_interpolator_get_offset(void)
1703 /* If we do not have a time interpolator set up then just return zero */
1704 if (!time_interpolator)
1707 return time_interpolator->offset +
1708 GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
1711 #define INTERPOLATOR_ADJUST 65536
1712 #define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST
1714 void time_interpolator_update(long delta_nsec)
1717 unsigned long offset;
1719 /* If there is no time interpolator set up then do nothing */
1720 if (!time_interpolator)
1724 * The interpolator compensates for late ticks by accumulating the late
1725 * time in time_interpolator->offset. A tick earlier than expected will
1726 * lead to a reset of the offset and a corresponding jump of the clock
1727 * forward. Again this only works if the interpolator clock is running
1728 * slightly slower than the regular clock and the tuning logic ensures that.
1732 counter = time_interpolator_get_counter(1);
1733 offset = time_interpolator->offset +
1734 GET_TI_NSECS(counter, time_interpolator);
1736 if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
1737 time_interpolator->offset = offset - delta_nsec;
1739 time_interpolator->skips++;
1740 time_interpolator->ns_skipped += delta_nsec - offset;
1741 time_interpolator->offset = 0;
1743 time_interpolator->last_counter = counter;
1745 /* Tuning logic for time interpolator invoked every minute or so.
1746 * Decrease interpolator clock speed if no skips occurred and an offset is carried.
1747 * Increase interpolator clock speed if we skip too much time.
1749 if (jiffies % INTERPOLATOR_ADJUST == 0)
1751 if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
1752 time_interpolator->nsec_per_cyc--;
1753 if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
1754 time_interpolator->nsec_per_cyc++;
1755 time_interpolator->skips = 0;
1756 time_interpolator->ns_skipped = 0;
1761 is_better_time_interpolator(struct time_interpolator *new)
1763 if (!time_interpolator)
1765 return new->frequency > 2*time_interpolator->frequency ||
1766 (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
1770 register_time_interpolator(struct time_interpolator *ti)
1772 unsigned long flags;
1775 BUG_ON(ti->frequency == 0 || ti->mask == 0);
1777 ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
1778 spin_lock(&time_interpolator_lock);
1779 write_seqlock_irqsave(&xtime_lock, flags);
1780 if (is_better_time_interpolator(ti)) {
1781 time_interpolator = ti;
1782 time_interpolator_reset();
1784 write_sequnlock_irqrestore(&xtime_lock, flags);
1786 ti->next = time_interpolator_list;
1787 time_interpolator_list = ti;
1788 spin_unlock(&time_interpolator_lock);
1792 unregister_time_interpolator(struct time_interpolator *ti)
1794 struct time_interpolator *curr, **prev;
1795 unsigned long flags;
1797 spin_lock(&time_interpolator_lock);
1798 prev = &time_interpolator_list;
1799 for (curr = *prev; curr; curr = curr->next) {
1807 write_seqlock_irqsave(&xtime_lock, flags);
1808 if (ti == time_interpolator) {
1809 /* we lost the best time-interpolator: */
1810 time_interpolator = NULL;
1811 /* find the next-best interpolator */
1812 for (curr = time_interpolator_list; curr; curr = curr->next)
1813 if (is_better_time_interpolator(curr))
1814 time_interpolator = curr;
1815 time_interpolator_reset();
1817 write_sequnlock_irqrestore(&xtime_lock, flags);
1818 spin_unlock(&time_interpolator_lock);
1820 #endif /* CONFIG_TIME_INTERPOLATION */
1823 * msleep - sleep safely even with waitqueue interruptions
1824 * @msecs: Time in milliseconds to sleep for
1826 void msleep(unsigned int msecs)
1828 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1831 timeout = schedule_timeout_uninterruptible(timeout);
1834 EXPORT_SYMBOL(msleep);
1837 * msleep_interruptible - sleep waiting for signals
1838 * @msecs: Time in milliseconds to sleep for
1840 unsigned long msleep_interruptible(unsigned int msecs)
1842 unsigned long timeout = msecs_to_jiffies(msecs) + 1;
1844 while (timeout && !signal_pending(current))
1845 timeout = schedule_timeout_interruptible(timeout);
1846 return jiffies_to_msecs(timeout);
1849 EXPORT_SYMBOL(msleep_interruptible);
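/*
 * Illustrative sketch (not part of the original file): a driver that wants
 * to wait about 50ms but abort promptly on a signal could do
 *
 *	if (msleep_interruptible(50))
 *		return -EINTR;
 *
 * since a non-zero return value means the sleep was cut short by a pending
 * signal.
 */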