/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, kernel timekeeping, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/vs_base.h>
#include <linux/vs_cvirt.h>
#include <linux/vserver/sched.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>
#ifdef CONFIG_TIME_INTERPOLATION
static void time_interpolator_update(long delta_nsec);
#else
#define time_interpolator_update(x)
#endif

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
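
/*
 * Worked example (illustrative, not from the original source): with
 * CONFIG_BASE_SMALL disabled, the root vector tv1 has TVR_SIZE = 256
 * one-jiffy slots and each outer vector tv2..tv5 has TVN_SIZE = 64
 * slots, every level covering 64 times the span of the one below:
 *
 *	tv1: 2^8  jiffies (256 slots x 1 jiffy)
 *	tv2: 2^14 jiffies (64 slots x 256 jiffies each)
 *	tv3: 2^20 jiffies
 *	tv4: 2^26 jiffies
 *	tv5: 2^32 jiffies
 *
 * so together the five levels can index any timeout whose delta fits
 * in a 32-bit jiffies value.
 */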
struct timer_base_s {
	spinlock_t lock;
	struct timer_list *running_timer;
};

typedef struct tvec_s {
	struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
	struct list_head vec[TVR_SIZE];
} tvec_root_t;

struct tvec_t_base_s {
	struct timer_base_s t_base;
	unsigned long timer_jiffies;
	tvec_root_t tv1;
	tvec_t tv2;
	tvec_t tv3;
	tvec_t tv4;
	tvec_t tv5;
} ____cacheline_aligned_in_smp;

typedef struct tvec_t_base_s tvec_base_t;
static DEFINE_PER_CPU(tvec_base_t, tvec_bases);
static inline void set_running_timer(tvec_base_t *base,
					struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->t_base.running_timer = timer;
#endif
}
static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
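
/*
 * Worked example (illustrative, not from the original source), assuming
 * !CONFIG_BASE_SMALL so TVR_BITS = 8 and TVN_BITS = 6.  With
 * base->timer_jiffies == 1000 and a timer whose expires == 1300:
 *
 *	idx = 1300 - 1000 = 300, which is >= TVR_SIZE (256) but below
 *	      1 << (8 + 6) = 16384, so the timer belongs in tv2;
 *	i   = (1300 >> 8) & 63 = 5, i.e. the timer lands on tv2.vec[5].
 *
 * When base->timer_jiffies reaches 1280 (and 1280 & TVR_MASK == 0),
 * cascade() empties tv2.vec[5] and re-adds its timers, so this one is
 * re-filed into tv1.vec[1300 & 255] to expire 20 ticks later.
 */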
typedef struct timer_base_s timer_base_t;
/*
 * Used by TIMER_INITIALIZER, we can't use per_cpu(tvec_bases)
 * at compile time, and we need timer->base to lock the timer.
 */
timer_base_t __init_timer_base
	____cacheline_aligned_in_smp = { .lock = SPIN_LOCK_UNLOCKED };
EXPORT_SYMBOL(__init_timer_base);
/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
	timer->entry.next = NULL;
	timer->base = &per_cpu(tvec_bases, raw_smp_processor_id()).t_base;
}
EXPORT_SYMBOL(init_timer);
static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}
/*
 * We are using hashed locking: holding per_cpu(tvec_bases).t_base.lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static timer_base_t *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
{
	timer_base_t *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}
int __mod_timer(struct timer_list *timer, unsigned long expires)
{
	timer_base_t *base;
	tvec_base_t *new_base;
	unsigned long flags;
	int ret = 0;

	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		ret = 1;
	}

	new_base = &__get_cpu_var(tvec_bases);

	if (base != &new_base->t_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (unlikely(base->running_timer == timer)) {
			/* The timer remains on a former base */
			new_base = container_of(base, tvec_base_t, t_base);
		} else {
			/* See the comment in lock_timer_base() */
			timer->base = NULL;
			spin_unlock(&base->lock);
			spin_lock(&new_base->t_base.lock);
			timer->base = &new_base->t_base;
		}
	}

	timer->expires = expires;
	internal_add_timer(new_base, timer);
	spin_unlock_irqrestore(&new_base->t_base.lock, flags);

	return ret;
}

EXPORT_SYMBOL(__mod_timer);
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	tvec_base_t *base = &per_cpu(tvec_bases, cpu);
	unsigned long flags;

	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->t_base.lock, flags);
	timer->base = &base->t_base;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->t_base.lock, flags);
}
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	BUG_ON(!timer->function);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);
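
/*
 * Example (illustrative sketch, not part of the original file): a
 * watchdog-style user of the API above.  The my_dev structure and
 * poll_hardware() are hypothetical; only the timer calls are real.
 */
#if 0
struct my_dev {
	struct timer_list timer;
	int shutting_down;
};

static void my_watchdog(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;

	if (dev->shutting_down)
		return;
	poll_hardware(dev);
	/* re-arm one tick ahead; mod_timer() also activates an inactive timer */
	mod_timer(&dev->timer, jiffies + HZ);
}

static void my_dev_start(struct my_dev *dev)
{
	setup_timer(&dev->timer, my_watchdog, (unsigned long)dev);
	mod_timer(&dev->timer, jiffies + HZ);
}
#endif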
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	timer_base_t *base;
	unsigned long flags;
	int ret = 0;

	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}

EXPORT_SYMBOL(del_timer);
#ifdef CONFIG_SMP
/*
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	timer_base_t *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
	}
}

EXPORT_SYMBOL(del_timer_sync);
#endif
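
/*
 * Example (illustrative sketch, continuing the hypothetical my_dev
 * example above): the canonical teardown order.  New activations are
 * stopped first, then del_timer_sync() guarantees the handler is no
 * longer running before the object goes away.
 */
#if 0
static void my_dev_stop(struct my_dev *dev)
{
	dev->shutting_down = 1;		/* prevent the handler from re-arming */
	del_timer_sync(&dev->timer);	/* wait out a running handler */
}
#endif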
static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct list_head *head, *curr;

	head = tv->vec + index;
	curr = head->next;
	/*
	 * We are removing _all_ timers from the list, so we don't have to
	 * detach them individually, just clear the list afterwards.
	 */
	while (curr != head) {
		struct timer_list *tmp;

		tmp = list_entry(curr, struct timer_list, entry);
		BUG_ON(tmp->base != &base->t_base);
		curr = curr->next;
		internal_add_timer(base, tmp);
	}
	INIT_LIST_HEAD(head);

	return index;
}
/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
#define INDEX(N) (base->timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK

static inline void __run_timers(tvec_base_t *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->t_base.lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list = LIST_HEAD_INIT(work_list);
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade all the vectors inside the base:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_splice_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_entry(head->next, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;

			set_running_timer(base, timer);
			detach_timer(timer, 1);
			spin_unlock_irq(&base->t_base.lock);
			{
				int preempt_count = preempt_count();
				fn(data);
				if (preempt_count != preempt_count()) {
					printk(KERN_WARNING "huh, entered %p "
						"with preempt_count %08x, exited"
						" with %08x?\n",
						fn, preempt_count,
						preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->t_base.lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->t_base.lock);
}
#ifdef CONFIG_NO_IDLE_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
unsigned long next_timer_interrupt(void)
{
	tvec_base_t *base;
	struct list_head *list;
	struct timer_list *nte;
	unsigned long expires;
	unsigned long hr_expires = MAX_JIFFY_OFFSET;
	ktime_t hr_delta;
	tvec_t *varray[4];
	int i, j;

	hr_delta = hrtimer_get_next_event();
	if (hr_delta.tv64 != KTIME_MAX) {
		struct timespec tsdelta;
		tsdelta = ktime_to_timespec(hr_delta);
		hr_expires = timespec_to_jiffies(&tsdelta);
		if (hr_expires < 3)
			return hr_expires + jiffies;
	}
	hr_expires += jiffies;

	base = &__get_cpu_var(tvec_bases);
	spin_lock(&base->t_base.lock);
	expires = base->timer_jiffies + (LONG_MAX >> 1);
	list = NULL;

	/* Look for timer events in tv1. */
	j = base->timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + j, entry) {
			expires = nte->expires;
			if (j < (base->timer_jiffies & TVR_MASK))
				list = base->tv2.vec + (INDEX(0));
			goto found;
		}
		j = (j + 1) & TVR_MASK;
	} while (j != (base->timer_jiffies & TVR_MASK));

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;
	for (i = 0; i < 4; i++) {
		j = INDEX(i);
		do {
			if (list_empty(varray[i]->vec + j)) {
				j = (j + 1) & TVN_MASK;
				continue;
			}
			list_for_each_entry(nte, varray[i]->vec + j, entry)
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			if (j < (INDEX(i)) && i < 3)
				list = varray[i + 1]->vec + (INDEX(i + 1));
			goto found;
		} while (j != (INDEX(i)));
	}
found:
	if (list) {
		/*
		 * The search wrapped. We need to look at the next list
		 * from next tv element that would cascade into tv element
		 * where we found the timer element.
		 */
		list_for_each_entry(nte, list, entry) {
			if (time_before(nte->expires, expires))
				expires = nte->expires;
		}
	}
	spin_unlock(&base->t_base.lock);

	if (time_before(hr_expires, expires))
		return hr_expires;

	return expires;
}
#endif
/******************************************************************/

/*
 * Timekeeping variables
 */
unsigned long tick_usec = TICK_USEC;		/* USER_HZ period (usec) */
unsigned long tick_nsec = TICK_NSEC;		/* ACTHZ period (nsec) */

/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));

EXPORT_SYMBOL(xtime);

/* Don't completely fail for HZ > 500.  */
int tickadj = 500/HZ ? : 1;		/* microsecs */
/*
 * phase-lock loop variables
 */
/* TIME_ERROR prevents overwriting the CMOS clock */
int time_state = TIME_OK;		/* clock synchronization status	*/
int time_status = STA_UNSYNC;		/* clock status bits		*/
long time_offset;			/* time adjustment (us)		*/
long time_constant = 2;			/* pll time constant		*/
long time_tolerance = MAXFREQ;		/* frequency tolerance (ppm)	*/
long time_precision = 1;		/* clock precision (us)		*/
long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us)		*/
long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us)		*/
static long time_phase;			/* phase offset (scaled us)	*/
long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
					/* frequency offset (scaled ppm)*/
static long time_adj;			/* tick adjust (scaled 1 / HZ)	*/
long time_reftime;			/* time at last adjustment (s)	*/
long time_adjust;			/* remaining adjtime() slew (us) */
long time_next_adjust;
/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 */
static void second_overflow(void)
{
	long ltemp;

	/* Bump the maxerror field */
	time_maxerror += time_tolerance >> SHIFT_USEC;
	if (time_maxerror > NTP_PHASE_LIMIT) {
		time_maxerror = NTP_PHASE_LIMIT;
		time_status |= STA_UNSYNC;
	}

	/*
	 * Leap second processing. If in leap-insert state at the end of the
	 * day, the system clock is set back one second; if in leap-delete
	 * state, the system clock is set ahead one second. The microtime()
	 * routine or external clock driver will ensure that reported time is
	 * always monotonic. The ugly divides should be replaced.
	 */
	switch (time_state) {
	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;
	case TIME_INS:
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			wall_to_monotonic.tv_sec++;
			/*
			 * The timer interpolator will make time change
			 * gradually instead of an immediate jump by one second
			 */
			time_interpolator_update(-NSEC_PER_SEC);
			time_state = TIME_OOP;
			clock_was_set();
			printk(KERN_NOTICE "Clock: inserting leap second "
					"23:59:60 UTC\n");
		}
		break;
	case TIME_DEL:
		if ((xtime.tv_sec + 1) % 86400 == 0) {
			xtime.tv_sec++;
			wall_to_monotonic.tv_sec--;
			/*
			 * Use of time interpolator for a gradual change of
			 * time
			 */
			time_interpolator_update(NSEC_PER_SEC);
			time_state = TIME_WAIT;
			clock_was_set();
			printk(KERN_NOTICE "Clock: deleting leap second "
					"23:59:59 UTC\n");
		}
		break;
	case TIME_OOP:
		time_state = TIME_WAIT;
		break;
	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Compute the phase adjustment for the next second. In PLL mode, the
	 * offset is reduced by a fixed factor times the time constant. In FLL
	 * mode the offset is used directly. In either mode, the maximum phase
	 * adjustment for each second is clamped so as to spread the adjustment
	 * over not more than the number of seconds between updates.
	 */
	ltemp = time_offset;
	if (!(time_status & STA_FLL))
		ltemp = shift_right(ltemp, SHIFT_KG + time_constant);
	ltemp = min(ltemp, (MAXPHASE / MINSEC) << SHIFT_UPDATE);
	ltemp = max(ltemp, -(MAXPHASE / MINSEC) << SHIFT_UPDATE);
	time_offset -= ltemp;
	time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);

	/*
	 * Compute the frequency estimate and additional phase adjustment due
	 * to frequency error for the next second. When the PPS signal is
	 * engaged, gnaw on the watchdog counter and update the frequency
	 * computed by the pll and the PPS signal.
	 */
	pps_valid++;
	if (pps_valid == PPS_VALID) {	/* PPS signal lost */
		pps_jitter = MAXTIME;
		pps_stabil = MAXFREQ;
		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				 STA_PPSWANDER | STA_PPSERROR);
	}
	ltemp = time_freq + pps_freq;
	time_adj += shift_right(ltemp,(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE));

#if HZ == 100
	/*
	 * Compensate for (HZ==100) != (1 << SHIFT_HZ). Add 25% and 3.125% to
	 * get 128.125; => only 0.125% error (p. 14)
	 */
	time_adj += shift_right(time_adj, 2) + shift_right(time_adj, 5);
#endif
#if HZ == 250
	/*
	 * Compensate for (HZ==250) != (1 << SHIFT_HZ). Add 1.5625% and
	 * 0.78125% to get 255.85938; => only 0.05% error (p. 14)
	 */
	time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
#if HZ == 1000
	/*
	 * Compensate for (HZ==1000) != (1 << SHIFT_HZ). Add 1.5625% and
	 * 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
	 */
	time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
}
/*
 * Returns how many microseconds we need to add to xtime this tick
 * in doing an adjustment requested with adjtime.
 */
static long adjtime_adjustment(void)
{
	long time_adjust_step;

	time_adjust_step = time_adjust;
	if (time_adjust_step) {
		/*
		 * We are doing an adjtime thing. Prepare time_adjust_step to
		 * be within bounds. Note that a positive time_adjust means we
		 * want the clock to run faster.
		 *
		 * Limit the amount of the step to be in the range
		 * -tickadj .. +tickadj
		 */
		time_adjust_step = min(time_adjust_step, (long)tickadj);
		time_adjust_step = max(time_adjust_step, (long)-tickadj);
	}
	return time_adjust_step;
}
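
/*
 * Worked example (illustrative, not from the original source): with
 * HZ == 1000, tickadj evaluates to 500/HZ ?: 1 == 1 microsecond per
 * tick.  An adjtime() request to slew the clock by +1000 us sets
 * time_adjust = 1000, and the clamp above pays it out at 1 us per
 * tick: each tick update_wall_time_one_tick() adds 1 * 1000 = 1000 ns
 * extra to xtime, so the full slew completes after 1000 ticks, about
 * one second.
 */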
/* in the NTP reference this is called "hardclock()" */
static void update_wall_time_one_tick(void)
{
	long time_adjust_step, delta_nsec;

	time_adjust_step = adjtime_adjustment();
	if (time_adjust_step)
		/* Reduce by this step the amount of time left */
		time_adjust -= time_adjust_step;
	delta_nsec = tick_nsec + time_adjust_step * 1000;
	/*
	 * Advance the phase, once it gets to one microsecond, then
	 * advance the tick more.
	 */
	time_phase += time_adj;
	if ((time_phase >= FINENSEC) || (time_phase <= -FINENSEC)) {
		long ltemp = shift_right(time_phase, (SHIFT_SCALE - 10));
		time_phase -= ltemp << (SHIFT_SCALE - 10);
		delta_nsec += ltemp;
	}
	xtime.tv_nsec += delta_nsec;
	time_interpolator_update(delta_nsec);

	/* Changes by adjtime() do not take effect till next tick. */
	if (time_next_adjust != 0) {
		time_adjust = time_next_adjust;
		time_next_adjust = 0;
	}
}
/*
 * Return how long ticks are at the moment, that is, how much time
 * update_wall_time_one_tick will add to xtime next time we call it
 * (assuming no calls to do_adjtimex in the meantime).
 * The return value is in fixed-point nanoseconds with SHIFT_SCALE-10
 * bits to the right of the binary point.
 * This function has no side-effects.
 */
u64 current_tick_length(void)
{
	long delta_nsec;

	delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
	return ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj;
}
/*
 * Using a loop looks inefficient, but "ticks" is
 * usually just one (we shouldn't be losing ticks,
 * we're doing it this way mainly for interrupt
 * latency reasons, not because we think we'll
 * have lots of lost timer ticks).
 */
static void update_wall_time(unsigned long ticks)
{
	do {
		ticks--;
		update_wall_time_one_tick();
		if (xtime.tv_nsec >= 1000000000) {
			xtime.tv_nsec -= 1000000000;
			xtime.tv_sec++;
			second_overflow();
		}
	} while (ticks);
}
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	if (user_tick)
		account_user_time(p, jiffies_to_cputime(1));
	else
		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	scheduler_tick();
	run_posix_cpu_timers(p);
}
/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	return (nr_running() + nr_uninterruptible()) * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);
/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (count < 0) {
		count += LOAD_FREQ;
		active_tasks = count_active_tasks();
		CALC_LOAD(avenrun[0], EXP_1, active_tasks);
		CALC_LOAD(avenrun[1], EXP_5, active_tasks);
		CALC_LOAD(avenrun[2], EXP_15, active_tasks);
	}
}
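
/*
 * Worked example (illustrative, not from the original source): the
 * load values are fixed-point with FSHIFT == 11 fractional bits, so a
 * single runnable task contributes FIXED_1 == 2048.  Roughly every
 * five seconds (LOAD_FREQ) the three averages decay toward the
 * instantaneous task count using the exponential factors EXP_1, EXP_5
 * and EXP_15, yielding the familiar 1/5/15 minute load averages
 * exported through avenrun[].
 */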
/* jiffies at the most recent update of wall time */
unsigned long wall_jiffies = INITIAL_JIFFIES;

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
#ifndef ARCH_HAVE_XTIME_LOCK
seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

EXPORT_SYMBOL(xtime_lock);
#endif
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	tvec_base_t *base = &__get_cpu_var(tvec_bases);

	hrtimer_run_queues();
	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	raise_softirq(TIMER_SOFTIRQ);
}
/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(void)
{
	unsigned long ticks;

	ticks = jiffies - wall_jiffies;
	if (ticks) {
		wall_jiffies += ticks;
		update_wall_time(ticks);
	}
	calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(struct pt_regs *regs)
{
	jiffies_64++;
	/* prevent loading jiffies before storing new jiffies_64 value. */
	barrier();
	update_times();
	softlockup_tick(regs);
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
	struct itimerval it_new, it_old;
	unsigned int oldalarm;

	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	do_setitimer(ITIMER_REAL, &it_new, &it_old);
	oldalarm = it_old.it_value.tv_sec;
	/* ehhh.. We can't return 0 if we have an alarm pending.. */
	/* And we'd better return too much than too little anyway */
	if ((!oldalarm && it_old.it_value.tv_usec) || it_old.it_value.tv_usec >= 500000)
		oldalarm++;
	return oldalarm;
}

#endif
/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
	return vx_map_tgid(current->tgid);
}
/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
asmlinkage long sys_getppid(void)
{
	int pid;

	rcu_read_lock();
	pid = rcu_dereference(current->real_parent)->tgid;
	rcu_read_unlock();

	return vx_map_pid(pid);
}
#ifdef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.
 */

asmlinkage long do_getxpid(long *ppid)
{
	*ppid = sys_getppid();
	return sys_getpid();
}

#else /* _alpha_ */

asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current->egid;
}

#endif
static void process_timeout(unsigned long __data)
{
	wake_up_process((task_t *)__data);
}
/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0)
		{
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx from %p\n", timeout,
				__builtin_return_address(0));
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire);
	schedule();
	del_singleshot_timer_sync(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
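
/*
 * Example (illustrative sketch, not part of the original file):
 * waiting up to one second for a condition.  The task state must be
 * set *before* the condition is checked, otherwise a wakeup between
 * the check and schedule_timeout() could be lost.
 */
#if 0
static long wait_for_flag(volatile int *done)
{
	signed long t = HZ;	/* wait at most one second */

	set_current_state(TASK_INTERRUPTIBLE);
	while (!*done && t && !signal_pending(current)) {
		t = schedule_timeout(t);
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return *done ? 0 : -ETIMEDOUT;
}
#endif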
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
	return current->pid;
}
/*
 * sys_sysinfo - fill in sysinfo struct
 */
asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
	struct sysinfo val;
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	unsigned long seq;

	memset((char *)&val, 0, sizeof(struct sysinfo));

	do {
		struct timespec tp;
		seq = read_seqbegin(&xtime_lock);

		/*
		 * This is annoying. The below is the same thing
		 * posix_get_clock_monotonic() does, but it wants to
		 * take the lock which we want to cover the loads stuff
		 * too.
		 */

		getnstimeofday(&tp);
		tp.tv_sec += wall_to_monotonic.tv_sec;
		tp.tv_nsec += wall_to_monotonic.tv_nsec;
		if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
			tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
			tp.tv_sec++;
		}
		if (vx_flags(VXF_VIRT_UPTIME, 0))
			vx_vsi_uptime(&tp, NULL);
		val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

		val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
		val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
		val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

		val.procs = nr_threads;
	} while (read_seqretry(&xtime_lock, seq));

	si_meminfo(&val);
	si_swapinfo(&val);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = val.totalram + val.totalswap;
	if (mem_total < val.totalram || mem_total < val.totalswap)
		goto out;
	bitcount = 0;
	mem_unit = val.mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * val.mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	val.mem_unit = 1;
	val.totalram <<= bitcount;
	val.freeram <<= bitcount;
	val.sharedram <<= bitcount;
	val.bufferram <<= bitcount;
	val.totalswap <<= bitcount;
	val.freeswap <<= bitcount;
	val.totalhigh <<= bitcount;
	val.freehigh <<= bitcount;

 out:
	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
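
/*
 * Worked example (illustrative, not from the original source) for the
 * mem_unit normalization above: on a machine with 4 KiB pages,
 * si_meminfo() reports val.mem_unit == 4096 and all memory sizes in
 * pages.  The overflow-check loop then yields bitcount == 12, and as
 * long as mem_total << 12 still fits in an unsigned long, every field
 * is shifted up into plain bytes with mem_unit set to 1 - the layout
 * that 2.2.x era binaries expect.
 */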
static void __devinit init_timers_cpu(int cpu)
{
	int j;
	tvec_base_t *base;

	base = &per_cpu(tvec_bases, cpu);
	spin_lock_init(&base->t_base.lock);
	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
}
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_entry(head->next, struct timer_list, entry);
		detach_timer(timer, 0);
		timer->base = &new_base->t_base;
		internal_add_timer(new_base, timer);
	}
}

static void __devinit migrate_timers(int cpu)
{
	tvec_base_t *old_base;
	tvec_base_t *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = &per_cpu(tvec_bases, cpu);
	new_base = &get_cpu_var(tvec_bases);

	local_irq_disable();
	spin_lock(&new_base->t_base.lock);
	spin_lock(&old_base->t_base.lock);

	if (old_base->t_base.running_timer)
		BUG();
	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->t_base.lock);
	spin_unlock(&new_base->t_base.lock);
	local_irq_enable();
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __devinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	switch(action) {
	case CPU_UP_PREPARE:
		init_timers_cpu(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};


void __init init_timers(void)
{
	timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}
#ifdef CONFIG_TIME_INTERPOLATION

struct time_interpolator *time_interpolator __read_mostly;
static struct time_interpolator *time_interpolator_list __read_mostly;
static DEFINE_SPINLOCK(time_interpolator_lock);

static inline u64 time_interpolator_get_cycles(unsigned int src)
{
	unsigned long (*x)(void);

	switch (src)
	{
		case TIME_SOURCE_FUNCTION:
			x = time_interpolator->addr;
			return x();

		case TIME_SOURCE_MMIO64:
			return readq_relaxed((void __iomem *)time_interpolator->addr);

		case TIME_SOURCE_MMIO32:
			return readl_relaxed((void __iomem *)time_interpolator->addr);

		default: return get_cycles();
	}
}

static inline u64 time_interpolator_get_counter(int writelock)
{
	unsigned int src = time_interpolator->source;

	if (time_interpolator->jitter)
	{
		u64 lcycle;
		u64 now;

		do {
			lcycle = time_interpolator->last_cycle;
			now = time_interpolator_get_cycles(src);
			if (lcycle && time_after(lcycle, now))
				return lcycle;

			/* When holding the xtime write lock, there's no need
			 * to add the overhead of the cmpxchg.  Readers are
			 * forced to retry until the write lock is released.
			 */
			if (writelock) {
				time_interpolator->last_cycle = now;
				return now;
			}
			/* Keep track of the last timer value returned. The use of cmpxchg
			 * here will cause contention in an SMP environment.
			 */
		} while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
		return now;
	}
	else
		return time_interpolator_get_cycles(src);
}
void time_interpolator_reset(void)
{
	time_interpolator->offset = 0;
	time_interpolator->last_counter = time_interpolator_get_counter(1);
}

#define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)

unsigned long time_interpolator_get_offset(void)
{
	/* If we do not have a time interpolator set up then just return zero */
	if (!time_interpolator)
		return 0;

	return time_interpolator->offset +
		GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
}

#define INTERPOLATOR_ADJUST 65536
#define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST
static void time_interpolator_update(long delta_nsec)
{
	u64 counter;
	unsigned long offset;

	/* If there is no time interpolator set up then do nothing */
	if (!time_interpolator)
		return;

	/*
	 * The interpolator compensates for late ticks by accumulating the late
	 * time in time_interpolator->offset. A tick earlier than expected will
	 * lead to a reset of the offset and a corresponding jump of the clock
	 * forward. Again this only works if the interpolator clock is running
	 * slightly slower than the regular clock and the tuning logic ensures
	 * that.
	 */

	counter = time_interpolator_get_counter(1);
	offset = time_interpolator->offset +
			GET_TI_NSECS(counter, time_interpolator);

	if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
		time_interpolator->offset = offset - delta_nsec;
	else {
		time_interpolator->skips++;
		time_interpolator->ns_skipped += delta_nsec - offset;
		time_interpolator->offset = 0;
	}
	time_interpolator->last_counter = counter;

	/* Tuning logic for time interpolator invoked every minute or so.
	 * Decrease interpolator clock speed if no skips occurred and an offset is carried.
	 * Increase interpolator clock speed if we skip too much time.
	 */
	if (jiffies % INTERPOLATOR_ADJUST == 0)
	{
		if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC)
			time_interpolator->nsec_per_cyc--;
		if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
			time_interpolator->nsec_per_cyc++;
		time_interpolator->skips = 0;
		time_interpolator->ns_skipped = 0;
	}
}
static inline int
is_better_time_interpolator(struct time_interpolator *new)
{
	if (!time_interpolator)
		return 1;
	return new->frequency > 2*time_interpolator->frequency ||
	    (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
}

void
register_time_interpolator(struct time_interpolator *ti)
{
	unsigned long flags;

	/* Sanity check */
	if (ti->frequency == 0 || ti->mask == 0)
		return;

	ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
	spin_lock(&time_interpolator_lock);
	write_seqlock_irqsave(&xtime_lock, flags);
	if (is_better_time_interpolator(ti)) {
		time_interpolator = ti;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);

	ti->next = time_interpolator_list;
	time_interpolator_list = ti;
	spin_unlock(&time_interpolator_lock);
}
void
unregister_time_interpolator(struct time_interpolator *ti)
{
	struct time_interpolator *curr, **prev;
	unsigned long flags;

	spin_lock(&time_interpolator_lock);
	prev = &time_interpolator_list;
	for (curr = *prev; curr; curr = curr->next) {
		if (curr == ti) {
			*prev = curr->next;
			break;
		}
		prev = &curr->next;
	}

	write_seqlock_irqsave(&xtime_lock, flags);
	if (ti == time_interpolator) {
		/* we lost the best time-interpolator: */
		time_interpolator = NULL;
		/* find the next-best interpolator */
		for (curr = time_interpolator_list; curr; curr = curr->next)
			if (is_better_time_interpolator(curr))
				time_interpolator = curr;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);
	spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);