/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, kernel timekeeping, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/vs_cvirt.h>
#include <linux/vserver/sched.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/diskdump.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#ifdef CONFIG_TIME_INTERPOLATION
static void time_interpolator_update(long delta_nsec);
#else
#define time_interpolator_update(x)
#endif
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
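/*
 * Worked example of the resulting wheel geometry (derived from
 * internal_add_timer() below, assuming CONFIG_BASE_SMALL=0, so
 * TVR_BITS=8 and TVN_BITS=6).  Each vector covers a range of
 * idx = (expires - timer_jiffies) values:
 *
 *	tv1: idx < 2^8    256 slots, 1 jiffy per slot
 *	tv2: idx < 2^14    64 slots, 2^8 jiffies per slot
 *	tv3: idx < 2^20    64 slots, 2^14 jiffies per slot
 *	tv4: idx < 2^26    64 slots, 2^20 jiffies per slot
 *	tv5: the rest      64 slots, 2^26 jiffies per slot (clamped to
 *	     0xffffffff jiffies on 64-bit architectures)
 */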
typedef struct tvec_s {
	struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
	struct list_head vec[TVR_SIZE];
} tvec_root_t;

struct tvec_t_base_s {
	spinlock_t lock;
	unsigned long timer_jiffies;
	struct timer_list *running_timer;
	tvec_root_t tv1;
	tvec_t tv2;
	tvec_t tv3;
	tvec_t tv4;
	tvec_t tv5;
} ____cacheline_aligned_in_smp;

typedef struct tvec_t_base_s tvec_base_t;
static inline void set_running_timer(tvec_base_t *base,
					struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->running_timer = timer;
#endif
}
/* Fake initialization */
static DEFINE_PER_CPU(tvec_base_t, tvec_bases) = { SPIN_LOCK_UNLOCKED };
static void check_timer_failed(struct timer_list *timer)
{
	static int whine_count;
	if (whine_count < 16) {
		whine_count++;
		printk("Uninitialised timer!\n");
		printk("This is just a warning.  Your computer is OK\n");
		printk("function=0x%p, data=0x%lx\n",
			timer->function, timer->data);
		dump_stack();
	}
	/*
	 * Now fix it up
	 */
	spin_lock_init(&timer->lock);
	timer->magic = TIMER_MAGIC;
}

static inline void check_timer(struct timer_list *timer)
{
	if (timer->magic != TIMER_MAGIC)
		check_timer_failed(timer);
}
static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
int __mod_timer(struct timer_list *timer, unsigned long expires)
{
	tvec_base_t *old_base, *new_base;
	unsigned long flags;
	int ret = 0;

	BUG_ON(!timer->function);

	check_timer(timer);

	spin_lock_irqsave(&timer->lock, flags);
	new_base = &__get_cpu_var(tvec_bases);
repeat:
	old_base = timer->base;

	/*
	 * Prevent deadlocks via ordering by old_base < new_base.
	 */
	if (old_base && (new_base != old_base)) {
		if (old_base < new_base) {
			spin_lock(&new_base->lock);
			spin_lock(&old_base->lock);
		} else {
			spin_lock(&old_base->lock);
			spin_lock(&new_base->lock);
		}
		/*
		 * The timer base might have been cancelled while we were
		 * trying to take the lock(s):
		 */
		if (timer->base != old_base) {
			spin_unlock(&new_base->lock);
			spin_unlock(&old_base->lock);
			goto repeat;
		}
	} else {
		spin_lock(&new_base->lock);
		if (timer->base != old_base) {
			spin_unlock(&new_base->lock);
			goto repeat;
		}
	}

	/*
	 * Delete the previous timeout (if there was any), and install
	 * the new one:
	 */
	if (old_base) {
		list_del(&timer->entry);
		ret = 1;
	}
	timer->expires = expires;
	internal_add_timer(new_base, timer);
	timer->base = new_base;

	if (old_base && (new_base != old_base))
		spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);
	spin_unlock_irqrestore(&timer->lock, flags);

	return ret;
}

EXPORT_SYMBOL(__mod_timer);
/***
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	tvec_base_t *base = &per_cpu(tvec_bases, cpu);
	unsigned long flags;

	BUG_ON(timer_pending(timer) || !timer->function);

	check_timer(timer);

	spin_lock_irqsave(&base->lock, flags);
	internal_add_timer(base, timer);
	timer->base = base;
	spin_unlock_irqrestore(&base->lock, flags);
}
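/*
 * Example (illustrative sketch only; my_timer and my_handler are
 * hypothetical, not part of this file): arming a timer so its handler
 * runs on CPU 1 roughly one second from now:
 *
 *	static struct timer_list my_timer;
 *
 *	init_timer(&my_timer);
 *	my_timer.function = my_handler;
 *	my_timer.data = 0;
 *	my_timer.expires = jiffies + HZ;
 *	add_timer_on(&my_timer, 1);
 */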
/***
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: the new timeout in jiffies
 *
 * mod_timer is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	BUG_ON(!timer->function);

	check_timer(timer);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires);
}

EXPORT_SYMBOL(mod_timer);
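/*
 * Example (illustrative sketch only; my_watchdog is a hypothetical,
 * already initialized timer): push a watchdog timeout two seconds into
 * the future on every event, activating the timer if it is not pending:
 *
 *	mod_timer(&my_watchdog, jiffies + 2 * HZ);
 */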
/***
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	unsigned long flags;
	tvec_base_t *base;

	check_timer(timer);

repeat:
	base = timer->base;
	if (!base)
		return 0;
	spin_lock_irqsave(&base->lock, flags);
	if (base != timer->base) {
		spin_unlock_irqrestore(&base->lock, flags);
		goto repeat;
	}
	list_del(&timer->entry);
	/* Need to make sure that anybody who sees a NULL base also sees the list ops */
	smp_wmb();
	timer->base = NULL;
	spin_unlock_irqrestore(&base->lock, flags);

	return 1;
}

EXPORT_SYMBOL(del_timer);
#ifdef CONFIG_SMP
/***
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. Upon exit the timer is not queued and
 * the handler is not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 *
 * del_timer_sync() is slow and complicated because it copes with timer
 * handlers which re-arm the timer (periodic timers). If the timer handler
 * is known to not do this (a single shot timer) then use
 * del_singleshot_timer_sync() instead.
 */
int del_timer_sync(struct timer_list *timer)
{
	tvec_base_t *base;
	int i, ret = 0;

	check_timer(timer);

del_again:
	ret += del_timer(timer);

	for_each_online_cpu(i) {
		base = &per_cpu(tvec_bases, i);
		if (base->running_timer == timer) {
			while (base->running_timer == timer) {
				cpu_relax();
				preempt_check_resched();
			}
			break;
		}
	}
	smp_rmb();
	if (timer_pending(timer))
		goto del_again;

	return ret;
}

EXPORT_SYMBOL(del_timer_sync);
/***
 * del_singleshot_timer_sync - deactivate a non-recursive timer
 * @timer: the timer to be deactivated
 *
 * This function is an optimization of del_timer_sync for the case where the
 * caller can guarantee the timer does not reschedule itself in its timer
 * function.
 *
 * Synchronization rules: callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. Upon exit the timer is not queued and
 * the handler is not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_singleshot_timer_sync(struct timer_list *timer)
{
	int ret = del_timer(timer);

	if (!ret) {
		ret = del_timer_sync(timer);
		BUG_ON(ret);
	}

	return ret;
}

EXPORT_SYMBOL(del_singleshot_timer_sync);
#endif
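/*
 * Example (illustrative sketch only; my_dev, my_poll_fn and
 * poll_hardware() are hypothetical): a handler that re-arms itself must
 * be torn down with del_timer_sync(), while a true one-shot timeout may
 * use the cheaper del_singleshot_timer_sync():
 *
 *	static void my_poll_fn(unsigned long data)
 *	{
 *		poll_hardware();
 *		if (!my_dev.stop_polling)
 *			mod_timer(&my_dev.poll_timer, jiffies + HZ);
 *	}
 *
 *	// teardown: prevent re-arming first, then wait for the handler
 *	my_dev.stop_polling = 1;
 *	del_timer_sync(&my_dev.poll_timer);
 */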
static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct list_head *head, *curr;

	head = tv->vec + index;
	curr = head->next;
	/*
	 * We are removing _all_ timers from the list, so we don't have to
	 * detach them individually, just clear the list afterwards.
	 */
	while (curr != head) {
		struct timer_list *tmp;

		tmp = list_entry(curr, struct timer_list, entry);
		BUG_ON(tmp->base != base);
		curr = curr->next;
		internal_add_timer(base, tmp);
	}
	INIT_LIST_HEAD(head);

	return index;
}
/***
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
#define INDEX(N) (base->timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK

static inline void __run_timers(tvec_base_t *base)
{
	struct timer_list *timer;
	unsigned long flags;

	spin_lock_irqsave(&base->lock, flags);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list = LIST_HEAD_INIT(work_list);
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_splice_init(base->tv1.vec + index, &work_list);
repeat:
		if (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_entry(head->next,struct timer_list,entry);
			fn = timer->function;
			data = timer->data;

			list_del(&timer->entry);
			set_running_timer(base, timer);
			smp_wmb();
			timer->base = NULL;
			spin_unlock_irqrestore(&base->lock, flags);
			{
				u32 preempt_count = preempt_count();
				fn(data);
				if (preempt_count != preempt_count()) {
					printk("huh, entered %p with %08x, exited with %08x?\n",
						fn, preempt_count, preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->lock);
			goto repeat;
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irqrestore(&base->lock, flags);
}
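/*
 * Worked example of the cascade above: a timer armed for
 * timer_jiffies + 300 first lands in tv2 (since 256 <= 300 < 2^14).
 * Each time the low TVR_BITS bits of base->timer_jiffies wrap to zero,
 * one tv2 slot is emptied; when INDEX(0) reaches the slot holding this
 * timer, cascade() re-inserts it via internal_add_timer(), which now
 * finds idx < 256 and places it in the precise tv1 bucket, where it
 * finally expires on the exact jiffy.
 */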
#ifdef CONFIG_NO_IDLE_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
unsigned long next_timer_interrupt(void)
{
	tvec_base_t *base;
	struct list_head *list;
	struct timer_list *nte;
	unsigned long expires;
	tvec_t *varray[4];
	int i, j;

	base = &__get_cpu_var(tvec_bases);
	spin_lock(&base->lock);
	expires = base->timer_jiffies + (LONG_MAX >> 1);
	list = NULL;

	/* Look for timer events in tv1. */
	j = base->timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + j, entry) {
			expires = nte->expires;
			if (j < (base->timer_jiffies & TVR_MASK))
				list = base->tv2.vec + (INDEX(0));
			goto found;
		}
		j = (j + 1) & TVR_MASK;
	} while (j != (base->timer_jiffies & TVR_MASK));

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;
	for (i = 0; i < 4; i++) {
		j = INDEX(i);
		do {
			if (list_empty(varray[i]->vec + j)) {
				j = (j + 1) & TVN_MASK;
				continue;
			}
			list_for_each_entry(nte, varray[i]->vec + j, entry)
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			if (j < (INDEX(i)) && i < 3)
				list = varray[i + 1]->vec + (INDEX(i + 1));
			goto found;
		} while (j != (INDEX(i)));
	}
found:
	if (list) {
		/*
		 * The search wrapped. We need to look at the next list
		 * from next tv element that would cascade into tv element
		 * where we found the timer element.
		 */
		list_for_each_entry(nte, list, entry) {
			if (time_before(nte->expires, expires))
				expires = nte->expires;
		}
	}
	spin_unlock(&base->lock);
	return expires;
}
#endif
/******************************************************************/

/*
 * Timekeeping variables
 */
unsigned long tick_usec = TICK_USEC;		/* USER_HZ period (usec) */
unsigned long tick_nsec = TICK_NSEC;		/* ACTHZ period (nsec) */

/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffy times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));

EXPORT_SYMBOL(xtime);

/* Don't completely fail for HZ > 500.  */
int tickadj = 500/HZ ? : 1;		/* microsecs */
/*
 * phase-lock loop variables
 */
/* TIME_ERROR prevents overwriting the CMOS clock */
int time_state = TIME_OK;		/* clock synchronization status	*/
int time_status = STA_UNSYNC;		/* clock status bits		*/
long time_offset;			/* time adjustment (us)		*/
long time_constant = 2;			/* pll time constant		*/
long time_tolerance = MAXFREQ;		/* frequency tolerance (ppm)	*/
long time_precision = 1;		/* clock precision (us)		*/
long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us)		*/
long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us)		*/
static long time_phase;			/* phase offset (scaled us)	*/
long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
					/* frequency offset (scaled ppm)*/
static long time_adj;			/* tick adjust (scaled 1 / HZ)	*/
long time_reftime;			/* time at last adjustment (s)	*/

long time_adjust;
long time_next_adjust;
/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 */
static void second_overflow(void)
{
	long ltemp;

	/* Bump the maxerror field */
	time_maxerror += time_tolerance >> SHIFT_USEC;
	if (time_maxerror > NTP_PHASE_LIMIT) {
		time_maxerror = NTP_PHASE_LIMIT;
		time_status |= STA_UNSYNC;
	}

	/*
	 * Leap second processing. If in leap-insert state at
	 * the end of the day, the system clock is set back one
	 * second; if in leap-delete state, the system clock is
	 * set ahead one second. The microtime() routine or
	 * external clock driver will ensure that reported time
	 * is always monotonic. The ugly divides should be
	 * replaced.
	 */
	switch (time_state) {

	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;

	case TIME_INS:
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			wall_to_monotonic.tv_sec++;
			/* The timer interpolator will make time change gradually instead
			 * of an immediate jump by one second.
			 */
			time_interpolator_update(-NSEC_PER_SEC);
			time_state = TIME_OOP;
			clock_was_set();
			printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
		}
		break;

	case TIME_DEL:
		if ((xtime.tv_sec + 1) % 86400 == 0) {
			xtime.tv_sec++;
			wall_to_monotonic.tv_sec--;
			/* Use of time interpolator for a gradual change of time */
			time_interpolator_update(NSEC_PER_SEC);
			time_state = TIME_WAIT;
			clock_was_set();
			printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
		}
		break;

	case TIME_OOP:
		time_state = TIME_WAIT;
		break;

	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}
	/*
	 * Compute the phase adjustment for the next second. In
	 * PLL mode, the offset is reduced by a fixed factor
	 * times the time constant. In FLL mode the offset is
	 * used directly. In either mode, the maximum phase
	 * adjustment for each second is clamped so as to spread
	 * the adjustment over not more than the number of
	 * seconds between updates.
	 */
	if (time_offset < 0) {
		ltemp = -time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
		time_offset += ltemp;
#if SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE > 0
		time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
#else
		time_adj = -ltemp >> (SHIFT_HZ + SHIFT_UPDATE - SHIFT_SCALE);
#endif
	} else {
		ltemp = time_offset;
		if (!(time_status & STA_FLL))
			ltemp >>= SHIFT_KG + time_constant;
		if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
			ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
		time_offset -= ltemp;
#if SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE > 0
		time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
#else
		time_adj = ltemp >> (SHIFT_HZ + SHIFT_UPDATE - SHIFT_SCALE);
#endif
	}
	/*
	 * Compute the frequency estimate and additional phase
	 * adjustment due to frequency error for the next
	 * second. When the PPS signal is engaged, gnaw on the
	 * watchdog counter and update the frequency computed by
	 * the pll and the PPS signal.
	 */
	pps_valid++;
	if (pps_valid == PPS_VALID) {	/* PPS signal lost */
		pps_jitter = MAXTIME;
		pps_stabil = MAXFREQ;
		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				 STA_PPSWANDER | STA_PPSERROR);
	}
	ltemp = time_freq + pps_freq;
	if (ltemp < 0)
		time_adj -= -ltemp >>
		    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
	else
		time_adj += ltemp >>
		    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

#if HZ == 100
	/* Compensate for (HZ==100) != (1 << SHIFT_HZ).
	 * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
	 */
	if (time_adj < 0)
		time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
	else
		time_adj += (time_adj >> 2) + (time_adj >> 5);
#endif
#if HZ == 1000
	/* Compensate for (HZ==1000) != (1 << SHIFT_HZ).
	 * Add 1.5625% and 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
	 */
	if (time_adj < 0)
		time_adj -= (-time_adj >> 6) + (-time_adj >> 7);
	else
		time_adj += (time_adj >> 6) + (time_adj >> 7);
#endif
}
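/*
 * Worked check of the compensation factors above: time_adj is derived
 * assuming HZ == 1 << SHIFT_HZ.  For HZ == 100, SHIFT_HZ is 7 (128
 * ticks/s assumed), so each per-tick value must be scaled by
 * 128/100 = 1.28, approximated by 1 + 1/4 + 1/32 = 1.28125.  For
 * HZ == 1000, SHIFT_HZ is 10 (1024 assumed), and 1024/1000 = 1.024 is
 * approximated by 1 + 1/64 + 1/128 = 1.0234375.
 */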
/* in the NTP reference this is called "hardclock()" */
static void update_wall_time_one_tick(void)
{
	long time_adjust_step, delta_nsec;

	if ((time_adjust_step = time_adjust) != 0) {
		/* We are doing an adjtime thing.
		 *
		 * Prepare time_adjust_step to be within bounds.
		 * Note that a positive time_adjust means we want the clock
		 * to run faster.
		 *
		 * Limit the amount of the step to be in the range
		 * -tickadj .. +tickadj
		 */
		if (time_adjust > tickadj)
			time_adjust_step = tickadj;
		else if (time_adjust < -tickadj)
			time_adjust_step = -tickadj;

		/* Reduce by this step the amount of time left */
		time_adjust -= time_adjust_step;
	}
	delta_nsec = tick_nsec + time_adjust_step * 1000;
	/*
	 * Advance the phase, once it gets to one microsecond, then
	 * advance the tick more.
	 */
	time_phase += time_adj;
	if (time_phase <= -FINENSEC) {
		long ltemp = -time_phase >> (SHIFT_SCALE - 10);
		time_phase += ltemp << (SHIFT_SCALE - 10);
		delta_nsec -= ltemp;
	}
	else if (time_phase >= FINENSEC) {
		long ltemp = time_phase >> (SHIFT_SCALE - 10);
		time_phase -= ltemp << (SHIFT_SCALE - 10);
		delta_nsec += ltemp;
	}
	xtime.tv_nsec += delta_nsec;
	time_interpolator_update(delta_nsec);

	/* Changes by adjtime() do not take effect till next tick. */
	if (time_next_adjust != 0) {
		time_adjust = time_next_adjust;
		time_next_adjust = 0;
	}
}
/*
 * Using a loop looks inefficient, but "ticks" is
 * usually just one (we shouldn't be losing ticks,
 * we're doing it this way mainly for interrupt
 * latency reasons, not because we think we'll
 * have lots of lost timer ticks)
 */
static void update_wall_time(unsigned long ticks)
{
	do {
		ticks--;
		update_wall_time_one_tick();
		if (xtime.tv_nsec >= 1000000000) {
			xtime.tv_nsec -= 1000000000;
			xtime.tv_sec++;
			second_overflow();
		}
	} while (ticks);
}
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	if (user_tick)
		account_user_time(p, jiffies_to_cputime(1));
	else
		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	scheduler_tick();
	run_posix_cpu_timers(p);
}
/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	return (nr_running() + nr_uninterruptible()) * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);
/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (count < 0) {
		count += LOAD_FREQ;
		active_tasks = count_active_tasks();
		CALC_LOAD(avenrun[0], EXP_1, active_tasks);
		CALC_LOAD(avenrun[1], EXP_5, active_tasks);
		CALC_LOAD(avenrun[2], EXP_15, active_tasks);
	}
}
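/*
 * Worked example of the fixed-point load arithmetic: FSHIFT is 11, so
 * FIXED_1 = 1 << 11 = 2048 and two runnable tasks are represented as
 * 4096.  CALC_LOAD (from <linux/sched.h>) decays each average toward
 * the instantaneous value, roughly:
 *
 *	load = (load * EXP_N + active * (FIXED_1 - EXP_N)) >> FSHIFT;
 *
 * /proc/loadavg converts this back, printing 4096 as "2.00".
 */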
/* jiffies at the most recent update of wall time */
unsigned long wall_jiffies = INITIAL_JIFFIES;

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
#ifndef ARCH_HAVE_XTIME_LOCK
seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

EXPORT_SYMBOL(xtime_lock);
#endif
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	tvec_base_t *base = &__get_cpu_var(tvec_bases);

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	raise_softirq(TIMER_SOFTIRQ);
}
/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(void)
{
	unsigned long ticks;

	ticks = jiffies - wall_jiffies;
	if (ticks) {
		wall_jiffies += ticks;
		update_wall_time(ticks);
	}
	calc_load(ticks);
}
/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */

void do_timer(struct pt_regs *regs)
{
	jiffies_64++;
	update_times();
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
	struct itimerval it_new, it_old;
	unsigned int oldalarm;

	it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
	it_new.it_value.tv_sec = seconds;
	it_new.it_value.tv_usec = 0;
	do_setitimer(ITIMER_REAL, &it_new, &it_old);
	oldalarm = it_old.it_value.tv_sec;
	/* ehhh.. We can't return 0 if we have an alarm pending.. */
	/* And we'd better return too much than too little anyway */
	if ((!oldalarm && it_old.it_value.tv_usec) || it_old.it_value.tv_usec >= 500000)
		oldalarm++;
	return oldalarm;
}

#endif
/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
	return vx_map_tgid(current->tgid);
}
/*
 * Accessing ->group_leader->real_parent is not SMP-safe, it could
 * change from under us. However, rather than getting any lock
 * we can use an optimistic algorithm: get the parent
 * pid, and go back and check that the parent is still
 * the same. If it has changed (which is extremely unlikely
 * indeed), we just try again..
 *
 * NOTE! This depends on the fact that even if we _do_
 * get an old value of "parent", we can happily dereference
 * the pointer (it was and remains a dereferenceable kernel pointer
 * no matter what): we just can't necessarily trust the result
 * until we know that the parent pointer is valid.
 *
 * NOTE2: ->group_leader never changes from under us.
 */
asmlinkage long sys_getppid(void)
{
	int pid;
	struct task_struct *me = current;
	struct task_struct *parent;

	parent = me->group_leader->real_parent;
	for (;;) {
		pid = parent->tgid;
#ifdef CONFIG_SMP
		{
			struct task_struct *old = parent;

			/*
			 * Make sure we read the pid before re-reading the
			 * parent pointer:
			 */
			smp_rmb();
			parent = me->group_leader->real_parent;
			if (parent == old)
				break;
		}
#else
		break;
#endif
	}
	return vx_map_pid(pid);
}
#ifdef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/alpha instead?
 */

asmlinkage long do_getxpid(long *ppid)
{
	*ppid = sys_getppid();
	return sys_getpid();
}

#else /* __alpha__ */
asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current->egid;
}

#endif /* __alpha__ */
static void process_timeout(unsigned long __data)
{
	wake_up_process((task_t *)__data);
}
/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	if (crashdump_mode()) {
		diskdump_mdelay(timeout);
		set_current_state(TASK_RUNNING);
		return timeout;
	}

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0)
		{
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx from %p\n", timeout,
				__builtin_return_address(0));
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	init_timer(&timer);
	timer.expires = expire;
	timer.data = (unsigned long) current;
	timer.function = process_timeout;

	add_timer(&timer);
	schedule();
	del_singleshot_timer_sync(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}

EXPORT_SYMBOL(schedule_timeout);
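/*
 * Example (illustrative only): sleep for about half a second, waking up
 * early if a signal arrives; the task state must be set first:
 *
 *	signed long left;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	left = schedule_timeout(HZ / 2);
 *	// left is 0 after a full sleep, else the jiffies that remained
 *	// when a signal woke us
 */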
/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
	return current->pid;
}
static long __sched nanosleep_restart(struct restart_block *restart)
{
	unsigned long expire = restart->arg0, now = jiffies;
	struct timespec __user *rmtp = (struct timespec __user *) restart->arg1;
	long ret;

	/* Did it expire while we handled signals? */
	if (!time_after(expire, now))
		return 0;

	current->state = TASK_INTERRUPTIBLE;
	expire = schedule_timeout(expire - now);

	ret = 0;
	if (expire) {
		struct timespec t;
		jiffies_to_timespec(expire, &t);

		ret = -ERESTART_RESTARTBLOCK;
		if (rmtp && copy_to_user(rmtp, &t, sizeof(t)))
			ret = -EFAULT;
		/* The 'restart' block is already filled in */
	}
	return ret;
}
asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
	struct timespec t;
	unsigned long expire;
	long ret;

	if (copy_from_user(&t, rqtp, sizeof(t)))
		return -EFAULT;

	if ((t.tv_nsec >= 1000000000L) || (t.tv_nsec < 0) || (t.tv_sec < 0))
		return -EINVAL;

	expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
	current->state = TASK_INTERRUPTIBLE;
	expire = schedule_timeout(expire);

	ret = 0;
	if (expire) {
		struct restart_block *restart;
		jiffies_to_timespec(expire, &t);
		if (rmtp && copy_to_user(rmtp, &t, sizeof(t)))
			return -EFAULT;

		restart = &current_thread_info()->restart_block;
		restart->fn = nanosleep_restart;
		restart->arg0 = jiffies + expire;
		restart->arg1 = (unsigned long) rmtp;
		ret = -ERESTART_RESTARTBLOCK;
	}
	return ret;
}
/**
 * sys_sysinfo - fill in sysinfo struct
 */
asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
	struct sysinfo val;
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	unsigned long seq;

	memset((char *)&val, 0, sizeof(struct sysinfo));

	do {
		struct timespec tp;
		seq = read_seqbegin(&xtime_lock);

		/*
		 * This is annoying.  The code below is the same thing
		 * posix_get_clock_monotonic() does, but it wants to take
		 * the lock, and we want the lock to cover the loads stuff
		 * as well.
		 */

		getnstimeofday(&tp);
		tp.tv_sec += wall_to_monotonic.tv_sec;
		tp.tv_nsec += wall_to_monotonic.tv_nsec;
		if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
			tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
			tp.tv_sec++;
		}
		if (vx_flags(VXF_VIRT_UPTIME, 0))
			vx_vsi_uptime(&tp, NULL);
		val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

		val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
		val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
		val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

		val.procs = nr_threads;
	} while (read_seqretry(&xtime_lock, seq));

/*	if (vx_flags(VXF_VIRT_CPU, 0))
		...
*/
	si_meminfo(&val);
	si_swapinfo(&val);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = val.totalram + val.totalswap;
	if (mem_total < val.totalram || mem_total < val.totalswap)
		goto out;
	bitcount = 0;
	mem_unit = val.mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * val.mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	val.mem_unit = 1;
	val.totalram <<= bitcount;
	val.freeram <<= bitcount;
	val.sharedram <<= bitcount;
	val.bufferram <<= bitcount;
	val.totalswap <<= bitcount;
	val.freeswap <<= bitcount;
	val.totalhigh <<= bitcount;
	val.freehigh <<= bitcount;

out:
	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
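/*
 * Worked example of the mem_unit folding above: with 2GB of RAM+swap
 * and val.mem_unit = 4096 (one page), mem_total starts at 2^19 units.
 * The loop halves mem_unit 12 times (bitcount = 12), doubling mem_total
 * up to 2^31, which still fits in a 32-bit unsigned long, so every
 * field is shifted left by 12 and reported in bytes with mem_unit = 1.
 * Had any doubling overflowed, the values would have stayed in the
 * original units instead.
 */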
static void /* __devinit */ init_timers_cpu(int cpu)
{
	int j;
	tvec_base_t *base;

	base = &per_cpu(tvec_bases, cpu);
	spin_lock_init(&base->lock);
	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
}
static tvec_base_t saved_tvec_base;

void dump_clear_timers(void)
{
	tvec_base_t *base = &per_cpu(tvec_bases, smp_processor_id());

	memcpy(&saved_tvec_base, base, sizeof(saved_tvec_base));
	init_timers_cpu(smp_processor_id());
}

EXPORT_SYMBOL_GPL(dump_clear_timers);

void dump_run_timers(void)
{
	tvec_base_t *base = &__get_cpu_var(tvec_bases);

	__run_timers(base);
}

EXPORT_SYMBOL_GPL(dump_run_timers);
#ifdef CONFIG_HOTPLUG_CPU
static int migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_entry(head->next, struct timer_list, entry);
		/* We're locking backwards from __mod_timer order here,
		   beware deadlock. */
		if (!spin_trylock(&timer->lock))
			return 0;
		list_del(&timer->entry);
		internal_add_timer(new_base, timer);
		timer->base = new_base;
		spin_unlock(&timer->lock);
	}
	return 1;
}

static void __devinit migrate_timers(int cpu)
{
	tvec_base_t *old_base;
	tvec_base_t *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = &per_cpu(tvec_bases, cpu);
	new_base = &get_cpu_var(tvec_bases);

	local_irq_disable();
again:
	/* Prevent deadlocks via ordering by old_base < new_base. */
	if (old_base < new_base) {
		spin_lock(&new_base->lock);
		spin_lock(&old_base->lock);
	} else {
		spin_lock(&old_base->lock);
		spin_lock(&new_base->lock);
	}

	if (old_base->running_timer)
		BUG();
	for (i = 0; i < TVR_SIZE; i++)
		if (!migrate_timer_list(new_base, old_base->tv1.vec + i))
			goto unlock_again;
	for (i = 0; i < TVN_SIZE; i++)
		if (!migrate_timer_list(new_base, old_base->tv2.vec + i)
		    || !migrate_timer_list(new_base, old_base->tv3.vec + i)
		    || !migrate_timer_list(new_base, old_base->tv4.vec + i)
		    || !migrate_timer_list(new_base, old_base->tv5.vec + i))
			goto unlock_again;
	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);
	local_irq_enable();
	put_cpu_var(tvec_bases);
	return;

unlock_again:
	/* Avoid deadlock with __mod_timer, by backing off. */
	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);
	local_irq_enable();
	goto again;
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __devinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	switch (action) {
	case CPU_UP_PREPARE:
		init_timers_cpu(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};

void __init init_timers(void)
{
	timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}
#ifdef CONFIG_TIME_INTERPOLATION

struct time_interpolator *time_interpolator;
static struct time_interpolator *time_interpolator_list;
static DEFINE_SPINLOCK(time_interpolator_lock);
static inline u64 time_interpolator_get_cycles(unsigned int src)
{
	unsigned long (*x)(void);

	switch (src) {
	case TIME_SOURCE_FUNCTION:
		x = time_interpolator->addr;
		return x();

	case TIME_SOURCE_MMIO64:
		return readq((void __iomem *)time_interpolator->addr);

	case TIME_SOURCE_MMIO32:
		return readl((void __iomem *)time_interpolator->addr);

	default:
		return get_cycles();
	}
}
static inline u64 time_interpolator_get_counter(void)
{
	unsigned int src = time_interpolator->source;

	if (time_interpolator->jitter) {
		u64 lcycle;
		u64 now;

		do {
			lcycle = time_interpolator->last_cycle;
			now = time_interpolator_get_cycles(src);
			if (lcycle && time_after(lcycle, now))
				return lcycle;
			/* Keep track of the last timer value returned. The use of cmpxchg here
			 * will cause contention in an SMP environment.
			 */
		} while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
		return now;
	} else
		return time_interpolator_get_cycles(src);
}
void time_interpolator_reset(void)
{
	time_interpolator->offset = 0;
	time_interpolator->last_counter = time_interpolator_get_counter();
}

#define GET_TI_NSECS(count, i) \
	(((((count) - (i)->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)
unsigned long time_interpolator_get_offset(void)
{
	/* If we do not have a time interpolator set up then just return zero */
	if (!time_interpolator)
		return 0;

	return time_interpolator->offset +
		GET_TI_NSECS(time_interpolator_get_counter(), time_interpolator);
}
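/*
 * Worked example of the fixed-point conversion done by GET_TI_NSECS:
 * register_time_interpolator() below sets
 *
 *	nsec_per_cyc = ((u64)NSEC_PER_SEC << shift) / frequency;
 *
 * so for a 10 MHz counter with shift = 16, nsec_per_cyc is
 * (10^9 << 16) / 10^7 = 6553600, and a delta of 5 cycles yields
 * (5 * 6553600) >> 16 = 500 ns.
 */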
#define INTERPOLATOR_ADJUST 65536
#define INTERPOLATOR_MAX_SKIP (10 * INTERPOLATOR_ADJUST)

static void time_interpolator_update(long delta_nsec)
{
	u64 counter;
	unsigned long offset;

	/* If there is no time interpolator set up then do nothing */
	if (!time_interpolator)
		return;

	/* The interpolator compensates for late ticks by accumulating
	 * the late time in time_interpolator->offset. A tick earlier than
	 * expected will lead to a reset of the offset and a corresponding
	 * jump of the clock forward. Again this only works if the
	 * interpolator clock is running slightly slower than the regular clock
	 * and the tuning logic ensures that.
	 */

	counter = time_interpolator_get_counter();
	offset = time_interpolator->offset + GET_TI_NSECS(counter, time_interpolator);

	if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
		time_interpolator->offset = offset - delta_nsec;
	else {
		time_interpolator->skips++;
		time_interpolator->ns_skipped += delta_nsec - offset;
		time_interpolator->offset = 0;
	}
	time_interpolator->last_counter = counter;

	/* Tuning logic for time interpolator invoked every minute or so.
	 * Decrease interpolator clock speed if no skips occurred and an offset is carried.
	 * Increase interpolator clock speed if we skip too much time.
	 */
	if (jiffies % INTERPOLATOR_ADJUST == 0) {
		if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC)
			time_interpolator->nsec_per_cyc--;
		if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
			time_interpolator->nsec_per_cyc++;
		time_interpolator->skips = 0;
		time_interpolator->ns_skipped = 0;
	}
}
static int
is_better_time_interpolator(struct time_interpolator *new)
{
	if (!time_interpolator)
		return 1;
	return new->frequency > 2 * time_interpolator->frequency ||
		(unsigned long)new->drift < (unsigned long)time_interpolator->drift;
}
void
register_time_interpolator(struct time_interpolator *ti)
{
	unsigned long flags;

	/* Sanity check */
	if (ti->frequency == 0 || ti->mask == 0)
		BUG();

	ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
	spin_lock(&time_interpolator_lock);
	write_seqlock_irqsave(&xtime_lock, flags);
	if (is_better_time_interpolator(ti)) {
		time_interpolator = ti;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);

	ti->next = time_interpolator_list;
	time_interpolator_list = ti;
	spin_unlock(&time_interpolator_lock);
}
void
unregister_time_interpolator(struct time_interpolator *ti)
{
	struct time_interpolator *curr, **prev;
	unsigned long flags;

	spin_lock(&time_interpolator_lock);
	prev = &time_interpolator_list;
	for (curr = *prev; curr; curr = curr->next) {
		if (curr == ti) {
			*prev = curr->next;
			break;
		}
		prev = &curr->next;
	}

	write_seqlock_irqsave(&xtime_lock, flags);
	if (ti == time_interpolator) {
		/* we lost the best time-interpolator: */
		time_interpolator = NULL;
		/* find the next-best interpolator */
		for (curr = time_interpolator_list; curr; curr = curr->next)
			if (is_better_time_interpolator(curr))
				time_interpolator = curr;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);
	spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	if (unlikely(crashdump_mode())) {
		while (msecs--)
			udelay(1000);
		return;
	}

	while (timeout) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		timeout = schedule_timeout(timeout);
	}
}

EXPORT_SYMBOL(msleep);
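/*
 * Note on the "+ 1" above: the current tick may be almost over when the
 * sleep starts, so one extra jiffy is added to guarantee that at least
 * @msecs have elapsed on return. Example (illustrative only): a driver
 * waiting ~20ms for hardware to settle would simply call:
 *
 *	msleep(20);
 */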
/**
 * msleep_interruptible - sleep waiting for waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		timeout = schedule_timeout(timeout);
	}
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);