X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=kernel%2Ftimer.c;h=47b04c1bcb12dfb04bfae4bba280cee783c2d706;hb=987b0145d94eecf292d8b301228356f44611ab7c;hp=1bbbceea917e0ce33961c7560a22b2135c28178a;hpb=f7ed79d23a47594e7834d66a8f14449796d4f3e6;p=linux-2.6.git diff --git a/kernel/timer.c b/kernel/timer.c index 1bbbceea9..47b04c1bc 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -57,6 +57,7 @@ EXPORT_SYMBOL(jiffies_64); /* * per-CPU timer vector definitions: */ + #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6) #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8) #define TVN_SIZE (1 << TVN_BITS) @@ -64,6 +65,11 @@ EXPORT_SYMBOL(jiffies_64); #define TVN_MASK (TVN_SIZE - 1) #define TVR_MASK (TVR_SIZE - 1) +struct timer_base_s { + spinlock_t lock; + struct timer_list *running_timer; +}; + typedef struct tvec_s { struct list_head vec[TVN_SIZE]; } tvec_t; @@ -73,8 +79,7 @@ typedef struct tvec_root_s { } tvec_root_t; struct tvec_t_base_s { - spinlock_t lock; - struct timer_list *running_timer; + struct timer_base_s t_base; unsigned long timer_jiffies; tvec_root_t tv1; tvec_t tv2; @@ -84,16 +89,13 @@ struct tvec_t_base_s { } ____cacheline_aligned_in_smp; typedef struct tvec_t_base_s tvec_base_t; - -tvec_base_t boot_tvec_bases; -EXPORT_SYMBOL(boot_tvec_bases); -static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases }; +static DEFINE_PER_CPU(tvec_base_t, tvec_bases); static inline void set_running_timer(tvec_base_t *base, struct timer_list *timer) { #ifdef CONFIG_SMP - base->running_timer = timer; + base->t_base.running_timer = timer; #endif } @@ -139,6 +141,15 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer) list_add_tail(&timer->entry, vec); } +typedef struct timer_base_s timer_base_t; +/* + * Used by TIMER_INITIALIZER, we can't use per_cpu(tvec_bases) + * at compile time, and we need timer->base to lock the timer. + */ +timer_base_t __init_timer_base + ____cacheline_aligned_in_smp = { .lock = SPIN_LOCK_UNLOCKED }; +EXPORT_SYMBOL(__init_timer_base); + /*** * init_timer - initialize a timer. * @timer: the timer to be initialized @@ -149,7 +160,7 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer) void fastcall init_timer(struct timer_list *timer) { timer->entry.next = NULL; - timer->base = per_cpu(tvec_bases, raw_smp_processor_id()); + timer->base = &per_cpu(tvec_bases, raw_smp_processor_id()).t_base; } EXPORT_SYMBOL(init_timer); @@ -165,7 +176,7 @@ static inline void detach_timer(struct timer_list *timer, } /* - * We are using hashed locking: holding per_cpu(tvec_bases).lock + * We are using hashed locking: holding per_cpu(tvec_bases).t_base.lock * means that all timers which are tied to this base via timer->base are * locked, and the base itself is locked too. * @@ -176,10 +187,10 @@ static inline void detach_timer(struct timer_list *timer, * possible to set timer->base = NULL and drop the lock: the timer remains * locked. 
*/ -static tvec_base_t *lock_timer_base(struct timer_list *timer, +static timer_base_t *lock_timer_base(struct timer_list *timer, unsigned long *flags) { - tvec_base_t *base; + timer_base_t *base; for (;;) { base = timer->base; @@ -196,7 +207,8 @@ static tvec_base_t *lock_timer_base(struct timer_list *timer, int __mod_timer(struct timer_list *timer, unsigned long expires) { - tvec_base_t *base, *new_base; + timer_base_t *base; + tvec_base_t *new_base; unsigned long flags; int ret = 0; @@ -209,9 +221,9 @@ int __mod_timer(struct timer_list *timer, unsigned long expires) ret = 1; } - new_base = __get_cpu_var(tvec_bases); + new_base = &__get_cpu_var(tvec_bases); - if (base != new_base) { + if (base != &new_base->t_base) { /* * We are trying to schedule the timer on the local CPU. * However we can't change timer's base while it is running, @@ -219,19 +231,21 @@ int __mod_timer(struct timer_list *timer, unsigned long expires) * handler yet has not finished. This also guarantees that * the timer is serialized wrt itself. */ - if (likely(base->running_timer != timer)) { + if (unlikely(base->running_timer == timer)) { + /* The timer remains on a former base */ + new_base = container_of(base, tvec_base_t, t_base); + } else { /* See the comment in lock_timer_base() */ timer->base = NULL; spin_unlock(&base->lock); - base = new_base; - spin_lock(&base->lock); - timer->base = base; + spin_lock(&new_base->t_base.lock); + timer->base = &new_base->t_base; } } timer->expires = expires; - internal_add_timer(base, timer); - spin_unlock_irqrestore(&base->lock, flags); + internal_add_timer(new_base, timer); + spin_unlock_irqrestore(&new_base->t_base.lock, flags); return ret; } @@ -247,14 +261,14 @@ EXPORT_SYMBOL(__mod_timer); */ void add_timer_on(struct timer_list *timer, int cpu) { - tvec_base_t *base = per_cpu(tvec_bases, cpu); + tvec_base_t *base = &per_cpu(tvec_bases, cpu); unsigned long flags; BUG_ON(timer_pending(timer) || !timer->function); - spin_lock_irqsave(&base->lock, flags); - timer->base = base; + spin_lock_irqsave(&base->t_base.lock, flags); + timer->base = &base->t_base; internal_add_timer(base, timer); - spin_unlock_irqrestore(&base->lock, flags); + spin_unlock_irqrestore(&base->t_base.lock, flags); } @@ -307,7 +321,7 @@ EXPORT_SYMBOL(mod_timer); */ int del_timer(struct timer_list *timer) { - tvec_base_t *base; + timer_base_t *base; unsigned long flags; int ret = 0; @@ -334,7 +348,7 @@ EXPORT_SYMBOL(del_timer); */ int try_to_del_timer_sync(struct timer_list *timer) { - tvec_base_t *base; + timer_base_t *base; unsigned long flags; int ret = -1; @@ -398,7 +412,7 @@ static int cascade(tvec_base_t *base, tvec_t *tv, int index) struct timer_list *tmp; tmp = list_entry(curr, struct timer_list, entry); - BUG_ON(tmp->base != base); + BUG_ON(tmp->base != &base->t_base); curr = curr->next; internal_add_timer(base, tmp); } @@ -420,7 +434,7 @@ static inline void __run_timers(tvec_base_t *base) { struct timer_list *timer; - spin_lock_irq(&base->lock); + spin_lock_irq(&base->t_base.lock); while (time_after_eq(jiffies, base->timer_jiffies)) { struct list_head work_list = LIST_HEAD_INIT(work_list); struct list_head *head = &work_list; @@ -446,7 +460,7 @@ static inline void __run_timers(tvec_base_t *base) set_running_timer(base, timer); detach_timer(timer, 1); - spin_unlock_irq(&base->lock); + spin_unlock_irq(&base->t_base.lock); { int preempt_count = preempt_count(); fn(data); @@ -459,11 +473,11 @@ static inline void __run_timers(tvec_base_t *base) BUG(); } } - spin_lock_irq(&base->lock); + 
spin_lock_irq(&base->t_base.lock); } } set_running_timer(base, NULL); - spin_unlock_irq(&base->lock); + spin_unlock_irq(&base->t_base.lock); } #ifdef CONFIG_NO_IDLE_HZ @@ -493,8 +507,8 @@ unsigned long next_timer_interrupt(void) } hr_expires += jiffies; - base = __get_cpu_var(tvec_bases); - spin_lock(&base->lock); + base = &__get_cpu_var(tvec_bases); + spin_lock(&base->t_base.lock); expires = base->timer_jiffies + (LONG_MAX >> 1); list = NULL; @@ -542,39 +556,7 @@ found: expires = nte->expires; } } - spin_unlock(&base->lock); - - /* - * It can happen that other CPUs service timer IRQs and increment - * jiffies, but we have not yet got a local timer tick to process - * the timer wheels. In that case, the expiry time can be before - * jiffies, but since the high-resolution timer here is relative to - * jiffies, the default expression when high-resolution timers are - * not active, - * - * time_before(MAX_JIFFY_OFFSET + jiffies, expires) - * - * would falsely evaluate to true. If that is the case, just - * return jiffies so that we can immediately fire the local timer - */ - if (time_before(expires, jiffies)) - return jiffies; - - /* - * It can happen that other CPUs service timer IRQs and increment - * jiffies, but we have not yet got a local timer tick to process - * the timer wheels. In that case, the expiry time can be before - * jiffies, but since the high-resolution timer here is relative to - * jiffies, the default expression when high-resolution timers are - * not active, - * - * time_before(MAX_JIFFY_OFFSET + jiffies, expires) - * - * would falsely evaluate to true. If that is the case, just - * return jiffies so that we can immediately fire the local timer - */ - if (time_before(expires, jiffies)) - return jiffies; + spin_unlock(&base->t_base.lock); if (time_before(hr_expires, expires)) return hr_expires; @@ -717,9 +699,18 @@ static void second_overflow(void) /* * Compute the frequency estimate and additional phase adjustment due - * to frequency error for the next second. + * to frequency error for the next second. When the PPS signal is + * engaged, gnaw on the watchdog counter and update the frequency + * computed by the pll and the PPS signal. */ - ltemp = time_freq; + pps_valid++; + if (pps_valid == PPS_VALID) { /* PPS signal lost */ + pps_jitter = MAXTIME; + pps_stabil = MAXFREQ; + time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER | + STA_PPSWANDER | STA_PPSERROR); + } + ltemp = time_freq + pps_freq; time_adj += shift_right(ltemp,(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE)); #if HZ == 100 @@ -861,7 +852,7 @@ void update_process_times(int user_tick) */ static unsigned long count_active_tasks(void) { - return nr_active() * FIXED_1; + return (nr_running() + nr_uninterruptible()) * FIXED_1; } /* @@ -913,7 +904,7 @@ EXPORT_SYMBOL(xtime_lock); */ static void run_timer_softirq(struct softirq_action *h) { - tvec_base_t *base = __get_cpu_var(tvec_bases); + tvec_base_t *base = &__get_cpu_var(tvec_bases); hrtimer_run_queues(); if (time_after_eq(jiffies, base->timer_jiffies)) @@ -926,7 +917,6 @@ static void run_timer_softirq(struct softirq_action *h) void run_local_timers(void) { raise_softirq(TIMER_SOFTIRQ); - softlockup_tick(); } /* @@ -957,6 +947,7 @@ void do_timer(struct pt_regs *regs) /* prevent loading jiffies before storing new jiffies_64 value. 
*/ barrier(); update_times(); + softlockup_tick(regs); } #ifdef __ARCH_WANT_SYS_ALARM @@ -967,7 +958,19 @@ void do_timer(struct pt_regs *regs) */ asmlinkage unsigned long sys_alarm(unsigned int seconds) { - return alarm_setitimer(seconds); + struct itimerval it_new, it_old; + unsigned int oldalarm; + + it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0; + it_new.it_value.tv_sec = seconds; + it_new.it_value.tv_usec = 0; + do_setitimer(ITIMER_REAL, &it_new, &it_old); + oldalarm = it_old.it_value.tv_sec; + /* ehhh.. We can't return 0 if we have an alarm pending.. */ + /* And we'd better return too much than too little anyway */ + if ((!oldalarm && it_old.it_value.tv_usec) || it_old.it_value.tv_usec >= 500000) + oldalarm++; + return oldalarm; } #endif @@ -1000,6 +1003,7 @@ asmlinkage long sys_getppid(void) rcu_read_lock(); pid = rcu_dereference(current->real_parent)->tgid; rcu_read_unlock(); + return vx_map_pid(pid); } @@ -1238,41 +1242,13 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info) return 0; } -static int __devinit init_timers_cpu(int cpu) +static void __devinit init_timers_cpu(int cpu) { int j; tvec_base_t *base; - static char __devinitdata tvec_base_done[NR_CPUS]; - if (!tvec_base_done[cpu]) { - static char boot_done; - - if (boot_done) { - /* - * The APs use this path later in boot - */ - base = kmalloc_node(sizeof(*base), GFP_KERNEL, - cpu_to_node(cpu)); - if (!base) - return -ENOMEM; - memset(base, 0, sizeof(*base)); - per_cpu(tvec_bases, cpu) = base; - } else { - /* - * This is for the boot CPU - we use compile-time - * static initialisation because per-cpu memory isn't - * ready yet and because the memory allocators are not - * initialised either. - */ - boot_done = 1; - base = &boot_tvec_bases; - } - tvec_base_done[cpu] = 1; - } else { - base = per_cpu(tvec_bases, cpu); - } - - spin_lock_init(&base->lock); + base = &per_cpu(tvec_bases, cpu); + spin_lock_init(&base->t_base.lock); for (j = 0; j < TVN_SIZE; j++) { INIT_LIST_HEAD(base->tv5.vec + j); INIT_LIST_HEAD(base->tv4.vec + j); @@ -1283,7 +1259,6 @@ static int __devinit init_timers_cpu(int cpu) INIT_LIST_HEAD(base->tv1.vec + j); base->timer_jiffies = jiffies; - return 0; } #ifdef CONFIG_HOTPLUG_CPU @@ -1294,7 +1269,7 @@ static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head) while (!list_empty(head)) { timer = list_entry(head->next, struct timer_list, entry); detach_timer(timer, 0); - timer->base = new_base; + timer->base = &new_base->t_base; internal_add_timer(new_base, timer); } } @@ -1306,15 +1281,15 @@ static void __devinit migrate_timers(int cpu) int i; BUG_ON(cpu_online(cpu)); - old_base = per_cpu(tvec_bases, cpu); - new_base = get_cpu_var(tvec_bases); + old_base = &per_cpu(tvec_bases, cpu); + new_base = &get_cpu_var(tvec_bases); local_irq_disable(); - spin_lock(&new_base->lock); - spin_lock(&old_base->lock); - - BUG_ON(old_base->running_timer); + spin_lock(&new_base->t_base.lock); + spin_lock(&old_base->t_base.lock); + if (old_base->t_base.running_timer) + BUG(); for (i = 0; i < TVR_SIZE; i++) migrate_timer_list(new_base, old_base->tv1.vec + i); for (i = 0; i < TVN_SIZE; i++) { @@ -1324,21 +1299,20 @@ static void __devinit migrate_timers(int cpu) migrate_timer_list(new_base, old_base->tv5.vec + i); } - spin_unlock(&old_base->lock); - spin_unlock(&new_base->lock); + spin_unlock(&old_base->t_base.lock); + spin_unlock(&new_base->t_base.lock); local_irq_enable(); put_cpu_var(tvec_bases); } #endif /* CONFIG_HOTPLUG_CPU */ -static int timer_cpu_notify(struct notifier_block *self, 
+static int __devinit timer_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { long cpu = (long)hcpu; switch(action) { case CPU_UP_PREPARE: - if (init_timers_cpu(cpu) < 0) - return NOTIFY_BAD; + init_timers_cpu(cpu); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_DEAD: @@ -1351,7 +1325,7 @@ static int timer_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block timers_nb = { +static struct notifier_block __devinitdata timers_nb = { .notifier_call = timer_cpu_notify, }; @@ -1481,7 +1455,7 @@ static void time_interpolator_update(long delta_nsec) */ if (jiffies % INTERPOLATOR_ADJUST == 0) { - if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec) + if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC) time_interpolator->nsec_per_cyc--; if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0) time_interpolator->nsec_per_cyc++; @@ -1505,7 +1479,8 @@ register_time_interpolator(struct time_interpolator *ti) unsigned long flags; /* Sanity check */ - BUG_ON(ti->frequency == 0 || ti->mask == 0); + if (ti->frequency == 0 || ti->mask == 0) + BUG(); ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency; spin_lock(&time_interpolator_lock);
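
The structural change driving most of the hunks above is the split of "lock" and "running_timer" out of struct tvec_t_base_s into struct timer_base_s: a timer can then be locked through timer->base alone, without knowing which per-CPU wheel it hangs on, and __mod_timer() migrates a timer to another CPU's wheel by briefly setting timer->base to NULL while it trades one base lock for the other. What follows is a minimal userspace sketch of that protocol, assuming GCC atomic builtins and POSIX spinlocks; the toy_* names are illustrative stand-ins, not kernel API.

/*
 * Userspace sketch of the lock_timer_base()/__mod_timer() protocol.
 * A timer is locked by locking its base; timer->base == NULL means
 * "mid-migration, retry".
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

struct toy_base {
	pthread_spinlock_t lock;
};

struct toy_timer {
	struct toy_base *base;	/* only rewritten with the old base held */
};

/* Mirrors lock_timer_base(): spin until base is non-NULL and stable. */
static struct toy_base *toy_lock_timer_base(struct toy_timer *timer)
{
	for (;;) {
		struct toy_base *base =
			__atomic_load_n(&timer->base, __ATOMIC_ACQUIRE);

		if (base != NULL) {
			pthread_spin_lock(&base->lock);
			if (base == timer->base)
				return base;	/* locked the right base */
			/* The timer was migrated under us; try again. */
			pthread_spin_unlock(&base->lock);
		}
		sched_yield();	/* stand-in for cpu_relax() */
	}
}

/* Mirrors the base switch in __mod_timer(): NULL out timer->base so
 * concurrent lockers spin, then relock on the new base. */
static void toy_migrate(struct toy_timer *timer, struct toy_base *new_base)
{
	struct toy_base *base = toy_lock_timer_base(timer);

	if (base != new_base) {
		__atomic_store_n(&timer->base, NULL, __ATOMIC_RELEASE);
		pthread_spin_unlock(&base->lock);
		pthread_spin_lock(&new_base->lock);
		__atomic_store_n(&timer->base, new_base, __ATOMIC_RELEASE);
		base = new_base;
	}
	/* (the real __mod_timer() re-queues the timer here) */
	pthread_spin_unlock(&base->lock);
}

int main(void)
{
	struct toy_base a, b;
	struct toy_timer t = { .base = &a };

	pthread_spin_init(&a.lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&b.lock, PTHREAD_PROCESS_PRIVATE);
	toy_migrate(&t, &b);
	printf("timer now on base %s\n", t.base == &b ? "b" : "a");
	return 0;
}

Splitting the base out this way is also what permits the compile-time __init_timer_base added above: statically initialized timers point at a single shared timer_base_t before the per-CPU areas are usable, and are switched to a real per-CPU base the first time __mod_timer() runs on them.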
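
For reading the TVR_BITS/TVN_BITS defines and the cascade() hunk: internal_add_timer() files each timer into one of five wheels (tv1..tv5) according to how far ahead of base->timer_jiffies it expires. The sketch below re-derives that level/slot selection for the !CONFIG_BASE_SMALL values; it illustrates the algorithm and is not the kernel routine itself.

/*
 * Standalone sketch of the timer-wheel level/slot computation that
 * internal_add_timer() performs with the TVR_*/TVN_* constants.
 */
#include <stdio.h>

#define TVN_BITS 6		/* !CONFIG_BASE_SMALL values */
#define TVR_BITS 8
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

/* Return the wheel level (1..5) and store the slot index in *slot. */
static int wheel_slot(unsigned long expires, unsigned long timer_jiffies,
		      unsigned int *slot)
{
	unsigned long idx = expires - timer_jiffies;

	if (idx < TVR_SIZE) {
		*slot = expires & TVR_MASK;
		return 1;
	} else if (idx < 1UL << (TVR_BITS + TVN_BITS)) {
		*slot = (expires >> TVR_BITS) & TVN_MASK;
		return 2;
	} else if (idx < 1UL << (TVR_BITS + 2 * TVN_BITS)) {
		*slot = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		return 3;
	} else if (idx < 1UL << (TVR_BITS + 3 * TVN_BITS)) {
		*slot = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		return 4;
	} else if ((long)idx < 0) {
		/* Already expired: queue for the next tick. */
		*slot = timer_jiffies & TVR_MASK;
		return 1;
	}
	/* Clamp very distant timeouts to the 32-bit horizon, then file
	 * the timer in the outermost wheel. */
	if (idx > 0xffffffffUL) {
		idx = 0xffffffffUL;
		expires = idx + timer_jiffies;
	}
	*slot = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
	return 5;
}

int main(void)
{
	unsigned long now = 1000;
	unsigned long samples[] = { 1001, 1255, 1256, 70000, 20000000 };
	unsigned int slot, i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int lvl = wheel_slot(samples[i], now, &slot);
		printf("expires=%lu -> tv%d slot %u\n", samples[i], lvl, slot);
	}
	return 0;
}

As base->timer_jiffies advances, cascade() periodically drains one slot of an outer wheel back through internal_add_timer(), re-filing its timers into finer wheels; every timer on a wheel is tied to that wheel's base, which is why the BUG_ON(tmp->base != &base->t_base) check in the cascade hunk must hold.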