X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=kernel%2Fhrtimer.c;h=14bc9cfa63999eee7fa1b25e4129ee67463e53bc;hb=987b0145d94eecf292d8b301228356f44611ab7c;hp=01fa2ae98a8571d7e2d7a4e71c03a423792a0d67;hpb=f7ed79d23a47594e7834d66a8f14449796d4f3e6;p=linux-2.6.git

diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 01fa2ae98..14bc9cfa6 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -122,26 +122,6 @@ void ktime_get_ts(struct timespec *ts)
 }
 EXPORT_SYMBOL_GPL(ktime_get_ts);
 
-/*
- * Get the coarse grained time at the softirq based on xtime and
- * wall_to_monotonic.
- */
-static void hrtimer_get_softirq_time(struct hrtimer_base *base)
-{
-	ktime_t xtim, tomono;
-	unsigned long seq;
-
-	do {
-		seq = read_seqbegin(&xtime_lock);
-		xtim = timespec_to_ktime(xtime);
-		tomono = timespec_to_ktime(wall_to_monotonic);
-
-	} while (read_seqretry(&xtime_lock, seq));
-
-	base[CLOCK_REALTIME].softirq_time = xtim;
-	base[CLOCK_MONOTONIC].softirq_time = ktime_add(xtim, tomono);
-}
-
 /*
  * Functions and macros which are different for UP/SMP systems are kept in a
  * single place
@@ -266,7 +246,7 @@ ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
 /*
  * Divide a ktime value by a nanosecond value
  */
-static unsigned long ktime_divns(const ktime_t kt, s64 div)
+static unsigned long ktime_divns(const ktime_t kt, nsec_t div)
 {
 	u64 dclc, inc, dns;
 	int sft = 0;
@@ -301,17 +281,18 @@ void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
  * hrtimer_forward - forward the timer expiry
  *
  * @timer: hrtimer to forward
- * @now: forward past this time
  * @interval: the interval to forward
  *
  * Forward the timer expiry so it will expire in the future.
  * Returns the number of overruns.
  */
 unsigned long
-hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
+hrtimer_forward(struct hrtimer *timer, ktime_t interval)
 {
 	unsigned long orun = 1;
-	ktime_t delta;
+	ktime_t delta, now;
+
+	now = timer->base->get_time();
 
 	delta = ktime_sub(now, timer->expires);
@@ -322,7 +303,7 @@ hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 		interval.tv64 = timer->base->resolution.tv64;
 
 	if (unlikely(delta.tv64 >= interval.tv64)) {
-		s64 incr = ktime_to_ns(interval);
+		nsec_t incr = ktime_to_ns(interval);
 
 		orun = ktime_divns(delta, incr);
 		timer->expires = ktime_add_ns(timer->expires, incr * orun);
@@ -374,6 +355,8 @@ static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
 	rb_link_node(&timer->node, parent, link);
 	rb_insert_color(&timer->node, &base->active);
 
+	timer->state = HRTIMER_PENDING;
+
 	if (!base->first || timer->expires.tv64 <
 	    rb_entry(base->first, struct hrtimer, node)->expires.tv64)
 		base->first = &timer->node;
@@ -393,7 +376,6 @@ static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
 	if (base->first == &timer->node)
 		base->first = rb_next(&timer->node);
 	rb_erase(&timer->node, &base->active);
-	timer->node.rb_parent = HRTIMER_INACTIVE;
 }
 
 /*
@@ -404,6 +386,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
 {
 	if (hrtimer_active(timer)) {
 		__remove_hrtimer(timer, base);
+		timer->state = HRTIMER_INACTIVE;
 		return 1;
 	}
 	return 0;
@@ -456,7 +439,6 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(hrtimer_start);
 
 /**
  * hrtimer_try_to_cancel - try to deactivate a timer
@@ -485,7 +467,6 @@ int hrtimer_try_to_cancel(struct hrtimer *timer)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
 
 /**
  * hrtimer_cancel - cancel a timer and wait for the handler to finish.
@@ -503,10 +484,8 @@ int hrtimer_cancel(struct hrtimer *timer)
 
 		if (ret >= 0)
 			return ret;
-		cpu_relax();
 	}
 }
-EXPORT_SYMBOL_GPL(hrtimer_cancel);
 
 /**
  * hrtimer_get_remaining - get remaining time for the timer
@@ -525,7 +504,6 @@ ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 
 	return rem;
 }
-EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
 
 #ifdef CONFIG_NO_IDLE_HZ
 /**
@@ -582,9 +560,7 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 		clock_id = CLOCK_MONOTONIC;
 
 	timer->base = &bases[clock_id];
-	timer->node.rb_parent = HRTIMER_INACTIVE;
 }
-EXPORT_SYMBOL_GPL(hrtimer_init);
 
 /**
  * hrtimer_get_res - get the timer resolution for a clock
@@ -604,45 +580,54 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(hrtimer_get_res);
 
 /*
  * Expire the per base hrtimer-queue:
  */
 static inline void run_hrtimer_queue(struct hrtimer_base *base)
 {
+	ktime_t now = base->get_time();
 	struct rb_node *node;
 
-	if (!base->first)
-		return;
-
-	if (base->get_softirq_time)
-		base->softirq_time = base->get_softirq_time();
-
 	spin_lock_irq(&base->lock);
 
 	while ((node = base->first)) {
 		struct hrtimer *timer;
-		int (*fn)(struct hrtimer *);
+		int (*fn)(void *);
 		int restart;
+		void *data;
 
 		timer = rb_entry(node, struct hrtimer, node);
-		if (base->softirq_time.tv64 <= timer->expires.tv64)
+		if (now.tv64 <= timer->expires.tv64)
 			break;
 
 		fn = timer->function;
+		data = timer->data;
 		set_curr_timer(base, timer);
+		timer->state = HRTIMER_RUNNING;
 		__remove_hrtimer(timer, base);
 		spin_unlock_irq(&base->lock);
 
-		restart = fn(timer);
+		/*
+		 * fn == NULL is special case for the simplest timer
+		 * variant - wake up process and do not restart:
+		 */
+		if (!fn) {
+			wake_up_process(data);
+			restart = HRTIMER_NORESTART;
+		} else
+			restart = fn(data);
 
 		spin_lock_irq(&base->lock);
 
-		if (restart != HRTIMER_NORESTART) {
-			BUG_ON(hrtimer_active(timer));
+		/* Another CPU has added back the timer */
+		if (timer->state != HRTIMER_RUNNING)
+			continue;
+
+		if (restart == HRTIMER_RESTART)
 			enqueue_hrtimer(timer, base);
-		}
+		else
+			timer->state = HRTIMER_EXPIRED;
 	}
 	set_curr_timer(base, NULL);
 	spin_unlock_irq(&base->lock);
@@ -656,8 +641,6 @@ void hrtimer_run_queues(void)
 	struct hrtimer_base *base = __get_cpu_var(hrtimer_bases);
 	int i;
 
-	hrtimer_get_softirq_time(base);
-
 	for (i = 0; i < MAX_HRTIMER_BASES; i++)
 		run_hrtimer_queue(&base[i]);
 }
@@ -665,69 +648,80 @@ void hrtimer_run_queues(void)
 /*
  * Sleep related functions:
  */
-static int hrtimer_wakeup(struct hrtimer *timer)
+
+/**
+ * schedule_hrtimer - sleep until timeout
+ *
+ * @timer: hrtimer variable initialized with the correct clock base
+ * @mode: timeout value is abs/rel
+ *
+ * Make the current task sleep until @timeout is
+ * elapsed.
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @timeout is guaranteed to
+ * pass before the routine returns. The routine will return 0
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task. In this case the remaining time
+ * will be returned
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ */
+static ktime_t __sched
+schedule_hrtimer(struct hrtimer *timer, const enum hrtimer_mode mode)
 {
-	struct hrtimer_sleeper *t =
-		container_of(timer, struct hrtimer_sleeper, timer);
-	struct task_struct *task = t->task;
+	/* fn stays NULL, meaning single-shot wakeup: */
+	timer->data = current;
 
-	t->task = NULL;
-	if (task)
-		wake_up_process(task);
+	hrtimer_start(timer, timer->expires, mode);
 
-	return HRTIMER_NORESTART;
-}
+	schedule();
+	hrtimer_cancel(timer);
 
-void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, task_t *task)
-{
-	sl->timer.function = hrtimer_wakeup;
-	sl->task = task;
+	/* Return the remaining time: */
+	if (timer->state != HRTIMER_EXPIRED)
+		return ktime_sub(timer->expires, timer->base->get_time());
+	else
+		return (ktime_t) {.tv64 = 0 };
 }
 
-static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
+static inline ktime_t __sched
+schedule_hrtimer_interruptible(struct hrtimer *timer,
+			       const enum hrtimer_mode mode)
 {
-	hrtimer_init_sleeper(t, current);
-
-	do {
-		set_current_state(TASK_INTERRUPTIBLE);
-		hrtimer_start(&t->timer, t->timer.expires, mode);
-
-		schedule();
-
-		hrtimer_cancel(&t->timer);
-		mode = HRTIMER_ABS;
+	set_current_state(TASK_INTERRUPTIBLE);
 
-	} while (t->task && !signal_pending(current));
-
-	return t->task == NULL;
+	return schedule_hrtimer(timer, mode);
 }
 
 static long __sched nanosleep_restart(struct restart_block *restart)
 {
-	struct hrtimer_sleeper t;
 	struct timespec __user *rmtp;
 	struct timespec tu;
-	ktime_t time;
+	void *rfn_save = restart->fn;
+	struct hrtimer timer;
+	ktime_t rem;
 
 	restart->fn = do_no_restart_syscall;
 
-	hrtimer_init(&t.timer, restart->arg3, HRTIMER_ABS);
-	t.timer.expires.tv64 = ((u64)restart->arg1 << 32) | (u64) restart->arg0;
+	hrtimer_init(&timer, (clockid_t) restart->arg3, HRTIMER_ABS);
+
+	timer.expires.tv64 = ((u64)restart->arg1 << 32) | (u64) restart->arg0;
 
-	if (do_nanosleep(&t, HRTIMER_ABS))
+	rem = schedule_hrtimer_interruptible(&timer, HRTIMER_ABS);
+
+	if (rem.tv64 <= 0)
 		return 0;
 
 	rmtp = (struct timespec __user *) restart->arg2;
-	if (rmtp) {
-		time = ktime_sub(t.timer.expires, t.timer.base->get_time());
-		if (time.tv64 <= 0)
-			return 0;
-		tu = ktime_to_timespec(time);
-		if (copy_to_user(rmtp, &tu, sizeof(tu)))
-			return -EFAULT;
-	}
+	tu = ktime_to_timespec(rem);
+	if (rmtp && copy_to_user(rmtp, &tu, sizeof(tu)))
+		return -EFAULT;
 
-	restart->fn = nanosleep_restart;
+	restart->fn = rfn_save;
 
 	/* The other values in restart are already filled in */
 	return -ERESTART_RESTARTBLOCK;
@@ -737,34 +731,33 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 		       const enum hrtimer_mode mode, const clockid_t clockid)
 {
 	struct restart_block *restart;
-	struct hrtimer_sleeper t;
+	struct hrtimer timer;
 	struct timespec tu;
 	ktime_t rem;
 
-	hrtimer_init(&t.timer, clockid, mode);
-	t.timer.expires = timespec_to_ktime(*rqtp);
-	if (do_nanosleep(&t, mode))
+	hrtimer_init(&timer, clockid, mode);
+
+	timer.expires = timespec_to_ktime(*rqtp);
+
+	rem = schedule_hrtimer_interruptible(&timer, mode);
+	if (rem.tv64 <= 0)
 		return 0;
 
 	/* Absolute timers do not update the rmtp value and restart: */
 	if (mode == HRTIMER_ABS)
 		return -ERESTARTNOHAND;
 
-	if (rmtp) {
-		rem = ktime_sub(t.timer.expires, t.timer.base->get_time());
-		if (rem.tv64 <= 0)
-			return 0;
-		tu = ktime_to_timespec(rem);
-		if (copy_to_user(rmtp, &tu, sizeof(tu)))
-			return -EFAULT;
-	}
+	tu = ktime_to_timespec(rem);
+
+	if (rmtp && copy_to_user(rmtp, &tu, sizeof(tu)))
+		return -EFAULT;
 
 	restart = &current_thread_info()->restart_block;
 	restart->fn = nanosleep_restart;
-	restart->arg0 = t.timer.expires.tv64 & 0xFFFFFFFF;
-	restart->arg1 = t.timer.expires.tv64 >> 32;
+	restart->arg0 = timer.expires.tv64 & 0xFFFFFFFF;
+	restart->arg1 = timer.expires.tv64 >> 32;
 	restart->arg2 = (unsigned long) rmtp;
-	restart->arg3 = (unsigned long) t.timer.base->index;
+	restart->arg3 = (unsigned long) timer.base->index;
 
 	return -ERESTART_RESTARTBLOCK;
 }
@@ -842,7 +835,7 @@ static void migrate_hrtimers(int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int hrtimer_cpu_notify(struct notifier_block *self,
+static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
 					unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -866,7 +859,7 @@ static int hrtimer_cpu_notify(struct notifier_block *self,
 
 	return NOTIFY_OK;
 }
 
-static struct notifier_block hrtimers_nb = {
+static struct notifier_block __devinitdata hrtimers_nb = {
 	.notifier_call = hrtimer_cpu_notify,
 };
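
Usage note (not part of the patch): the hunks above change hrtimer_forward() to take only the timer and the interval, and switch the callback type to int (*)(void *) invoked with a separate timer->data pointer. The following is a minimal sketch of a periodic caller against that post-patch interface; the names sample_timer, sample_period and sample_timer_fn are illustrative and do not appear in this file.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer sample_timer;	/* illustrative only */
static ktime_t sample_period;

/* Post-patch callback signature: int (*)(void *), called with timer->data */
static int sample_timer_fn(void *data)
{
	struct hrtimer *timer = data;

	/*
	 * hrtimer_forward() now reads the current time from timer->base
	 * itself and folds any overruns into timer->expires.
	 */
	hrtimer_forward(timer, sample_period);

	/* Ask run_hrtimer_queue() to re-enqueue the timer at the new expiry. */
	return HRTIMER_RESTART;
}

static void sample_timer_setup(void)
{
	sample_period = ktime_set(0, 100 * 1000 * 1000);	/* 100 ms */

	hrtimer_init(&sample_timer, CLOCK_MONOTONIC, HRTIMER_REL);
	sample_timer.function = sample_timer_fn;
	sample_timer.data = &sample_timer;

	hrtimer_start(&sample_timer, sample_period, HRTIMER_REL);
}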
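
Similarly, the fn == NULL fast path added to run_hrtimer_queue() (wake_up_process() on timer->data, no restart) is what the static schedule_hrtimer() helper above relies on. A sketch of the same pattern for a relative, signal-interruptible sleep by a caller outside this file follows; sample_sleep is a hypothetical name, and the sequence simply mirrors schedule_hrtimer_interruptible().

#include <linux/hrtimer.h>
#include <linux/sched.h>

/* Sleep for roughly the given relative timeout; may return early on signals. */
static void sample_sleep(ktime_t timeout)
{
	struct hrtimer timer;

	hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_REL);

	/*
	 * Leaving timer.function NULL selects the wake_up_process()
	 * fast path in run_hrtimer_queue(); data names the sleeper.
	 */
	timer.function = NULL;
	timer.data = current;

	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_start(&timer, timeout, HRTIMER_REL);

	schedule();

	/* Make sure the timer is off the queue before the stack frame goes away. */
	hrtimer_cancel(&timer);
}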