/*
 * linux/kernel/posix_timers.c
 *
 * 2002-10-15  Posix Clocks & timers
 *	by George Anzinger george@mvista.com
 *
 *	Copyright (C) 2002 2003 by MontaVista Software.
 *
 * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
 *	Copyright (C) 2004 Boris Hu
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
 */
/* These are all the functions necessary to implement
 * POSIX clocks & timers
 */
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/time.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/idr.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/vs_cvirt.h>
#ifndef div_long_long_rem
#include <asm/div64.h>

#define div_long_long_rem(dividend,divisor,remainder) ({ \
                u64 result = dividend;                   \
                *remainder = do_div(result,divisor);     \
                result; })

#endif
#define CLOCK_REALTIME_RES TICK_NSEC	/* In nanoseconds. */
static inline u64 mpy_l_X_l_ll(unsigned long mpy1, unsigned long mpy2)
{
        return (u64)mpy1 * mpy2;
}
/*
 * Management arrays for POSIX timers.  Timers are kept in slab memory.
 * Timer ids are allocated by an external routine that keeps track of the
 * id and the timer.  The external interface is:
 *
 * void *idr_find(struct idr *idp, int id);           to find timer_id <id>
 * int idr_get_new(struct idr *idp, void *ptr);       to get a new id and
 *                                                    relate it to <ptr>
 * void idr_remove(struct idr *idp, int id);          to release <id>
 * void idr_init(struct idr *idp);                    to initialize <idp>
 *                                                    which we supply.
 * The idr_get_new *may* call slab for more memory so it must not be
 * called under a spin lock.  Likewise idr_remove may release memory
 * (but it may be ok to do this under a lock...).
 * idr_find is just a memory look up and is quite fast.  A -1 return
 * indicates that the requested id does not exist.
 */
/*
 * Let's keep our timers in a slab cache :-)
 */
static kmem_cache_t *posix_timers_cache;
static struct idr posix_timers_id;
static DEFINE_SPINLOCK(idr_lock);
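
/*
 * For illustration, a sketch (not compiled here) of the allocation
 * pattern these constraints force, as used in sys_timer_create()
 * below: pre-allocate outside any lock, take the spinlock only for
 * the id assignment itself, and retry if another CPU consumed the
 * preallocation:
 *
 *	if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL)))
 *		return -EAGAIN;		// may sleep in slab, no lock held
 *	spin_lock_irq(&idr_lock);
 *	error = idr_get_new(&posix_timers_id, (void *)tmr, &id);
 *	spin_unlock_irq(&idr_lock);
 *	if (error == -EAGAIN)
 *		goto retry;		// preallocation was used up
 */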
/*
 * Just because the timer is not in the timer list does NOT mean it is
 * inactive.  It could be in the "fire" routine getting a new expire time.
 */
#define TIMER_INACTIVE 1
#define TIMER_RETRY 1

#ifdef CONFIG_SMP
# define timer_active(tmr) \
                ((tmr)->it.real.timer.entry.prev != (void *)TIMER_INACTIVE)
# define set_timer_inactive(tmr) \
                do { \
                        (tmr)->it.real.timer.entry.prev = (void *)TIMER_INACTIVE; \
                } while (0)
#else
# define timer_active(tmr) BARFY	// error to use outside of SMP
# define set_timer_inactive(tmr) do { } while (0)
#endif
/*
 * We assume that the new SIGEV_THREAD_ID shares no bits with the other
 * SIGEV values.  Here we put out an error if this assumption fails.
 */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
                       ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif
/*
 * The timer ID is turned into a timer address by idr_find().
 * Verifying a valid ID consists of:
 *
 * a) checking that idr_find() returns other than -1.
 * b) checking that the timer id matches the one in the timer itself.
 * c) that the timer owner is in the caller's thread group.
 */
/*
 * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
 *	    to implement others.  This structure defines the various
 *	    clocks and allows the possibility of adding others.  We
 *	    provide an interface to add clocks to the table and expect
 *	    the "arch" code to add at least one clock that is high
 *	    resolution.  Here we define the standard CLOCK_REALTIME as a
 *	    1/HZ resolution clock.
 *
 * RESOLUTION: Clock resolution is used to round up timer and interval
 *	    times, NOT to report clock times, which are reported with as
 *	    much resolution as the system can muster.  In some cases this
 *	    resolution may depend on the underlying clock hardware and
 *	    may not be quantifiable until run time, and only then is the
 *	    necessary code written.  The standard says we should say
 *	    something about this issue in the documentation...
 *
 * FUNCTIONS: The CLOCKs structure defines possible functions to handle
 *	    various clock functions.  For clocks that use the standard
 *	    system timer code these entries should be NULL.  This will
 *	    allow dispatch without the overhead of indirect function
 *	    calls.  CLOCKS that depend on other sources (e.g. WWV or GPS)
 *	    must supply functions here, even if the function just returns
 *	    ENOSYS.  The standard POSIX timer management code assumes the
 *	    following: 1.) The k_itimer struct (sched.h) is used for the
 *	    timer.  2.) The list, it_lock, it_clock, it_id and it_process
 *	    fields are not modified by timer code.
 *
 *	    At this time all functions EXCEPT clock_nanosleep can be
 *	    redirected by the CLOCKS structure.  Clock_nanosleep is in
 *	    there, but the code ignores it.
 *
 * Permissions: It is assumed that the clock_settime() function defined
 *	    for each clock will take care of permission checks.  Some
 *	    clocks may be settable by any user (i.e. local process
 *	    clocks) others not.  Currently the only settable clock we
 *	    have is CLOCK_REALTIME and its high res counterpart, both of
 *	    which we beg off on and pass to do_sys_settimeofday().
 */
static struct k_clock posix_clocks[MAX_CLOCKS];

/*
 * We only have one real clock that can be set so we need only one abs list,
 * even if we should want to have several clocks with differing resolutions.
 */
static struct k_clock_abs abs_list = {.list = LIST_HEAD_INIT(abs_list.list),
                                      .lock = SPIN_LOCK_UNLOCKED};

static void posix_timer_fn(unsigned long);
static u64 do_posix_clock_monotonic_gettime_parts(
        struct timespec *tp, struct timespec *mo);
int do_posix_clock_monotonic_gettime(struct timespec *tp);
static int do_posix_clock_monotonic_get(clockid_t, struct timespec *tp);

static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags);
static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
        spin_unlock_irqrestore(&timr->it_lock, flags);
}
/*
 * Call the k_clock hook function if non-null, or the default function.
 */
#define CLOCK_DISPATCH(clock, call, arglist) \
        ((clock) < 0 ? posix_cpu_##call arglist : \
         (posix_clocks[clock].call != NULL \
          ? (*posix_clocks[clock].call) arglist : common_##call arglist))
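
/*
 * For example, with a k_clock whose clock_get hook was left NULL
 * (CLOCK_REALTIME here), the dispatch
 *
 *	error = CLOCK_DISPATCH(which_clock, clock_get, (which_clock, &ts));
 *
 * selects common_clock_get(which_clock, &ts) at run time, while a
 * negative clockid (a CPU clock) routes to
 * posix_cpu_clock_get(which_clock, &ts) instead.
 */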
/*
 * Default clock hook functions when the struct k_clock passed
 * to register_posix_clock leaves a function pointer null.
 *
 * The function common_CALL is the default implementation for
 * the function pointer CALL in struct k_clock.
 */

static inline int common_clock_getres(clockid_t which_clock,
                                      struct timespec *tp)
{
        tp->tv_sec = 0;
        tp->tv_nsec = posix_clocks[which_clock].res;
        return 0;
}
static inline int common_clock_get(clockid_t which_clock, struct timespec *tp)
{
        getnstimeofday(tp);
        return 0;
}
static inline int common_clock_set(clockid_t which_clock, struct timespec *tp)
{
        return do_sys_settimeofday(tp, NULL);
}
static inline int common_timer_create(struct k_itimer *new_timer)
{
        INIT_LIST_HEAD(&new_timer->it.real.abs_timer_entry);
        init_timer(&new_timer->it.real.timer);
        new_timer->it.real.timer.data = (unsigned long) new_timer;
        new_timer->it.real.timer.function = posix_timer_fn;
        set_timer_inactive(new_timer);
        return 0;
}
/*
 * These ones are defined below.
 */
static int common_nsleep(clockid_t, int flags, struct timespec *t);
static void common_timer_get(struct k_itimer *, struct itimerspec *);
static int common_timer_set(struct k_itimer *, int,
                            struct itimerspec *, struct itimerspec *);
static int common_timer_del(struct k_itimer *timer);
/*
 * Return nonzero iff we know a priori this clockid_t value is bogus.
 */
static inline int invalid_clockid(clockid_t which_clock)
{
        if (which_clock < 0)	/* CPU clock, posix_cpu_* will check it */
                return 0;
        if ((unsigned) which_clock >= MAX_CLOCKS)
                return 1;
        if (posix_clocks[which_clock].clock_getres != NULL)
                return 0;
#ifndef CLOCK_DISPATCH_DIRECT
        if (posix_clocks[which_clock].res != 0)
                return 0;
#endif
        return 1;
}
/*
 * Initialize everything, well, just everything in Posix clocks/timers ;)
 */
static __init int init_posix_timers(void)
{
        struct k_clock clock_realtime = {.res = CLOCK_REALTIME_RES,
                                         .abs_struct = &abs_list
        };
        struct k_clock clock_monotonic = {.res = CLOCK_REALTIME_RES,
                .abs_struct = NULL,
                .clock_get = do_posix_clock_monotonic_get,
                .clock_set = do_posix_clock_nosettime
        };

        register_posix_clock(CLOCK_REALTIME, &clock_realtime);
        register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);

        posix_timers_cache = kmem_cache_create("posix_timers_cache",
                                sizeof (struct k_itimer), 0, 0, NULL, NULL);
        idr_init(&posix_timers_id);
        return 0;
}

__initcall(init_posix_timers);
static void tstojiffie(struct timespec *tp, int res, u64 *jiff)
{
        long sec = tp->tv_sec;
        long nsec = tp->tv_nsec + res - 1;

        if (nsec > NSEC_PER_SEC) {
                sec++;
                nsec -= NSEC_PER_SEC;
        }

        /*
         * The scaling constants are defined in <linux/time.h>
         * The difference between there and here is that we do the
         * res rounding and compute a 64-bit result (well so does that
         * but it then throws away the high bits).
         */
        *jiff = (mpy_l_X_l_ll(sec, SEC_CONVERSION) +
                 (mpy_l_X_l_ll(nsec, NSEC_CONVERSION) >>
                  (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
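
/*
 * A worked example of the res round up (a sketch; the exact constants
 * depend on HZ): with res == TICK_NSEC, a request of
 * {.tv_sec = 0, .tv_nsec = 1} becomes nsec = 1 + res - 1 == res, i.e.
 * one full tick, so the timer can never fire early.  A request of
 * {0, 0} still scales to zero jiffies because of the res - 1 term.
 */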
/*
 * This function adjusts the timer as needed as a result of the clock
 * being set.  It should only be called for absolute timers, and then
 * under the abs_list lock.  It computes the time difference and sets
 * the new jiffies value in the timer.  It also updates the timer's
 * reference wall_to_monotonic value.  It is complicated by the fact
 * that tstojiffie() only handles positive times and it needs to work
 * with both positive and negative times.  Also, for negative offsets,
 * we need to defeat the res round up.
 *
 * Return is true if there is a new time, else false.
 */
static long add_clockset_delta(struct k_itimer *timr,
                               struct timespec *new_wall_to)
{
        struct timespec delta;
        int sign = 0;
        u64 exp;

        set_normalized_timespec(&delta,
                                new_wall_to->tv_sec -
                                timr->it.real.wall_to_prev.tv_sec,
                                new_wall_to->tv_nsec -
                                timr->it.real.wall_to_prev.tv_nsec);
        if (likely(!(delta.tv_sec | delta.tv_nsec)))
                return 0;
        if (delta.tv_sec < 0) {
                set_normalized_timespec(&delta,
                                        -delta.tv_sec,
                                        1 - delta.tv_nsec -
                                        posix_clocks[timr->it_clock].res);
                sign++;
        }
        tstojiffie(&delta, posix_clocks[timr->it_clock].res, &exp);
        timr->it.real.wall_to_prev = *new_wall_to;
        timr->it.real.timer.expires += (sign ? -exp : exp);
        return 1;
}
static void remove_from_abslist(struct k_itimer *timr)
{
        if (!list_empty(&timr->it.real.abs_timer_entry)) {
                spin_lock(&abs_list.lock);
                list_del_init(&timr->it.real.abs_timer_entry);
                spin_unlock(&abs_list.lock);
        }
}
static void schedule_next_timer(struct k_itimer *timr)
{
        struct timespec new_wall_to;
        struct now_struct now;
        unsigned long seq;

        /*
         * Set up the timer for the next interval (if there is one).
         * Note: this code uses the abs_timer_lock to protect
         * it.real.wall_to_prev and must hold it until exp is set, not
         * exactly obvious why, but safe and proper.
         *
         * This function is used for CLOCK_REALTIME* and
         * CLOCK_MONOTONIC* timers.  If we ever want to handle other
         * CLOCKs, the calling code (do_schedule_next_timer) would need
         * to pull the "clock" info from the timer and dispatch the
         * "other" CLOCKs "next timer" code (which, I suppose, should
         * also be added to the k_clock structure).
         */
        if (!timr->it.real.incr)
                return;

        do {
                seq = read_seqbegin(&xtime_lock);
                new_wall_to = wall_to_monotonic;
                posix_get_now(&now);
        } while (read_seqretry(&xtime_lock, seq));

        if (!list_empty(&timr->it.real.abs_timer_entry)) {
                spin_lock(&abs_list.lock);
                add_clockset_delta(timr, &new_wall_to);

                posix_bump_timer(timr, now);

                spin_unlock(&abs_list.lock);
        } else {
                posix_bump_timer(timr, now);
        }
        timr->it_overrun_last = timr->it_overrun;
        timr->it_overrun = -1;
        ++timr->it_requeue_pending;
        add_timer(&timr->it.real.timer);
}
/*
 * This function is exported for use by the signal delivery code.  It is
 * called just prior to the info block being released and passes that
 * block to us.  Its function is to update the overrun entry AND to
 * restart the timer.  It should only be called if the timer is to be
 * restarted (i.e. we have flagged this in the sys_private entry of the
 * info block).
 *
 * To protect against the timer going away while the interrupt is queued,
 * we require that the it_requeue_pending flag be set.
 */
void do_schedule_next_timer(struct siginfo *info)
{
        struct k_itimer *timr;
        unsigned long flags;

        timr = lock_timer(info->si_tid, &flags);

        if (!timr || timr->it_requeue_pending != info->si_sys_private)
                goto exit;

        if (timr->it_clock < 0)	/* CPU clock */
                posix_cpu_timer_schedule(timr);
        else
                schedule_next_timer(timr);
        info->si_overrun = timr->it_overrun_last;
exit:
        if (timr)
                unlock_timer(timr, flags);
}
int posix_timer_event(struct k_itimer *timr, int si_private)
{
        memset(&timr->sigq->info, 0, sizeof(siginfo_t));
        timr->sigq->info.si_sys_private = si_private;
        /*
         * Send signal to the process that owns this timer.
         *
         * This code assumes that all the possible abs_lists share the
         * same lock (there is only one list at this time). If this is
         * not the case, the CLOCK info would need to be used to find
         * the proper abs list lock.
         */

        timr->sigq->info.si_signo = timr->it_sigev_signo;
        timr->sigq->info.si_errno = 0;
        timr->sigq->info.si_code = SI_TIMER;
        timr->sigq->info.si_tid = timr->it_id;
        timr->sigq->info.si_value = timr->it_sigev_value;
        if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
                if (unlikely(timr->it_process->flags & PF_EXITING)) {
                        timr->it_sigev_notify = SIGEV_SIGNAL;
                        put_task_struct(timr->it_process);
                        timr->it_process = timr->it_process->group_leader;
                        goto group;
                }
                return send_sigqueue(timr->it_sigev_signo, timr->sigq,
                                     timr->it_process);
        } else {
group:
                return send_group_sigqueue(timr->it_sigev_signo, timr->sigq,
                                           timr->it_process);
        }
}
EXPORT_SYMBOL_GPL(posix_timer_event);
/*
 * This function gets called when a POSIX.1b interval timer expires.  It
 * is used as a callback from the kernel internal timer.  The
 * run_timer_list code ALWAYS calls with interrupts on.
 *
 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
 */
static void posix_timer_fn(unsigned long __data)
{
        struct k_itimer *timr = (struct k_itimer *) __data;
        unsigned long flags;
        unsigned long seq;
        struct timespec delta, new_wall_to;
        u64 exp = 0;
        int do_notify = 1;

        spin_lock_irqsave(&timr->it_lock, flags);
        set_timer_inactive(timr);
        if (!list_empty(&timr->it.real.abs_timer_entry)) {
                spin_lock(&abs_list.lock);
                do {
                        seq = read_seqbegin(&xtime_lock);
                        new_wall_to = wall_to_monotonic;
                } while (read_seqretry(&xtime_lock, seq));
                set_normalized_timespec(&delta,
                                        new_wall_to.tv_sec -
                                        timr->it.real.wall_to_prev.tv_sec,
                                        new_wall_to.tv_nsec -
                                        timr->it.real.wall_to_prev.tv_nsec);
                if (likely((delta.tv_sec | delta.tv_nsec) == 0)) {
                        /* do nothing, timer is on time */
                } else if (delta.tv_sec < 0) {
                        /* do nothing, timer is already late */
                } else {
                        /* timer is early due to a clock set */
                        tstojiffie(&delta,
                                   posix_clocks[timr->it_clock].res,
                                   &exp);
                        timr->it.real.wall_to_prev = new_wall_to;
                        timr->it.real.timer.expires += exp;
                        add_timer(&timr->it.real.timer);
                        do_notify = 0;
                }
                spin_unlock(&abs_list.lock);

        }
        if (do_notify) {
                int si_private = 0;

                if (timr->it.real.incr)
                        si_private = ++timr->it_requeue_pending;
                else
                        remove_from_abslist(timr);

                if (posix_timer_event(timr, si_private))
                        /*
                         * signal was not sent because of sig_ignore;
                         * we will not get a call back to restart it AND
                         * it should be restarted.
                         */
                        schedule_next_timer(timr);
        }
        unlock_timer(timr, flags); /* hold thru abs lock to keep irq off */
}

static inline struct task_struct * good_sigevent(sigevent_t * event)
{
        struct task_struct *rtn = current->group_leader;

        if ((event->sigev_notify & SIGEV_THREAD_ID) &&
            (!(rtn = find_task_by_real_pid(event->sigev_notify_thread_id)) ||
             rtn->tgid != current->tgid ||
             (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
                return NULL;

        if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
            ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
                return NULL;

        return rtn;
}
void register_posix_clock(clockid_t clock_id, struct k_clock *new_clock)
{
        if ((unsigned) clock_id >= MAX_CLOCKS) {
                printk("POSIX clock register failed for clock_id %d\n",
                       clock_id);
                return;
        }

        posix_clocks[clock_id] = *new_clock;
}
EXPORT_SYMBOL_GPL(register_posix_clock);
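
/*
 * A registration sketch (hypothetical arch code, not part of this
 * file): an architecture with a high resolution clock source could
 * install its own clock under some clockid of its choosing and let
 * any hooks it leaves NULL fall back to the common_* defaults via
 * CLOCK_DISPATCH.  The names below are illustrative only:
 *
 *	static struct k_clock clock_hr = {
 *		.res = 1000,			// hypothetical 1 us
 *		.clock_get = arch_hr_clock_get,	// hypothetical hook
 *		.clock_set = do_posix_clock_nosettime,
 *	};
 *
 *	register_posix_clock(CLOCK_REALTIME_HR, &clock_hr);
 */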

static struct k_itimer * alloc_posix_timer(void)
{
        struct k_itimer *tmr;
        tmr = kmem_cache_alloc(posix_timers_cache, GFP_KERNEL);
        if (!tmr)
                return tmr;
        memset(tmr, 0, sizeof (struct k_itimer));
        if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
                kmem_cache_free(posix_timers_cache, tmr);
                tmr = NULL;
        }
        return tmr;
}
#define IT_ID_SET	1
#define IT_ID_NOT_SET	0
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
        if (it_id_set) {
                unsigned long flags;
                spin_lock_irqsave(&idr_lock, flags);
                idr_remove(&posix_timers_id, tmr->it_id);
                spin_unlock_irqrestore(&idr_lock, flags);
        }
        sigqueue_free(tmr->sigq);
        if (unlikely(tmr->it_process) &&
            tmr->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
                put_task_struct(tmr->it_process);
        kmem_cache_free(posix_timers_cache, tmr);
}
/* Create a POSIX.1b interval timer. */

asmlinkage long
sys_timer_create(clockid_t which_clock,
                 struct sigevent __user *timer_event_spec,
                 timer_t __user * created_timer_id)
{
        int error = 0;
        struct k_itimer *new_timer = NULL;
        int new_timer_id;
        struct task_struct *process = NULL;
        unsigned long flags;
        sigevent_t event;
        int it_id_set = IT_ID_NOT_SET;

        if (invalid_clockid(which_clock))
                return -EINVAL;

        new_timer = alloc_posix_timer();
        if (unlikely(!new_timer))
                return -EAGAIN;

        spin_lock_init(&new_timer->it_lock);
retry:
        if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
                error = -EAGAIN;
                goto out;
        }
        spin_lock_irq(&idr_lock);
        error = idr_get_new(&posix_timers_id,
                            (void *) new_timer,
                            &new_timer_id);
        spin_unlock_irq(&idr_lock);
        if (error == -EAGAIN)
                goto retry;
        else if (error) {
                /*
                 * Weird looking, but we return EAGAIN if the IDR is
                 * full (proper POSIX return value for this)
                 */
                error = -EAGAIN;
                goto out;
        }

        it_id_set = IT_ID_SET;
        new_timer->it_id = (timer_t) new_timer_id;
        new_timer->it_clock = which_clock;
        new_timer->it_overrun = -1;
        error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
        if (error)
                goto out;

        /*
         * return the timer_id now.  The next step is hard to
         * back out if there is an error.
         */
        if (copy_to_user(created_timer_id,
                         &new_timer_id, sizeof (new_timer_id))) {
                error = -EFAULT;
                goto out;
        }
        if (timer_event_spec) {
                if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
                        error = -EFAULT;
                        goto out;
                }
                new_timer->it_sigev_notify = event.sigev_notify;
                new_timer->it_sigev_signo = event.sigev_signo;
                new_timer->it_sigev_value = event.sigev_value;

                read_lock(&tasklist_lock);
                if ((process = good_sigevent(&event))) {
                        /*
                         * We may be setting up this process for another
                         * thread.  It may be exiting.  To catch this
                         * case we check the PF_EXITING flag.  If
                         * the flag is not set, the siglock will catch
                         * him before it is too late (in exit_itimers).
                         *
                         * The exec case is a bit more involved but easy
                         * to code.  If the process is in our thread
                         * group (and it must be or we would not allow
                         * it here) and is doing an exec, it will cause
                         * us to be killed.  In this case it will wait
                         * for us to die which means we can finish this
                         * linkage with our last gasp. I.e. no code :)
                         */
                        spin_lock_irqsave(&process->sighand->siglock, flags);
                        if (!(process->flags & PF_EXITING)) {
                                new_timer->it_process = process;
                                list_add(&new_timer->list,
                                         &process->signal->posix_timers);
                                spin_unlock_irqrestore(&process->sighand->siglock, flags);
                                if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
                                        get_task_struct(process);
                        } else {
                                spin_unlock_irqrestore(&process->sighand->siglock, flags);
                                process = NULL;
                        }
                }
                read_unlock(&tasklist_lock);
                if (!process) {
                        error = -EINVAL;
                        goto out;
                }
        } else {
                new_timer->it_sigev_notify = SIGEV_SIGNAL;
                new_timer->it_sigev_signo = SIGALRM;
                new_timer->it_sigev_value.sival_int = new_timer->it_id;
                process = current->group_leader;
                spin_lock_irqsave(&process->sighand->siglock, flags);
                new_timer->it_process = process;
                list_add(&new_timer->list, &process->signal->posix_timers);
                spin_unlock_irqrestore(&process->sighand->siglock, flags);
        }

        /*
         * In the case of the timer belonging to another task, after
         * the task is unlocked, the timer is owned by the other task
         * and may cease to exist at any time.  Don't use or modify
         * new_timer after the unlock call.
         */

out:
        if (error)
                release_posix_timer(new_timer, it_id_set);

        return error;
}
/*
 * good_timespec
 *
 * This function checks the elements of a timespec structure.
 *
 * Arguments:
 * ts	: Pointer to the timespec structure to check
 *
 * Return value:
 * If a NULL pointer was passed in, or the tv_nsec field was less than 0
 * or not less than NSEC_PER_SEC, or the tv_sec field was less than 0,
 * this function returns 0. Otherwise it returns 1.
 */
static int good_timespec(const struct timespec *ts)
{
        if ((!ts) || (ts->tv_sec < 0) ||
            ((unsigned) ts->tv_nsec >= NSEC_PER_SEC))
                return 0;
        return 1;
}
/*
 * Locking issues: We need to protect the result of the id look up until
 * we get the timer locked down so it is not deleted under us.  The
 * removal is done under the idr spinlock so we use that here to bridge
 * the find to the timer lock.  To avoid a deadlock, the timer id MUST
 * be released without holding the timer lock.
 */
static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
{
        struct k_itimer *timr;
        /*
         * Watch out here.  We do an irqsave on the idr_lock and pass the
         * flags part over to the timer lock.  Must not let interrupts in
         * while we are moving the lock.
         */

        spin_lock_irqsave(&idr_lock, *flags);
        timr = (struct k_itimer *) idr_find(&posix_timers_id, (int) timer_id);
        if (timr) {
                spin_lock(&timr->it_lock);
                spin_unlock(&idr_lock);

                if ((timr->it_id != timer_id) || !(timr->it_process) ||
                    timr->it_process->tgid != current->tgid) {
                        unlock_timer(timr, *flags);
                        timr = NULL;
                }
        } else
                spin_unlock_irqrestore(&idr_lock, *flags);

        return timr;
}
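
/*
 * The resulting caller pattern (a sketch; sys_timer_gettime() below is
 * the real thing): the id lookup and the it_lock acquisition appear to
 * the caller as one atomic step, with irqs off for the duration.
 *
 *	unsigned long flags;
 *	struct k_itimer *timr = lock_timer(timer_id, &flags);
 *	if (!timr)
 *		return -EINVAL;		// stale or foreign timer id
 *	...operate on timr with it_lock held...
 *	unlock_timer(timr, flags);
 */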
/*
 * Get the time remaining on a POSIX.1b interval timer.  This function
 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
 * mess with irq.
 *
 * We have a couple of messes to clean up here.  First there is the case
 * of a timer that has a requeue pending.  These timers should appear to
 * be in the timer list with an expiry as if we were to requeue them
 * now.
 *
 * The second issue is the SIGEV_NONE timer which may be active but is
 * not really ever put in the timer list (to save system resources).
 * This timer may be expired, and if so, we will do it here.  Otherwise
 * it is the same as a requeue pending timer with respect to what we
 * should report.
 */
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
        unsigned long expires;
        struct now_struct now;

        do
                expires = timr->it.real.timer.expires;
        while ((volatile long) (timr->it.real.timer.expires) != expires);

        posix_get_now(&now);

        if (expires &&
            ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) &&
            !timr->it.real.incr &&
            posix_time_before(&timr->it.real.timer, &now))
                timr->it.real.timer.expires = expires = 0;
        if (expires) {
                if (timr->it_requeue_pending & REQUEUE_PENDING ||
                    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                        posix_bump_timer(timr, now);
                        expires = timr->it.real.timer.expires;
                } else if (!timer_pending(&timr->it.real.timer))
                        expires = 0;
                if (expires)
                        expires -= now.jiffies;
        }
        jiffies_to_timespec(expires, &cur_setting->it_value);
        jiffies_to_timespec(timr->it.real.incr, &cur_setting->it_interval);

        if (cur_setting->it_value.tv_sec < 0) {
                cur_setting->it_value.tv_nsec = 1;
                cur_setting->it_value.tv_sec = 0;
        }
}
/* Get the time remaining on a POSIX.1b interval timer. */
asmlinkage long
sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
{
        struct k_itimer *timr;
        struct itimerspec cur_setting;
        unsigned long flags;

        timr = lock_timer(timer_id, &flags);
        if (!timr)
                return -EINVAL;

        CLOCK_DISPATCH(timr->it_clock, timer_get, (timr, &cur_setting));

        unlock_timer(timr, flags);

        if (copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
                return -EFAULT;

        return 0;
}
/*
 * Get the number of overruns of a POSIX.1b interval timer.  This is to
 * be the overrun of the timer last delivered.  At the same time we are
 * accumulating overruns on the next timer.  The overrun is frozen when
 * the signal is delivered, either at the notify time (if the info block
 * is not queued) or at the actual delivery time (as we are informed by
 * the call back to do_schedule_next_timer()).  So all we need to do is
 * to pick up the frozen overrun.
 */
asmlinkage long
sys_timer_getoverrun(timer_t timer_id)
{
        struct k_itimer *timr;
        int overrun;
        unsigned long flags;

        timr = lock_timer(timer_id, &flags);
        if (!timr)
                return -EINVAL;

        overrun = timr->it_overrun_last;
        unlock_timer(timr, flags);

        return overrun;
}
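
/*
 * From user space the semantics look like this (a hypothetical
 * snippet, not kernel code): for a 10 ms interval timer whose signal
 * was delivered 30 ms late, the handler can recover the missed
 * expirations:
 *
 *	int missed = timer_getoverrun(timerid);	// 3 in this example
 *
 * The value stays frozen for this delivery; overruns accumulating for
 * the next expiry are kept separately in it_overrun.
 */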

/*
 * Adjust for absolute time.
 *
 * If absolute time is given and it is not CLOCK_MONOTONIC, we need to
 * adjust for the offset between the timer clock (CLOCK_MONOTONIC) and
 * whatever clock is being used.
 *
 * If it is relative time, we need to add the current (CLOCK_MONOTONIC)
 * time to it to get the proper time for the timer.
 */
static int adjust_abs_time(struct k_clock *clock, struct timespec *tp,
                           int abs, u64 *exp, struct timespec *wall_to)
{
        struct timespec now;
        struct timespec oc = *tp;
        u64 jiffies_64_f;
        int rtn = 0;

        if (abs) {
                /*
                 * The mask picks up the 4 basic clocks
                 */
                if (!((clock - &posix_clocks[0]) & ~CLOCKS_MASK)) {
                        jiffies_64_f = do_posix_clock_monotonic_gettime_parts(
                                &now, wall_to);
                        /*
                         * If we are doing a MONOTONIC clock
                         */
                        if ((clock - &posix_clocks[0]) & CLOCKS_MONO) {
                                now.tv_sec += wall_to->tv_sec;
                                now.tv_nsec += wall_to->tv_nsec;
                        }
                } else {
                        /*
                         * Not one of the basic clocks
                         */
                        clock->clock_get(clock - posix_clocks, &now);
                        jiffies_64_f = get_jiffies_64();
                }
                /*
                 * Take away now to get delta and normalize
                 */
                oc.tv_sec -= now.tv_sec;
                oc.tv_nsec -= now.tv_nsec;
                while ((oc.tv_nsec - NSEC_PER_SEC) >= 0) {
                        oc.tv_nsec -= NSEC_PER_SEC;
                        oc.tv_sec++;
                }
                while ((oc.tv_nsec) < 0) {
                        oc.tv_nsec += NSEC_PER_SEC;
                        oc.tv_sec--;
                }
        } else {
                jiffies_64_f = get_jiffies_64();
        }
        /*
         * Check if the requested time is prior to now (if so set now)
         */
        if (oc.tv_sec < 0)
                oc.tv_sec = oc.tv_nsec = 0;

        if (oc.tv_sec | oc.tv_nsec)
                set_normalized_timespec(&oc, oc.tv_sec,
                                        oc.tv_nsec + clock->res);
        tstojiffie(&oc, clock->res, exp);
        /*
         * Check if the requested time is more than the timer code
         * can handle (if so we error out but return the value too).
         */
        if (*exp > ((u64)MAX_JIFFY_OFFSET))
                /*
                 * This is a considered response, not exactly in
                 * line with the standard (in fact it is silent on
                 * possible overflows).  We assume such a large
                 * value is ALMOST always a programming error and
                 * try not to compound it by setting a really dumb
                 * value.
                 */
                rtn = -EINVAL;
        /*
         * return the actual jiffies expire time, full 64 bits
         */
        *exp += jiffies_64_f;
        return rtn;
}
/* Set a POSIX.1b interval timer. */
/* timr->it_lock is taken. */
static inline int
common_timer_set(struct k_itimer *timr, int flags,
                 struct itimerspec *new_setting, struct itimerspec *old_setting)
{
        struct k_clock *clock = &posix_clocks[timr->it_clock];
        u64 expire_64;

        if (old_setting)
                common_timer_get(timr, old_setting);

        /* disable the timer */
        timr->it.real.incr = 0;
        /*
         * careful here.  If smp we could be in the "fire" routine which will
         * be spinning as we hold the lock.  But this is ONLY an SMP issue.
         */
#ifdef CONFIG_SMP
        if (timer_active(timr) && !del_timer(&timr->it.real.timer))
                /*
                 * It can only be active if on another cpu.  Since
                 * we have cleared the interval stuff above, it should
                 * clear once we release the spin lock.  Of course once
                 * we do that anything could happen, including the
                 * complete melt down of the timer.  So return with
                 * a "retry" exit status.
                 */
                return TIMER_RETRY;

        set_timer_inactive(timr);
#else
        del_timer(&timr->it.real.timer);
#endif
        remove_from_abslist(timr);

        timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timr->it_overrun_last = 0;
        timr->it_overrun = -1;
        /*
         * switch off the timer when it_value is zero
         */
        if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec) {
                timr->it.real.timer.expires = 0;
                return 0;
        }

        if (adjust_abs_time(clock,
                            &new_setting->it_value, flags & TIMER_ABSTIME,
                            &expire_64, &(timr->it.real.wall_to_prev))) {
                return -EINVAL;
        }
        timr->it.real.timer.expires = (unsigned long)expire_64;
        tstojiffie(&new_setting->it_interval, clock->res, &expire_64);
        timr->it.real.incr = (unsigned long)expire_64;

        /*
         * We do not even queue SIGEV_NONE timers!  But we do put them
         * in the abs list so we can do that right.
         */
        if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE))
                add_timer(&timr->it.real.timer);

        if (flags & TIMER_ABSTIME && clock->abs_struct) {
                spin_lock(&clock->abs_struct->lock);
                list_add_tail(&(timr->it.real.abs_timer_entry),
                              &(clock->abs_struct->list));
                spin_unlock(&clock->abs_struct->lock);
        }
        return 0;
}
/* Set a POSIX.1b interval timer */
asmlinkage long
sys_timer_settime(timer_t timer_id, int flags,
                  const struct itimerspec __user *new_setting,
                  struct itimerspec __user *old_setting)
{
        struct k_itimer *timr;
        struct itimerspec new_spec, old_spec;
        int error = 0;
        unsigned long flag;
        struct itimerspec *rtn = old_setting ? &old_spec : NULL;

        if (!new_setting)
                return -EINVAL;

        if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
                return -EFAULT;

        if ((!good_timespec(&new_spec.it_interval)) ||
            (!good_timespec(&new_spec.it_value)))
                return -EINVAL;
retry:
        timr = lock_timer(timer_id, &flag);
        if (!timr)
                return -EINVAL;

        error = CLOCK_DISPATCH(timr->it_clock, timer_set,
                               (timr, flags, &new_spec, rtn));

        unlock_timer(timr, flag);
        if (error == TIMER_RETRY) {
                rtn = NULL;	// We already got the old time...
                goto retry;
        }

        if (old_setting && !error && copy_to_user(old_setting,
                                                  &old_spec, sizeof (old_spec)))
                error = -EFAULT;

        return error;
}
static inline int common_timer_del(struct k_itimer *timer)
{
        timer->it.real.incr = 0;
#ifdef CONFIG_SMP
        if (timer_active(timer) && !del_timer(&timer->it.real.timer))
                /*
                 * It can only be active if on another cpu.  Since
                 * we have cleared the interval stuff above, it should
                 * clear once we release the spin lock.  Of course once
                 * we do that anything could happen, including the
                 * complete melt down of the timer.  So return with
                 * a "retry" exit status.
                 */
                return TIMER_RETRY;
#else
        del_timer(&timer->it.real.timer);
#endif
        remove_from_abslist(timer);

        return 0;
}

static inline int timer_delete_hook(struct k_itimer *timer)
{
        return CLOCK_DISPATCH(timer->it_clock, timer_del, (timer));
}
/* Delete a POSIX.1b interval timer. */
asmlinkage long
sys_timer_delete(timer_t timer_id)
{
        struct k_itimer *timer;
        unsigned long flags;

#ifdef CONFIG_SMP
        int error;
retry_delete:
#endif
        timer = lock_timer(timer_id, &flags);
        if (!timer)
                return -EINVAL;

#ifdef CONFIG_SMP
        error = timer_delete_hook(timer);

        if (error == TIMER_RETRY) {
                unlock_timer(timer, flags);
                goto retry_delete;
        }
#else
        timer_delete_hook(timer);
#endif
        spin_lock(&current->sighand->siglock);
        list_del(&timer->list);
        spin_unlock(&current->sighand->siglock);
        /*
         * This keeps any tasks waiting on the spin lock from thinking
         * they got something (see the lock code above).
         */
        if (timer->it_process) {
                if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
                        put_task_struct(timer->it_process);
                timer->it_process = NULL;
        }
        unlock_timer(timer, flags);
        release_posix_timer(timer, IT_ID_SET);
        return 0;
}
/*
 * Delete a timer owned by the process; used by exit_itimers.
 */
static inline void itimer_delete(struct k_itimer *timer)
{
        unsigned long flags;

#ifdef CONFIG_SMP
        int error;
retry_delete:
#endif
        spin_lock_irqsave(&timer->it_lock, flags);

#ifdef CONFIG_SMP
        error = timer_delete_hook(timer);

        if (error == TIMER_RETRY) {
                unlock_timer(timer, flags);
                goto retry_delete;
        }
#else
        timer_delete_hook(timer);
#endif
        list_del(&timer->list);
        /*
         * This keeps any tasks waiting on the spin lock from thinking
         * they got something (see the lock code above).
         */
        if (timer->it_process) {
                if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
                        put_task_struct(timer->it_process);
                timer->it_process = NULL;
        }
        unlock_timer(timer, flags);
        release_posix_timer(timer, IT_ID_SET);
}
/*
 * This is called by __exit_signal, only when there are no more
 * references to the shared signal_struct.
 */
void exit_itimers(struct signal_struct *sig)
{
        struct k_itimer *tmr;

        while (!list_empty(&sig->posix_timers)) {
                tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
                itimer_delete(tmr);
        }
        del_timer_sync(&sig->real_timer);
}
/*
 * And now for the "clock" calls
 *
 * These functions are called both from timer functions (with the timer
 * spin_lock_irq() held) and from clock calls with no locking.  They must
 * use the save flags versions of locks.
 */

/*
 * We do ticks here to avoid the irq lock (they take sooo long).
 * The seqlock is great here.  Since we are a reader, we don't really care
 * if we are interrupted since we don't take locks that will stall us or
 * any other cpu.  Voila, no irq lock is needed.
 */
static u64 do_posix_clock_monotonic_gettime_parts(
        struct timespec *tp, struct timespec *mo)
{
        u64 jiff;
        unsigned int seq;

        do {
                seq = read_seqbegin(&xtime_lock);
                getnstimeofday(tp);
                *mo = wall_to_monotonic;
                jiff = jiffies_64;

        } while (read_seqretry(&xtime_lock, seq));

        return jiff;
}
static int do_posix_clock_monotonic_get(clockid_t clock, struct timespec *tp)
{
        struct timespec wall_to_mono;

        do_posix_clock_monotonic_gettime_parts(tp, &wall_to_mono);

        tp->tv_sec += wall_to_mono.tv_sec;
        tp->tv_nsec += wall_to_mono.tv_nsec;

        if ((tp->tv_nsec - NSEC_PER_SEC) >= 0) {
                tp->tv_nsec -= NSEC_PER_SEC;
                tp->tv_sec++;
        }
        return 0;
}

int do_posix_clock_monotonic_gettime(struct timespec *tp)
{
        return do_posix_clock_monotonic_get(CLOCK_MONOTONIC, tp);
}
int do_posix_clock_nosettime(clockid_t clockid, struct timespec *tp)
{
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_posix_clock_nosettime);

int do_posix_clock_notimer_create(struct k_itimer *timer)
{
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_posix_clock_notimer_create);

int do_posix_clock_nonanosleep(clockid_t clock, int flags, struct timespec *t)
{
#ifndef ENOTSUP
        return -EOPNOTSUPP;	/* aka ENOTSUP in userland for POSIX */
#else	/* parisc does define it separately. */
        return -ENOTSUP;
#endif
}
EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
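
/*
 * These stubs let a clock declare an operation unsupported instead of
 * inheriting the common_* default via CLOCK_DISPATCH.  A sketch of a
 * hypothetical read-only clock that also refuses timers and sleeps:
 *
 *	static struct k_clock clock_readonly = {
 *		.res = CLOCK_REALTIME_RES,
 *		.clock_set = do_posix_clock_nosettime,
 *		.timer_create = do_posix_clock_notimer_create,
 *		.nsleep = do_posix_clock_nonanosleep,
 *	};
 */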

asmlinkage long
sys_clock_settime(clockid_t which_clock, const struct timespec __user *tp)
{
        struct timespec new_tp;

        if (invalid_clockid(which_clock))
                return -EINVAL;
        if (copy_from_user(&new_tp, tp, sizeof (*tp)))
                return -EFAULT;

        return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
}
asmlinkage long
sys_clock_gettime(clockid_t which_clock, struct timespec __user *tp)
{
        struct timespec kernel_tp;
        int error;

        if (invalid_clockid(which_clock))
                return -EINVAL;
        error = CLOCK_DISPATCH(which_clock, clock_get,
                               (which_clock, &kernel_tp));
        if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
                error = -EFAULT;

        return error;
}
asmlinkage long
sys_clock_getres(clockid_t which_clock, struct timespec __user *tp)
{
        struct timespec rtn_tp;
        int error;

        if (invalid_clockid(which_clock))
                return -EINVAL;

        error = CLOCK_DISPATCH(which_clock, clock_getres,
                               (which_clock, &rtn_tp));

        if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp))) {
                error = -EFAULT;
        }

        return error;
}
static void nanosleep_wake_up(unsigned long __data)
{
        struct task_struct *p = (struct task_struct *) __data;

        wake_up_process(p);
}
/*
 * The standard says that an absolute nanosleep call MUST wake up at
 * the requested time in spite of clock settings.  Here is what we do:
 * For each nanosleep call that needs it (only absolute and not on
 * CLOCK_MONOTONIC* (as it cannot be set)) we thread a little structure
 * into the "nanosleep_abs_list".  All we need is the task_struct pointer.
 * Whenever the clock is set we just wake up all those tasks.  The rest
 * is done by the while loop in clock_nanosleep().
 *
 * On locking, clock_was_set() is called from update_wall_clock which
 * holds (or has held for it) a write_lock_irq(xtime_lock) and is
 * called from the timer bh code.  Thus we need the irq save locks.
 *
 * Also, on the call from update_wall_clock, that is done as part of a
 * softirq thing.  We don't want to delay the system that much (possibly
 * long list of timers to fix), so we defer that work to keventd.
 */

static DECLARE_WAIT_QUEUE_HEAD(nanosleep_abs_wqueue);
static DECLARE_WORK(clock_was_set_work, (void(*)(void*))clock_was_set, NULL);

static DECLARE_MUTEX(clock_was_set_lock);
void clock_was_set(void)
{
        struct k_itimer *timr;
        struct timespec new_wall_to;
        LIST_HEAD(cws_list);
        unsigned long seq;

        if (unlikely(in_interrupt())) {
                schedule_work(&clock_was_set_work);
                return;
        }
        wake_up_all(&nanosleep_abs_wqueue);

        /*
         * Check if there exist TIMER_ABSTIME timers to correct.
         *
         * Notes on locking: This code is run in task context with irq
         * on.  We CAN be interrupted!  All other usage of the abs list
         * lock is under the timer lock which holds the irq lock as
         * well.  We REALLY don't want to scan the whole list with the
         * interrupt system off, AND we would like a sequence lock on
         * this code as well.  Since we assume that the clock will not
         * be set often, it seems ok to take and release the irq lock
         * for each timer.  In fact add_timer will do this, so this is
         * not an issue.  So we know when we are done, we will move the
         * whole list to a new location.  Then as we process each entry,
         * we will move it to the actual list again.  This way, when our
         * copy is empty, we are done.  We are not all that concerned
         * about preemption so we will use a semaphore lock to protect
         * against reentry.  This way we will not stall another
         * processor.  It is possible that this may delay some timers
         * that should have expired, given the new clock, but even this
         * will be minimal as we will always update to the current time,
         * even if it was set by a task that is waiting for entry to
         * this code.  Timers that expire too early will be caught by
         * the expire code and restarted.
         *
         * Absolute timers that repeat are left in the abs list while
         * waiting for the task to pick up the signal.  This means we
         * may find timers that are not in the "add_timer" list, but are
         * in the abs list.  We do the same thing for these, save
         * putting them back in the "add_timer" list.  (Note, these are
         * left in the abs list mainly to indicate that they are
         * ABSOLUTE timers, a fact that is used by the re-arm code, and
         * for which we have no other flag.)
         */

        down(&clock_was_set_lock);
        spin_lock_irq(&abs_list.lock);
        list_splice_init(&abs_list.list, &cws_list);
        spin_unlock_irq(&abs_list.lock);
        do {
                do {
                        seq = read_seqbegin(&xtime_lock);
                        new_wall_to = wall_to_monotonic;
                } while (read_seqretry(&xtime_lock, seq));

                spin_lock_irq(&abs_list.lock);
                if (list_empty(&cws_list)) {
                        spin_unlock_irq(&abs_list.lock);
                        break;
                }
                timr = list_entry(cws_list.next, struct k_itimer,
                                  it.real.abs_timer_entry);

                list_del_init(&timr->it.real.abs_timer_entry);
                if (add_clockset_delta(timr, &new_wall_to) &&
                    del_timer(&timr->it.real.timer))	/* timer run yet? */
                        add_timer(&timr->it.real.timer);
                list_add(&timr->it.real.abs_timer_entry, &abs_list.list);
                spin_unlock_irq(&abs_list.lock);
        } while (1);

        up(&clock_was_set_lock);
}
long clock_nanosleep_restart(struct restart_block *restart_block);

asmlinkage long
sys_clock_nanosleep(clockid_t which_clock, int flags,
                    const struct timespec __user *rqtp,
                    struct timespec __user *rmtp)
{
        struct timespec t;
        struct restart_block *restart_block =
            &(current_thread_info()->restart_block);
        int ret;

        if (invalid_clockid(which_clock))
                return -EINVAL;

        if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
                return -EFAULT;

        if ((unsigned) t.tv_nsec >= NSEC_PER_SEC || t.tv_sec < 0)
                return -EINVAL;

        /*
         * Do this here as nsleep function does not have the real address.
         */
        restart_block->arg1 = (unsigned long)rmtp;

        ret = CLOCK_DISPATCH(which_clock, nsleep, (which_clock, flags, &t));

        if ((ret == -ERESTART_RESTARTBLOCK) && rmtp &&
            copy_to_user(rmtp, &t, sizeof (t)))
                return -EFAULT;

        return ret;
}
static int common_nsleep(clockid_t which_clock,
                         int flags, struct timespec *tsave)
{
        struct timespec t, dum;
        struct timer_list new_timer;
        DECLARE_WAITQUEUE(abs_wqueue, current);
        u64 rq_time = (u64)0;
        s64 left;
        int abs;
        struct restart_block *restart_block =
            &current_thread_info()->restart_block;

        abs_wqueue.flags = 0;
        init_timer(&new_timer);
        new_timer.expires = 0;
        new_timer.data = (unsigned long) current;
        new_timer.function = nanosleep_wake_up;
        abs = flags & TIMER_ABSTIME;

        if (restart_block->fn == clock_nanosleep_restart) {
                /*
                 * Interrupted by a non-delivered signal, pick up remaining
                 * time and continue.  Remaining time is in arg2 & 3.
                 */
                restart_block->fn = do_no_restart_syscall;

                rq_time = restart_block->arg3;
                rq_time = (rq_time << 32) + restart_block->arg2;
                if (!rq_time)
                        return -EINTR;
                left = rq_time - get_jiffies_64();
                if (left <= (s64)0)
                        return 0;	/* Already passed */
        }

        if (abs && (posix_clocks[which_clock].clock_get !=
                    posix_clocks[CLOCK_MONOTONIC].clock_get))
                add_wait_queue(&nanosleep_abs_wqueue, &abs_wqueue);

        do {
                t = *tsave;
                if (abs || !rq_time) {
                        adjust_abs_time(&posix_clocks[which_clock], &t, abs,
                                        &rq_time, &dum);
                }

                left = rq_time - get_jiffies_64();
                if (left >= (s64)MAX_JIFFY_OFFSET)
                        left = (s64)MAX_JIFFY_OFFSET;
                if (left < (s64)0)
                        break;

                new_timer.expires = jiffies + left;
                __set_current_state(TASK_INTERRUPTIBLE);
                add_timer(&new_timer);

                schedule();

                del_timer_sync(&new_timer);
                left = rq_time - get_jiffies_64();
        } while (left > (s64)0 && !test_thread_flag(TIF_SIGPENDING));

        if (abs_wqueue.task_list.next)
                finish_wait(&nanosleep_abs_wqueue, &abs_wqueue);

        if (left > (s64)0) {
                /*
                 * Always restart abs calls from scratch to pick up any
                 * clock shifting that happened while we are away.
                 */
                if (abs)
                        return -ERESTARTNOHAND;

                left *= TICK_NSEC;
                tsave->tv_sec = div_long_long_rem(left,
                                                  NSEC_PER_SEC,
                                                  &tsave->tv_nsec);
                /*
                 * Restart works by saving the time remaining in
                 * arg2 & 3 (it is 64-bits of jiffies).  The other
                 * info we need is the clock_id (saved in arg0).
                 * The sys_call interface needs the user's
                 * timespec return address which _it_ saves in arg1.
                 * Since we have cast the nanosleep call to a clock_nanosleep
                 * both can be restarted with the same code.
                 */
                restart_block->fn = clock_nanosleep_restart;
                restart_block->arg0 = which_clock;
                /*
                 * Caller sets arg1
                 */
                restart_block->arg2 = rq_time & 0xffffffffLL;
                restart_block->arg3 = rq_time >> 32;

                return -ERESTART_RESTARTBLOCK;
        }

        return 0;
}
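
/*
 * The restart hand-off, end to end (a sketch of the flow above): an
 * interrupted relative sleep packs the absolute 64-bit jiffies expiry
 * into the two 32-bit slots arg2/arg3, sys_clock_nanosleep() stores
 * the user rmtp pointer in arg1, and when the syscall is restarted
 * clock_nanosleep_restart() re-enters common_nsleep(), which sees
 * restart_block->fn == clock_nanosleep_restart and reassembles:
 *
 *	rq_time = restart_block->arg3;
 *	rq_time = (rq_time << 32) + restart_block->arg2;
 */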
/*
 * This will restart clock_nanosleep.
 */
long
clock_nanosleep_restart(struct restart_block *restart_block)
{
        struct timespec t;
        int ret = common_nsleep(restart_block->arg0, 0, &t);

        if ((ret == -ERESTART_RESTARTBLOCK) && restart_block->arg1 &&
            copy_to_user((struct timespec __user *)(restart_block->arg1), &t,
                         sizeof (t)))
                return -EFAULT;

        return ret;
}