/*
 * linux/kernel/posix-timers.c
 *
 * 2002-10-15  Posix Clocks & timers
 *                           by George Anzinger george@mvista.com
 *
 *                           Copyright (C) 2002 2003 by MontaVista Software.
 *
 * 2004-06-01  Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
 *                           Copyright (C) 2004 Boris Hu
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
 */
/* These are all the functions necessary to implement
 * POSIX clocks & timers
 */
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/idr.h>
#include <linux/posix-timers.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/vs_base.h>
#include <linux/vs_context.h>
/*
 * Management arrays for POSIX timers.  Timers are kept in slab memory.
 * Timer ids are allocated by an external routine that keeps track of the
 * id and the timer.  The external interface is:
 *
 * void *idr_find(struct idr *idp, int id);           to find timer_id <id>
 * int idr_get_new(struct idr *idp, void *ptr);       to get a new id and
 *                                                    relate it to <ptr>
 * void idr_remove(struct idr *idp, int id);          to release <id>
 * void idr_init(struct idr *idp);                    to initialize <idp>
 *                                                    which we supply.
 * The idr_get_new *may* call slab for more memory so it must not be
 * called under a spin lock.  Likewise idr_remove may release memory
 * (but it may be ok to do this under a lock...).
 * idr_find is just a memory look up and is quite fast.  A -1 return
 * indicates that the requested id does not exist.
 */
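/*
 * Illustrative sketch (not part of the original code) of the call
 * sequence described above, assuming the allocation may sleep and so
 * must happen before idr_lock is taken:
 *
 *      if (!idr_pre_get(&posix_timers_id, GFP_KERNEL))
 *              return -EAGAIN;                 // may sleep, no lock held
 *      spin_lock_irq(&idr_lock);
 *      error = idr_get_new(&posix_timers_id, tmr, &id); // safe under lock
 *      spin_unlock_irq(&idr_lock);
 *
 * sys_timer_create() below follows exactly this pattern.
 */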
/*
 * Let's keep our timers in a slab cache :-)
 */
static kmem_cache_t *posix_timers_cache;
static struct idr posix_timers_id;
static DEFINE_SPINLOCK(idr_lock);
/*
 * we assume that the new SIGEV_THREAD_ID shares no bits with the other
 * SIGEV values.  Here we put out an error if this assumption fails.
 */
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
                       ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
#endif
/*
 * The timer ID is turned into a timer address by idr_find().
 * Verifying a valid ID consists of:
 *
 * a) checking that idr_find() returns other than -1.
 * b) checking that the timer id matches the one in the timer itself.
 * c) that the timer owner is in the caller's thread group.
 */
/*
 * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
 *          to implement others.  This structure defines the various
 *          clocks and allows the possibility of adding others.  We
 *          provide an interface to add clocks to the table and expect
 *          the "arch" code to add at least one clock that is high
 *          resolution.  Here we define the standard CLOCK_REALTIME as a
 *          1/HZ resolution clock.
 *
 * RESOLUTION: Clock resolution is used to round up timer and interval
 *          times, NOT to report clock times, which are reported with as
 *          much resolution as the system can muster.  In some cases this
 *          resolution may depend on the underlying clock hardware and
 *          may not be quantifiable until run time, and only then is the
 *          necessary code written.  The standard says we should say
 *          something about this issue in the documentation...
 *
 * FUNCTIONS: The CLOCKs structure defines possible functions to handle
 *          various clock functions.  For clocks that use the standard
 *          system timer code these entries should be NULL.  This will
 *          allow dispatch without the overhead of indirect function
 *          calls.  CLOCKS that depend on other sources (e.g. WWV or GPS)
 *          must supply functions here, even if the function just returns
 *          ENOSYS.  The standard POSIX timer management code assumes the
 *          following: 1.) The k_itimer struct (sched.h) is used for the
 *          timer.  2.) The list, it_lock, it_clock, it_id and it_process
 *          fields are not modified by timer code.
 *
 *          At this time all functions EXCEPT clock_nanosleep can be
 *          redirected by the CLOCKS structure.  Clock_nanosleep is in
 *          there, but the code ignores it.
 *
 * Permissions: It is assumed that the clock_settime() function defined
 *          for each clock will take care of permission checks.  Some
 *          clocks may be settable by any user (i.e. local process
 *          clocks) others not.  Currently the only settable clock we
 *          have is CLOCK_REALTIME and its high res counterpart, both of
 *          which we beg off on and pass to do_sys_settimeofday().
 */
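/*
 * Illustrative sketch (not from the original source) of how an arch or
 * driver could add a clock to the table below; the id MY_CLOCK_ID and
 * the my_hires_* functions are hypothetical names for this example only:
 *
 *      static struct k_clock clock_hires = {
 *              .clock_getres = my_hires_getres,
 *              .clock_get    = my_hires_get,
 *              .clock_set    = do_posix_clock_nosettime,
 *      };
 *      register_posix_clock(MY_CLOCK_ID, &clock_hires);
 *
 * Any hook left NULL falls back to the common_* default through
 * CLOCK_DISPATCH().
 */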
static struct k_clock posix_clocks[MAX_CLOCKS];

/*
 * These ones are defined below.
 */
static int common_nsleep(const clockid_t, int flags, struct timespec *t,
                         struct timespec __user *rmtp);
static void common_timer_get(struct k_itimer *, struct itimerspec *);
static int common_timer_set(struct k_itimer *, int,
                            struct itimerspec *, struct itimerspec *);
static int common_timer_del(struct k_itimer *timer);

static int posix_timer_fn(struct hrtimer *data);

static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags);
static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
        spin_unlock_irqrestore(&timr->it_lock, flags);
}
/*
 * Call the k_clock hook function if non-null, or the default function.
 */
#define CLOCK_DISPATCH(clock, call, arglist) \
        ((clock) < 0 ? posix_cpu_##call arglist : \
         (posix_clocks[clock].call != NULL \
          ? (*posix_clocks[clock].call) arglist : common_##call arglist))
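/*
 * For example, CLOCK_DISPATCH(which_clock, clock_get, (which_clock, &ts))
 * routes negative ids (CPU clocks) to posix_cpu_clock_get(), calls
 * posix_clocks[which_clock].clock_get() if that hook is non-NULL, and
 * otherwise calls common_clock_get() directly.
 */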
/*
 * Default clock hook functions when the struct k_clock passed
 * to register_posix_clock leaves a function pointer null.
 *
 * The function common_CALL is the default implementation for
 * the function pointer CALL in struct k_clock.
 */
static inline int common_clock_getres(const clockid_t which_clock,
                                      struct timespec *tp)
{
        tp->tv_sec = 0;
        tp->tv_nsec = posix_clocks[which_clock].res;
        return 0;
}
/*
 * Get real time for posix timers
 */
static int common_clock_get(clockid_t which_clock, struct timespec *tp)
{
        ktime_get_real_ts(tp);
        return 0;
}
static inline int common_clock_set(const clockid_t which_clock,
                                   struct timespec *tp)
{
        return do_sys_settimeofday(tp, NULL);
}
static int common_timer_create(struct k_itimer *new_timer)
{
        hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
        return 0;
}
/*
 * Return nonzero if we know a priori this clockid_t value is bogus.
 */
static inline int invalid_clockid(const clockid_t which_clock)
{
        if (which_clock < 0)    /* CPU clock, posix_cpu_* will check it */
                return 0;
        if ((unsigned) which_clock >= MAX_CLOCKS)
                return 1;
        if (posix_clocks[which_clock].clock_getres != NULL)
                return 0;
        if (posix_clocks[which_clock].res != 0)
                return 0;
        return 1;
}
/*
 * Get monotonic time for posix timers
 */
static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
{
        ktime_get_ts(tp);
        return 0;
}
/*
 * Initialize everything, well, just everything in Posix clocks/timers ;)
 */
static __init int init_posix_timers(void)
{
        struct k_clock clock_realtime = {
                .clock_getres = hrtimer_get_res,
        };
        struct k_clock clock_monotonic = {
                .clock_getres = hrtimer_get_res,
                .clock_get = posix_ktime_get_ts,
                .clock_set = do_posix_clock_nosettime,
        };

        register_posix_clock(CLOCK_REALTIME, &clock_realtime);
        register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);

        posix_timers_cache = kmem_cache_create("posix_timers_cache",
                                        sizeof (struct k_itimer), 0, 0, NULL, NULL);
        idr_init(&posix_timers_id);
        return 0;
}

__initcall(init_posix_timers);
static void schedule_next_timer(struct k_itimer *timr)
{
        struct hrtimer *timer = &timr->it.real.timer;

        if (timr->it.real.interval.tv64 == 0)
                return;

        timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
                                            timr->it.real.interval);

        timr->it_overrun_last = timr->it_overrun;
        timr->it_overrun = -1;
        ++timr->it_requeue_pending;
        hrtimer_restart(timer);
}
/*
 * This function is exported for use by the signal deliver code.  It is
 * called just prior to the info block being released and passes that
 * block to us.  Its function is to update the overrun entry AND to
 * restart the timer.  It should only be called if the timer is to be
 * restarted (i.e. we have flagged this in the sys_private entry of the
 * info block).
 *
 * To protect against the timer going away while the interrupt is queued,
 * we require that the it_requeue_pending flag be set.
 */
void do_schedule_next_timer(struct siginfo *info)
{
        struct k_itimer *timr;
        unsigned long flags;

        timr = lock_timer(info->si_tid, &flags);

        if (timr && timr->it_requeue_pending == info->si_sys_private) {
                if (timr->it_clock < 0)
                        posix_cpu_timer_schedule(timr);
                else
                        schedule_next_timer(timr);

                info->si_overrun = timr->it_overrun_last;
        }

        if (timr)
                unlock_timer(timr, flags);
}
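/*
 * Descriptive note (added): the restart handshake works as follows.
 * posix_timer_fn() bumps it_requeue_pending and stores the new value in
 * sigq->info.si_sys_private before the signal is queued.  When the
 * signal is delivered, do_schedule_next_timer() re-arms the timer only
 * if it_requeue_pending still equals si_sys_private, i.e. no
 * timer_settime()/timer_delete() raced with the delivery.
 */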
int posix_timer_event(struct k_itimer *timr, int si_private)
{
        struct vx_info_save vxis;
        int ret;

        enter_vx_info(task_get_vx_info(timr->it_process), &vxis);
        memset(&timr->sigq->info, 0, sizeof(siginfo_t));
        timr->sigq->info.si_sys_private = si_private;
        /* Send signal to the process that owns this timer. */

        timr->sigq->info.si_signo = timr->it_sigev_signo;
        timr->sigq->info.si_errno = 0;
        timr->sigq->info.si_code = SI_TIMER;
        timr->sigq->info.si_tid = timr->it_id;
        timr->sigq->info.si_value = timr->it_sigev_value;

        if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
                struct task_struct *leader;

                ret = send_sigqueue(timr->it_sigev_signo, timr->sigq,
                                    timr->it_process);
                if (likely(ret >= 0))
                        goto out;

                timr->it_sigev_notify = SIGEV_SIGNAL;
                leader = timr->it_process->group_leader;
                put_task_struct(timr->it_process);
                timr->it_process = leader;
        }

        ret = send_group_sigqueue(timr->it_sigev_signo, timr->sigq,
                                  timr->it_process);
out:
        leave_vx_info(&vxis);
        put_vx_info(vxis.vxi);
        return ret;
}
EXPORT_SYMBOL_GPL(posix_timer_event);
/*
 * This function gets called when a POSIX.1b interval timer expires.  It
 * is used as a callback from the kernel internal timer.  The
 * run_timer_list code ALWAYS calls with interrupts on.
 *
 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
 */
static int posix_timer_fn(struct hrtimer *timer)
{
        struct k_itimer *timr;
        unsigned long flags;
        int si_private = 0;
        int ret = HRTIMER_NORESTART;

        timr = container_of(timer, struct k_itimer, it.real.timer);
        spin_lock_irqsave(&timr->it_lock, flags);

        if (timr->it.real.interval.tv64 != 0)
                si_private = ++timr->it_requeue_pending;

        if (posix_timer_event(timr, si_private)) {
                /*
                 * signal was not sent because the signal was ignored;
                 * we will not get a call back to restart it AND
                 * it should be restarted.
                 */
                if (timr->it.real.interval.tv64 != 0) {
                        timr->it_overrun +=
                                hrtimer_forward(timer,
                                                timer->base->softirq_time,
                                                timr->it.real.interval);
                        ret = HRTIMER_RESTART;
                        ++timr->it_requeue_pending;
                }
        }

        unlock_timer(timr, flags);
        return ret;
}
static struct task_struct * good_sigevent(sigevent_t * event)
{
        struct task_struct *rtn = current->group_leader;

        if ((event->sigev_notify & SIGEV_THREAD_ID) &&
                (!(rtn = find_task_by_real_pid(event->sigev_notify_thread_id)) ||
                 rtn->tgid != current->tgid ||
                 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
                return NULL;

        if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
            ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
                return NULL;

        return rtn;
}
void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
{
        if ((unsigned) clock_id >= MAX_CLOCKS) {
                printk("POSIX clock register failed for clock_id %d\n",
                       clock_id);
                return;
        }

        posix_clocks[clock_id] = *new_clock;
}
EXPORT_SYMBOL_GPL(register_posix_clock);
static struct k_itimer * alloc_posix_timer(void)
{
        struct k_itimer *tmr;
        tmr = kmem_cache_alloc(posix_timers_cache, GFP_KERNEL);
        if (!tmr)
                return tmr;
        memset(tmr, 0, sizeof (struct k_itimer));
        if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
                kmem_cache_free(posix_timers_cache, tmr);
                tmr = NULL;
        }
        return tmr;
}
#define IT_ID_SET       1
#define IT_ID_NOT_SET   0
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
        if (it_id_set) {
                unsigned long flags;
                spin_lock_irqsave(&idr_lock, flags);
                idr_remove(&posix_timers_id, tmr->it_id);
                spin_unlock_irqrestore(&idr_lock, flags);
        }
        sigqueue_free(tmr->sigq);
        if (unlikely(tmr->it_process) &&
            tmr->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
                put_task_struct(tmr->it_process);
        kmem_cache_free(posix_timers_cache, tmr);
}
/* Create a POSIX.1b interval timer. */

asmlinkage long
sys_timer_create(const clockid_t which_clock,
                 struct sigevent __user *timer_event_spec,
                 timer_t __user * created_timer_id)
{
        int error = 0;
        struct k_itimer *new_timer = NULL;
        int new_timer_id;
        struct task_struct *process = NULL;
        unsigned long flags;
        sigevent_t event;
        int it_id_set = IT_ID_NOT_SET;

        if (invalid_clockid(which_clock))
                return -EINVAL;

        new_timer = alloc_posix_timer();
        if (unlikely(!new_timer))
                return -EAGAIN;

        spin_lock_init(&new_timer->it_lock);
retry:
        if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
                error = -EAGAIN;
                goto out;
        }
        spin_lock_irq(&idr_lock);
        error = idr_get_new(&posix_timers_id, (void *) new_timer,
                            &new_timer_id);
        spin_unlock_irq(&idr_lock);
        if (error == -EAGAIN)
                goto retry;
        else if (error) {
                /*
                 * Weird looking, but we return EAGAIN if the IDR is
                 * full (proper POSIX return value for this)
                 */
                error = -EAGAIN;
                goto out;
        }

        it_id_set = IT_ID_SET;
        new_timer->it_id = (timer_t) new_timer_id;
        new_timer->it_clock = which_clock;
        new_timer->it_overrun = -1;
        error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
        if (error)
                goto out;

        /*
         * return the timer_id now.  The next step is hard to
         * back out if there is an error.
         */
        if (copy_to_user(created_timer_id,
                         &new_timer_id, sizeof (new_timer_id))) {
                error = -EFAULT;
                goto out;
        }
        if (timer_event_spec) {
                if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
                        error = -EFAULT;
                        goto out;
                }
                new_timer->it_sigev_notify = event.sigev_notify;
                new_timer->it_sigev_signo = event.sigev_signo;
                new_timer->it_sigev_value = event.sigev_value;

                read_lock(&tasklist_lock);
                if ((process = good_sigevent(&event))) {
                        /*
                         * We may be setting up this process for another
                         * thread.  It may be exiting.  To catch this
                         * case we check the PF_EXITING flag.  If
                         * the flag is not set, the siglock will catch
                         * him before it is too late (in exit_itimers).
                         *
                         * The exec case is a bit more involved but easy
                         * to code.  If the process is in our thread
                         * group (and it must be or we would not allow
                         * it here) and is doing an exec, it will cause
                         * us to be killed.  In this case it will wait
                         * for us to die which means we can finish this
                         * linkage with our last gasp. I.e. no code :)
                         */
                        spin_lock_irqsave(&process->sighand->siglock, flags);
                        if (!(process->flags & PF_EXITING)) {
                                new_timer->it_process = process;
                                list_add(&new_timer->list,
                                         &process->signal->posix_timers);
                                spin_unlock_irqrestore(&process->sighand->siglock, flags);
                                if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
                                        get_task_struct(process);
                        } else {
                                spin_unlock_irqrestore(&process->sighand->siglock, flags);
                                process = NULL;
                        }
                }
                read_unlock(&tasklist_lock);
                if (!process) {
                        error = -EINVAL;
                        goto out;
                }
        } else {
                new_timer->it_sigev_notify = SIGEV_SIGNAL;
                new_timer->it_sigev_signo = SIGALRM;
                new_timer->it_sigev_value.sival_int = new_timer->it_id;
                process = current->group_leader;
                spin_lock_irqsave(&process->sighand->siglock, flags);
                new_timer->it_process = process;
                list_add(&new_timer->list, &process->signal->posix_timers);
                spin_unlock_irqrestore(&process->sighand->siglock, flags);
        }

        /*
         * In the case of the timer belonging to another task, after
         * the task is unlocked, the timer is owned by the other task
         * and may cease to exist at any time.  Don't use or modify
         * new_timer after the unlock call.
         */

out:
        if (error)
                release_posix_timer(new_timer, it_id_set);

        return error;
}
/*
 * Locking issues: We need to protect the result of the id look up until
 * we get the timer locked down so it is not deleted under us.  The
 * removal is done under the idr spinlock so we use that here to bridge
 * the find to the timer lock.  To avoid a deadlock, the timer id MUST
 * be released without holding the timer lock.
 */
static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
{
        struct k_itimer *timr;
        /*
         * Watch out here.  We do an irqsave on the idr_lock and pass the
         * flags part over to the timer lock.  Must not let interrupts in
         * while we are moving the lock.
         */

        spin_lock_irqsave(&idr_lock, *flags);
        timr = (struct k_itimer *) idr_find(&posix_timers_id, (int) timer_id);
        if (timr) {
                spin_lock(&timr->it_lock);
                spin_unlock(&idr_lock);

                if ((timr->it_id != timer_id) || !(timr->it_process) ||
                                timr->it_process->tgid != current->tgid) {
                        unlock_timer(timr, *flags);
                        timr = NULL;
                }
        } else
                spin_unlock_irqrestore(&idr_lock, *flags);

        return timr;
}
/*
 * Get the time remaining on a POSIX.1b interval timer.  This function
 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
 * be SMP safe.
 *
 * We have a couple of messes to clean up here.  First there is the case
 * of a timer that has a requeue pending.  These timers should appear to
 * be in the timer list with an expiry as if we were to requeue them
 * again.
 *
 * The second issue is the SIGEV_NONE timer which may be active but is
 * not really ever put in the timer list (to save system resources).
 * This timer may be expired, and if so, we will do it here.  Otherwise
 * it is the same as a requeue pending timer WRT to what we should
 * report.
 */
static void
common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
{
        ktime_t now, remaining, iv;
        struct hrtimer *timer = &timr->it.real.timer;

        memset(cur_setting, 0, sizeof(struct itimerspec));

        iv = timr->it.real.interval;

        /* interval timer ? */
        if (iv.tv64)
                cur_setting->it_interval = ktime_to_timespec(iv);
        else if (!hrtimer_active(timer) &&
                 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
                return;

        now = timer->base->get_time();

        /*
         * When a requeue is pending or this is a SIGEV_NONE
         * timer move the expiry time forward by intervals, so
         * expiry is > now.
         */
        if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
            (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
                timr->it_overrun += hrtimer_forward(timer, now, iv);

        remaining = ktime_sub(timer->expires, now);
        /* Return 0 only, when the timer is expired and not pending */
        if (remaining.tv64 <= 0) {
                /*
                 * A single shot SIGEV_NONE timer must return 0, when
                 * it is expired !
                 */
                if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
                        cur_setting->it_value.tv_nsec = 1;
        } else
                cur_setting->it_value = ktime_to_timespec(remaining);
}
/* Get the time remaining on a POSIX.1b interval timer. */
asmlinkage long
sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
{
        struct k_itimer *timr;
        struct itimerspec cur_setting;
        unsigned long flags;

        timr = lock_timer(timer_id, &flags);
        if (!timr)
                return -EINVAL;

        CLOCK_DISPATCH(timr->it_clock, timer_get, (timr, &cur_setting));

        unlock_timer(timr, flags);

        if (copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
                return -EFAULT;

        return 0;
}
/*
 * Get the number of overruns of a POSIX.1b interval timer.  This is to
 * be the overrun of the timer last delivered.  At the same time we are
 * accumulating overruns on the next timer.  The overrun is frozen when
 * the signal is delivered, either at the notify time (if the info block
 * is not queued) or at the actual delivery time (as we are informed by
 * the call back to do_schedule_next_timer()).  So all we need to do is
 * to pick up the frozen overrun.
 */
asmlinkage long
sys_timer_getoverrun(timer_t timer_id)
{
        struct k_itimer *timr;
        int overrun;
        unsigned long flags;

        timr = lock_timer(timer_id, &flags);
        if (!timr)
                return -EINVAL;

        overrun = timr->it_overrun_last;
        unlock_timer(timr, flags);

        return overrun;
}
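/*
 * Illustrative user-space view (not part of the original source): after
 * each expiry signal, timer_getoverrun() reports how many further
 * expirations occurred while that signal was pending, so a handler can
 * account for missed ticks:
 *
 *      // inside the handler for the timer's signal:
 *      int missed = timer_getoverrun(timerid);
 *      ticks += 1 + missed;
 */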
/* Set a POSIX.1b interval timer. */
/* timr->it_lock is taken. */
static int
common_timer_set(struct k_itimer *timr, int flags,
                 struct itimerspec *new_setting, struct itimerspec *old_setting)
{
        struct hrtimer *timer = &timr->it.real.timer;
        enum hrtimer_mode mode;

        if (old_setting)
                common_timer_get(timr, old_setting);

        /* disable the timer */
        timr->it.real.interval.tv64 = 0;
        /*
         * careful here.  If smp we could be in the "fire" routine which will
         * be spinning as we hold the lock.  But this is ONLY an SMP issue.
         */
        if (hrtimer_try_to_cancel(timer) < 0)
                return TIMER_RETRY;

        timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timr->it_overrun_last = 0;

        /* switch off the timer when it_value is zero */
        if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
                return 0;

        mode = flags & TIMER_ABSTIME ? HRTIMER_ABS : HRTIMER_REL;
        hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
        timr->it.real.timer.function = posix_timer_fn;

        timer->expires = timespec_to_ktime(new_setting->it_value);

        /* Convert interval */
        timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);

        /* SIGEV_NONE timers are not queued ! See common_timer_get */
        if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
                /* Setup correct expiry time for relative timers */
                if (mode == HRTIMER_REL)
                        timer->expires = ktime_add(timer->expires,
                                                   timer->base->get_time());
                return 0;
        }

        hrtimer_start(timer, timer->expires, mode);
        return 0;
}
/* Set a POSIX.1b interval timer */
asmlinkage long
sys_timer_settime(timer_t timer_id, int flags,
                  const struct itimerspec __user *new_setting,
                  struct itimerspec __user *old_setting)
{
        struct k_itimer *timr;
        struct itimerspec new_spec, old_spec;
        int error = 0;
        unsigned long flag;
        struct itimerspec *rtn = old_setting ? &old_spec : NULL;

        if (!new_setting)
                return -EINVAL;

        if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
                return -EFAULT;

        if (!timespec_valid(&new_spec.it_interval) ||
            !timespec_valid(&new_spec.it_value))
                return -EINVAL;
retry:
        timr = lock_timer(timer_id, &flag);
        if (!timr)
                return -EINVAL;

        error = CLOCK_DISPATCH(timr->it_clock, timer_set,
                               (timr, flags, &new_spec, rtn));

        unlock_timer(timr, flag);
        if (error == TIMER_RETRY) {
                rtn = NULL;     // We already got the old time...
                goto retry;
        }

        if (old_setting && !error &&
            copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
                error = -EFAULT;

        return error;
}
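/*
 * Illustrative user-space sketch (not from the original source) of the
 * absolute-time case handled above, the same TIMER_ABSTIME path the
 * 2004-06-01 fix in the header refers to:
 *
 *      struct itimerspec its = { .it_value = deadline };  // wall-clock time
 *      timer_settime(timerid, TIMER_ABSTIME, &its, NULL);
 *
 * Without TIMER_ABSTIME, it_value is an offset from now; see the
 * HRTIMER_ABS/HRTIMER_REL selection in common_timer_set().
 */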
static inline int common_timer_del(struct k_itimer *timer)
{
        timer->it.real.interval.tv64 = 0;

        if (hrtimer_try_to_cancel(&timer->it.real.timer) < 0)
                return TIMER_RETRY;
        return 0;
}
static inline int timer_delete_hook(struct k_itimer *timer)
{
        return CLOCK_DISPATCH(timer->it_clock, timer_del, (timer));
}
/* Delete a POSIX.1b interval timer. */
asmlinkage long
sys_timer_delete(timer_t timer_id)
{
        struct k_itimer *timer;
        unsigned long flags;

retry_delete:
        timer = lock_timer(timer_id, &flags);
        if (!timer)
                return -EINVAL;

        if (timer_delete_hook(timer) == TIMER_RETRY) {
                unlock_timer(timer, flags);
                goto retry_delete;
        }

        spin_lock(&current->sighand->siglock);
        list_del(&timer->list);
        spin_unlock(&current->sighand->siglock);
        /*
         * This keeps any tasks waiting on the spin lock from thinking
         * they got something (see the lock code above).
         */
        if (timer->it_process) {
                if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
                        put_task_struct(timer->it_process);
                timer->it_process = NULL;
        }
        unlock_timer(timer, flags);
        release_posix_timer(timer, IT_ID_SET);
        return 0;
}
/*
 * return timer owned by the process, used by exit_itimers
 */
static void itimer_delete(struct k_itimer *timer)
{
        unsigned long flags;

retry_delete:
        spin_lock_irqsave(&timer->it_lock, flags);

        if (timer_delete_hook(timer) == TIMER_RETRY) {
                unlock_timer(timer, flags);
                goto retry_delete;
        }
        list_del(&timer->list);
        /*
         * This keeps any tasks waiting on the spin lock from thinking
         * they got something (see the lock code above).
         */
        if (timer->it_process) {
                if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
                        put_task_struct(timer->it_process);
                timer->it_process = NULL;
        }
        unlock_timer(timer, flags);
        release_posix_timer(timer, IT_ID_SET);
}
/*
 * This is called by do_exit or de_thread, only when there are no more
 * references to the shared signal_struct.
 */
void exit_itimers(struct signal_struct *sig)
{
        struct k_itimer *tmr;

        while (!list_empty(&sig->posix_timers)) {
                tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
                itimer_delete(tmr);
        }
}
/* Not available / possible... functions */
int do_posix_clock_nosettime(const clockid_t clockid, struct timespec *tp)
{
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_posix_clock_nosettime);
int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
                               struct timespec *t, struct timespec __user *r)
{
#ifndef ENOTSUP
        return -EOPNOTSUPP;     /* aka ENOTSUP in userland for POSIX */
#else   /* parisc does define it separately. */
        return -ENOTSUP;
#endif
}
EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
asmlinkage long sys_clock_settime(const clockid_t which_clock,
                                  const struct timespec __user *tp)
{
        struct timespec new_tp;

        if (invalid_clockid(which_clock))
                return -EINVAL;
        if (copy_from_user(&new_tp, tp, sizeof (*tp)))
                return -EFAULT;

        return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
}
asmlinkage long
sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
{
        struct timespec kernel_tp;
        int error;

        if (invalid_clockid(which_clock))
                return -EINVAL;
        error = CLOCK_DISPATCH(which_clock, clock_get,
                               (which_clock, &kernel_tp));
        if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
                error = -EFAULT;

        return error;
}
asmlinkage long
sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp)
{
        struct timespec rtn_tp;
        int error;

        if (invalid_clockid(which_clock))
                return -EINVAL;

        error = CLOCK_DISPATCH(which_clock, clock_getres,
                               (which_clock, &rtn_tp));

        if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp))) {
                error = -EFAULT;
        }

        return error;
}
/*
 * nanosleep for monotonic and realtime clocks
 */
static int common_nsleep(const clockid_t which_clock, int flags,
                         struct timespec *tsave, struct timespec __user *rmtp)
{
        return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
                                 HRTIMER_ABS : HRTIMER_REL, which_clock);
}
asmlinkage long
sys_clock_nanosleep(const clockid_t which_clock, int flags,
                    const struct timespec __user *rqtp,
                    struct timespec __user *rmtp)
{
        struct timespec t;

        if (invalid_clockid(which_clock))
                return -EINVAL;

        if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
                return -EFAULT;

        if (!timespec_valid(&t))
                return -EINVAL;

        return CLOCK_DISPATCH(which_clock, nsleep,
                              (which_clock, flags, &t, rmtp));
}