/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <asm/uaccess.h>
#include <linux/errno.h>
#include <linux/vs_cvirt.h>
static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (!p || (CPUCLOCK_PERTHREAD(which_clock) ?
		   p->tgid != current->tgid : p->tgid != pid)) {
		error = -EINVAL;
	}
	read_unlock(&tasklist_lock);

	return error;
}
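/*
 * Example (illustrative, not part of this file): userspace reaches
 * these clocks through clockid_t values that encode a PID and a clock
 * type, which check_clock() above validates.  A minimal sketch,
 * assuming the POSIX clock_getcpuclockid() wrapper is available:
 *
 *	#include <time.h>
 *	#include <stdio.h>
 *
 *	int print_cpu_time(pid_t pid)
 *	{
 *		clockid_t clk;
 *		struct timespec ts;
 *
 *		if (clock_getcpuclockid(pid, &clk) != 0)
 *			return -1;	(the kernel-side check failed)
 *		if (clock_gettime(clk, &ts) != 0)
 *			return -1;
 *		printf("%ld.%09ld s of CPU time\n",
 *		       (long) ts.tv_sec, ts.tv_nsec);
 *		return 0;
 *	}
 */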
static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
	union cpu_time_count ret;
	ret.sched = 0;		/* high half always zero when .cpu used */
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
	} else {
		ret.cpu = timespec_to_cputime(tp);
	}
	return ret;
}
static void sample_to_timespec(const clockid_t which_clock,
			       union cpu_time_count cpu,
			       struct timespec *tp)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		tp->tv_sec = div_long_long_rem(cpu.sched,
					       NSEC_PER_SEC, &tp->tv_nsec);
	} else {
		cputime_to_timespec(cpu.cpu, tp);
	}
}
static inline int cpu_time_before(const clockid_t which_clock,
				  union cpu_time_count now,
				  union cpu_time_count then)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		return now.sched < then.sched;
	} else {
		return cputime_lt(now.cpu, then.cpu);
	}
}
static inline void cpu_time_add(const clockid_t which_clock,
				union cpu_time_count *acc,
				union cpu_time_count val)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		acc->sched += val.sched;
	} else {
		acc->cpu = cputime_add(acc->cpu, val.cpu);
	}
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
						union cpu_time_count a,
						union cpu_time_count b)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		a.sched -= b.sched;
	} else {
		a.cpu = cputime_sub(a.cpu, b.cpu);
	}
	return a;
}
/*
 * Divide and limit the result to res >= 1
 *
 * This is necessary to prevent signal delivery starvation, when the result of
 * the division would be rounded down to 0.
 */
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
	cputime_t res = cputime_div(time, div);

	return max_t(cputime_t, res, 1);
}
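/*
 * Worked example (illustrative): with 8 live threads and a residue of
 * 3 ticks to distribute, cputime_div(3, 8) would yield 0, arming every
 * thread's expiry at "now"; the timer would then appear to fire
 * perpetually without the group total ever advancing past the limit.
 * Clamping the per-thread share to 1 tick guarantees forward progress.
 */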
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
			   union cpu_time_count now)
{
	int i;

	if (timer->it.cpu.incr.sched == 0)
		return;

	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		unsigned long long delta, incr;

		if (now.sched < timer->it.cpu.expires.sched)
			return;
		incr = timer->it.cpu.incr.sched;
		delta = now.sched + incr - timer->it.cpu.expires.sched;
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; incr < delta - incr; i++)
			incr = incr << 1;
		for (; i >= 0; incr >>= 1, i--) {
			if (delta < incr)
				continue;
			timer->it.cpu.expires.sched += incr;
			timer->it_overrun += 1 << i;
			delta -= incr;
		}
	} else {
		cputime_t delta, incr;

		if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
			return;
		incr = timer->it.cpu.incr.cpu;
		delta = cputime_sub(cputime_add(now.cpu, incr),
				    timer->it.cpu.expires.cpu);
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
			incr = cputime_add(incr, incr);
		for (; i >= 0; incr = cputime_halve(incr), i--) {
			if (cputime_lt(delta, incr))
				continue;
			timer->it.cpu.expires.cpu =
				cputime_add(timer->it.cpu.expires.cpu, incr);
			timer->it_overrun += 1 << i;
			delta = cputime_sub(delta, incr);
		}
	}
}
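/*
 * Worked example (illustrative): expires=10, incr=3, now=26 (in the
 * clock's units).  delta = 26 + 3 - 10 = 19.  The doubling loop grows
 * incr 3 -> 6 -> 12 (i == 2); the halving loop then adds 12 to expires
 * (it_overrun += 1 << 2, delta = 7), adds 6 (it_overrun += 1 << 1,
 * delta = 1), and skips 3, leaving expires = 28 > now with it_overrun
 * advanced by 6.  The expiry thus moves forward six periods in
 * O(log n) steps rather than one period per iteration.
 */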
static inline cputime_t prof_ticks(struct task_struct *p)
{
	return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
	return p->utime;
}
static inline unsigned long long sched_ns(struct task_struct *p)
{
	return (p == current) ? current_sched_time(p) : p->sched_time;
}
int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}
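/*
 * Illustrative numbers: with HZ == 250 the tick-based clocks report a
 * resolution of (NSEC_PER_SEC + 249) / 250 == 4000000 ns, while the
 * CPUCLOCK_SCHED clocks (including CLOCK_PROCESS_CPUTIME_ID and
 * CLOCK_THREAD_CPUTIME_ID as registered at the bottom of this file)
 * report 1 ns.
 */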
int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0)
		error = -EPERM;
	return error;
}
/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
			    union cpu_time_count *cpu)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = sched_ns(p);
		break;
	}
	return 0;
}
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading, and p->sighand->siglock.
 */
static int cpu_clock_sample_group_locked(unsigned int clock_idx,
					 struct task_struct *p,
					 union cpu_time_count *cpu)
{
	struct task_struct *t = p;
	switch (clock_idx) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = cputime_add(p->signal->utime, p->signal->stime);
		do {
			cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
			t = next_thread(t);
		} while (t != p);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = p->signal->utime;
		do {
			cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
			t = next_thread(t);
		} while (t != p);
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = p->signal->sched_time;
		/* Add in each other live thread. */
		while ((t = next_thread(t)) != p) {
			cpu->sched += t->sched_time;
		}
		cpu->sched += sched_ns(p);
		break;
	}
	return 0;
}
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	int ret;
	unsigned long flags;
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p,
					    cpu);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	return ret;
}
int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int error = -EINVAL;
	union cpu_time_count rtn;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		if (CPUCLOCK_PERTHREAD(which_clock)) {
			/*
			 * Sampling just ourselves we can do with no locking.
			 */
			error = cpu_clock_sample(which_clock,
						 current, &rtn);
		} else {
			read_lock(&tasklist_lock);
			error = cpu_clock_sample_group(which_clock,
						       current, &rtn);
			read_unlock(&tasklist_lock);
		}
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);
		if (p) {
			if (CPUCLOCK_PERTHREAD(which_clock)) {
				if (p->tgid == current->tgid) {
					error = cpu_clock_sample(which_clock,
								 p, &rtn);
				}
			} else if (p->tgid == pid && p->signal) {
				error = cpu_clock_sample_group(which_clock,
							       p, &rtn);
			}
		}
		read_unlock(&tasklist_lock);
	}

	if (error)
		return error;
	sample_to_timespec(which_clock, rtn, tp);
	return 0;
}
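/*
 * Example (illustrative): both lookup paths above are exercised from
 * userspace; a sketch assuming the POSIX wrappers:
 *
 *	#include <pthread.h>
 *	#include <time.h>
 *
 *	clockid_t clk;
 *	struct timespec ts;
 *
 *	pthread_getcpuclockid(pthread_self(), &clk);
 *	clock_gettime(clk, &ts);	(pid == 0, per-thread fast path)
 *
 *	clock_getcpuclockid(some_pid, &clk);	(some_pid: any visible PID)
 *	clock_gettime(clk, &ts);	(find_task_by_pid path)
 */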
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create with the new timer already locked.
 */
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);
	new_timer->it.cpu.incr.sched = 0;
	new_timer->it.cpu.expires.sched = 0;

	read_lock(&tasklist_lock);
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_pid(pid);
			if (p && p->tgid != current->tgid)
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_pid(pid);
			if (p && p->tgid != pid)
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
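/*
 * Example (illustrative): the sys_timer_create path that ends up here,
 * as driven from userspace; a minimal sketch using SIGEV_SIGNAL:
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	timer_t tid;
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo = SIGRTMIN,
 *	};
 *
 *	timer_create(CLOCK_THREAD_CPUTIME_ID, &sev, &tid);
 */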
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	int ret = 0;

	if (likely(p != NULL)) {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * We raced with the reaping of the task.
			 * The deletion should have cleared us off the list.
			 */
			BUG_ON(!list_empty(&timer->it.cpu.entry));
		} else {
			spin_lock(&p->sighand->siglock);
			if (timer->it.cpu.firing)
				ret = TIMER_RETRY;
			else
				list_del(&timer->it.cpu.entry);
			spin_unlock(&p->sighand->siglock);
		}
		read_unlock(&tasklist_lock);

		if (!ret)
			put_task_struct(p);
	}

	return ret;
}
/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
			   cputime_t utime, cputime_t stime,
			   unsigned long long sched_time)
{
	struct cpu_timer_list *timer, *next;
	cputime_t ptime = cputime_add(utime, stime);

	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (cputime_lt(timer->expires.cpu, ptime)) {
			timer->expires.cpu = cputime_zero;
		} else {
			timer->expires.cpu = cputime_sub(timer->expires.cpu,
							 ptime);
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (cputime_lt(timer->expires.cpu, utime)) {
			timer->expires.cpu = cputime_zero;
		} else {
			timer->expires.cpu = cputime_sub(timer->expires.cpu,
							 utime);
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires.sched < sched_time) {
			timer->expires.sched = 0;
		} else {
			timer->expires.sched -= sched_time;
		}
	}
}
/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers,
		       tsk->utime, tsk->stime, tsk->sched_time);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(tsk->signal->cpu_timers,
		       cputime_add(tsk->utime, tsk->signal->utime),
		       cputime_add(tsk->stime, tsk->signal->stime),
		       tsk->sched_time + tsk->signal->sched_time);
}
/*
 * Set the expiry times of all the threads in the process so one of them
 * will go off before the process cumulative expiry total is reached.
 */
static void process_timer_rebalance(struct task_struct *p,
				    unsigned int clock_idx,
				    union cpu_time_count expires,
				    union cpu_time_count val)
{
	cputime_t ticks, left;
	unsigned long long ns, nsleft;
	struct task_struct *t = p;
	unsigned int nthreads = atomic_read(&p->signal->live);

	if (!nthreads)
		return;

	switch (clock_idx) {
	default:
		BUG();
		break;
	case CPUCLOCK_PROF:
		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
					    nthreads);
		do {
			if (likely(!(t->flags & PF_EXITING))) {
				ticks = cputime_add(prof_ticks(t), left);
				if (cputime_eq(t->it_prof_expires,
					       cputime_zero) ||
				    cputime_gt(t->it_prof_expires, ticks)) {
					t->it_prof_expires = ticks;
				}
			}
			t = next_thread(t);
		} while (t != p);
		break;
	case CPUCLOCK_VIRT:
		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
					    nthreads);
		do {
			if (likely(!(t->flags & PF_EXITING))) {
				ticks = cputime_add(virt_ticks(t), left);
				if (cputime_eq(t->it_virt_expires,
					       cputime_zero) ||
				    cputime_gt(t->it_virt_expires, ticks)) {
					t->it_virt_expires = ticks;
				}
			}
			t = next_thread(t);
		} while (t != p);
		break;
	case CPUCLOCK_SCHED:
		nsleft = expires.sched - val.sched;
		do_div(nsleft, nthreads);
		nsleft = max_t(unsigned long long, nsleft, 1);
		do {
			if (likely(!(t->flags & PF_EXITING))) {
				ns = t->sched_time + nsleft;
				if (t->it_sched_expires == 0 ||
				    t->it_sched_expires > ns) {
					t->it_sched_expires = ns;
				}
			}
			t = next_thread(t);
		} while (t != p);
		break;
	}
}
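/*
 * Worked example (illustrative): a process-wide CPUCLOCK_PROF timer
 * 12 ticks in the future (expires.cpu - val.cpu == 12) with 4 live
 * threads gives left == 3, so each thread arms it_prof_expires at its
 * own prof_ticks(t) + 3.  Until some thread accumulates 3 more ticks
 * and trips its per-thread expiry, the group total can grow by less
 * than 4 * 3 == 12, guaranteeing a re-check before the cumulative
 * limit is reached.
 */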
static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
	/*
	 * That's all for this thread or process.
	 * We leave our residual in expires to be reported.
	 */
	put_task_struct(timer->it.cpu.task);
	timer->it.cpu.task = NULL;
	timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
					     timer->it.cpu.expires,
					     now);
}
/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, and interrupts disabled.
 */
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;
	unsigned long i;

	if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING))
		return;

	head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
		p->cpu_timers : p->signal->cpu_timers);
	head += CPUCLOCK_WHICH(timer->it_clock);

	BUG_ON(!irqs_disabled());
	spin_lock(&p->sighand->siglock);

	listpos = head;
	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		list_for_each_entry(next, head, entry) {
			if (next->expires.sched > nt->expires.sched)
				break;
			listpos = &next->entry;
		}
	} else {
		list_for_each_entry(next, head, entry) {
			if (cputime_gt(next->expires.cpu, nt->expires.cpu))
				break;
			listpos = &next->entry;
		}
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		/*
		 * We are the new earliest-expiring timer.
		 * If we are a thread timer, there can always
		 * be a process timer telling us to stop earlier.
		 */
		if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
			switch (CPUCLOCK_WHICH(timer->it_clock)) {
			default:
				BUG();
			case CPUCLOCK_PROF:
				if (cputime_eq(p->it_prof_expires,
					       cputime_zero) ||
				    cputime_gt(p->it_prof_expires,
					       nt->expires.cpu))
					p->it_prof_expires = nt->expires.cpu;
				break;
			case CPUCLOCK_VIRT:
				if (cputime_eq(p->it_virt_expires,
					       cputime_zero) ||
				    cputime_gt(p->it_virt_expires,
					       nt->expires.cpu))
					p->it_virt_expires = nt->expires.cpu;
				break;
			case CPUCLOCK_SCHED:
				if (p->it_sched_expires == 0 ||
				    p->it_sched_expires > nt->expires.sched)
					p->it_sched_expires = nt->expires.sched;
				break;
			}
		} else {
			/*
			 * For a process timer, we must balance
			 * all the live threads' expirations.
			 */
			switch (CPUCLOCK_WHICH(timer->it_clock)) {
			default:
				BUG();
			case CPUCLOCK_VIRT:
				if (!cputime_eq(p->signal->it_virt_expires,
						cputime_zero) &&
				    cputime_lt(p->signal->it_virt_expires,
					       timer->it.cpu.expires.cpu))
					break;
				goto rebalance;
			case CPUCLOCK_PROF:
				if (!cputime_eq(p->signal->it_prof_expires,
						cputime_zero) &&
				    cputime_lt(p->signal->it_prof_expires,
					       timer->it.cpu.expires.cpu))
					break;
				i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
				if (i != RLIM_INFINITY &&
				    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
					break;
				goto rebalance;
			case CPUCLOCK_SCHED:
			rebalance:
				process_timer_rebalance(
					timer->it.cpu.task,
					CPUCLOCK_WHICH(timer->it_clock),
					timer->it.cpu.expires, now);
				break;
			}
		}
	}

	spin_unlock(&p->sighand->siglock);
}
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires.sched = 0;
	} else if (timer->it.cpu.incr.sched == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires.sched = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_schedule(timer);
	}
}
/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
			struct itimerspec *new, struct itimerspec *old)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count old_expires, new_expires, val;
	int ret;

	if (unlikely(p == NULL)) {
		/*
		 * Timer refers to a dead task's clock.
		 */
		return -ESRCH;
	}

	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

	read_lock(&tasklist_lock);
	/*
	 * We need the tasklist_lock to protect against reaping that
	 * clears p->signal.  If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(p->signal == NULL)) {
		read_unlock(&tasklist_lock);
		put_task_struct(p);
		timer->it.cpu.task = NULL;
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	BUG_ON(!irqs_disabled());

	ret = 0;
	spin_lock(&p->sighand->siglock);
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);
	spin_unlock(&p->sighand->siglock);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_clock_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires.sched == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (cpu_time_before(timer->it_clock, val,
					    timer->it.cpu.expires)) {
				old_expires = cpu_time_sub(
					timer->it_clock,
					timer->it.cpu.expires, val);
				sample_to_timespec(timer->it_clock,
						   old_expires,
						   &old->it_value);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		read_unlock(&tasklist_lock);
		goto out;
	}

	if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
		cpu_time_add(timer->it_clock, &new_expires, val);
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires.sched != 0 &&
	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
	    cpu_time_before(timer->it_clock, val, new_expires)) {
		arm_timer(timer, val);
	}

	read_unlock(&tasklist_lock);

	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
						&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires.sched != 0 &&
	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
	    !cpu_time_before(timer->it_clock, val, new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
 out:
	if (old) {
		sample_to_timespec(timer->it_clock,
				   timer->it.cpu.incr, &old->it_interval);
	}
	return ret;
}
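/*
 * Example (illustrative): the flags handling above corresponds to
 * timer_settime() in userspace; a relative 2 s CPU-time one-shot that
 * also reads back the previous setting:
 *
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 2, .tv_nsec = 0 },
 *		.it_interval = { 0, 0 },	(one-shot: no reload)
 *	};
 *	struct itimerspec prev;
 *
 *	timer_settime(tid, 0, &its, &prev);	(0: relative, not TIMER_ABSTIME)
 */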
void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
	union cpu_time_count now;
	struct task_struct *p = timer->it.cpu.task;
	int clear_dead;

	/*
	 * Easy part: convert the reload time.
	 */
	sample_to_timespec(timer->it_clock,
			   timer->it.cpu.incr, &itp->it_interval);

	if (timer->it.cpu.expires.sched == 0) {	/* Timer not armed at all.  */
		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
		return;
	}

	if (unlikely(p == NULL)) {
		/*
		 * This task already died and the timer will never fire.
		 * In this case, expires is actually the dead value.
		 */
	dead:
		sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
				   &itp->it_value);
		return;
	}

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		clear_dead = p->exit_state;
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			put_task_struct(p);
			timer->it.cpu.task = NULL;
			timer->it.cpu.expires.sched = 0;
			read_unlock(&tasklist_lock);
			goto dead;
		} else {
			cpu_clock_sample_group(timer->it_clock, p, &now);
			clear_dead = (unlikely(p->exit_state) &&
				      thread_group_empty(p));
		}
		read_unlock(&tasklist_lock);
	}

	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		if (timer->it.cpu.incr.sched == 0 &&
		    cpu_time_before(timer->it_clock,
				    timer->it.cpu.expires, now)) {
			/*
			 * Do-nothing timer expired and has no reload,
			 * so it's as if it was never set.
			 */
			timer->it.cpu.expires.sched = 0;
			itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
			return;
		}
		/*
		 * Account for any expirations and reloads that should
		 * have happened.
		 */
		bump_cpu_timer(timer, now);
	}

	if (unlikely(clear_dead)) {
		/*
		 * We've noticed that the thread is dead, but
		 * not yet reaped.  Take this opportunity to
		 * drop our task ref.
		 */
		clear_dead_task(timer, now);
		goto dead;
	}

	if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
		sample_to_timespec(timer->it_clock,
				   cpu_time_sub(timer->it_clock,
						timer->it.cpu.expires, now),
				   &itp->it_value);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}
/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	int maxfire;
	struct list_head *timers = tsk->cpu_timers;

	maxfire = 20;
	tsk->it_prof_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
			tsk->it_prof_expires = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->it_virt_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
			tsk->it_virt_expires = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->it_sched_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || tsk->sched_time < t->expires.sched) {
			tsk->it_sched_expires = t->expires.sched;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}
}
/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers list onto the firing list.  Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	int maxfire;
	struct signal_struct *const sig = tsk->signal;
	cputime_t utime, stime, ptime, virt_expires, prof_expires;
	unsigned long long sched_time, sched_expires;
	struct task_struct *t;
	struct list_head *timers = sig->cpu_timers;

	/*
	 * Don't sample the current process CPU clocks if there are no timers.
	 */
	if (list_empty(&timers[CPUCLOCK_PROF]) &&
	    cputime_eq(sig->it_prof_expires, cputime_zero) &&
	    sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
	    list_empty(&timers[CPUCLOCK_VIRT]) &&
	    cputime_eq(sig->it_virt_expires, cputime_zero) &&
	    list_empty(&timers[CPUCLOCK_SCHED]))
		return;

	/*
	 * Collect the current process totals.
	 */
	utime = sig->utime;
	stime = sig->stime;
	sched_time = sig->sched_time;
	t = tsk;
	do {
		utime = cputime_add(utime, t->utime);
		stime = cputime_add(stime, t->stime);
		sched_time += t->sched_time;
		t = next_thread(t);
	} while (t != tsk);
	ptime = cputime_add(utime, stime);

	maxfire = 20;
	prof_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
			prof_expires = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	virt_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
			virt_expires = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	sched_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || sched_time < t->expires.sched) {
			sched_expires = t->expires.sched;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	/*
	 * Check for the special case process timers.
	 */
	if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
		if (cputime_ge(ptime, sig->it_prof_expires)) {
			/* ITIMER_PROF fires and reloads.  */
			sig->it_prof_expires = sig->it_prof_incr;
			if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
				sig->it_prof_expires = cputime_add(
					sig->it_prof_expires, ptime);
			}
			__group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
		}
		if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
		    (cputime_eq(prof_expires, cputime_zero) ||
		     cputime_lt(sig->it_prof_expires, prof_expires))) {
			prof_expires = sig->it_prof_expires;
		}
	}
	if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
		if (cputime_ge(utime, sig->it_virt_expires)) {
			/* ITIMER_VIRTUAL fires and reloads.  */
			sig->it_virt_expires = sig->it_virt_incr;
			if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
				sig->it_virt_expires = cputime_add(
					sig->it_virt_expires, utime);
			}
			__group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
		}
		if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
		    (cputime_eq(virt_expires, cputime_zero) ||
		     cputime_lt(sig->it_virt_expires, virt_expires))) {
			virt_expires = sig->it_virt_expires;
		}
	}
	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		unsigned long psecs = cputime_to_secs(ptime);
		cputime_t x;
		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (sig->rlim[RLIMIT_CPU].rlim_cur
			    < sig->rlim[RLIMIT_CPU].rlim_max) {
				sig->rlim[RLIMIT_CPU].rlim_cur++;
			}
		}
		x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
		if (cputime_eq(prof_expires, cputime_zero) ||
		    cputime_lt(x, prof_expires)) {
			prof_expires = x;
		}
	}

	if (!cputime_eq(prof_expires, cputime_zero) ||
	    !cputime_eq(virt_expires, cputime_zero) ||
	    sched_expires != 0) {
		/*
		 * Rebalance the threads' expiry times for the remaining
		 * process CPU timers.
		 */
		cputime_t prof_left, virt_left, ticks;
		unsigned long long sched_left, sched;
		const unsigned int nthreads = atomic_read(&sig->live);

		if (!nthreads)
			return;

		prof_left = cputime_sub(prof_expires, utime);
		prof_left = cputime_sub(prof_left, stime);
		prof_left = cputime_div_non_zero(prof_left, nthreads);
		virt_left = cputime_sub(virt_expires, utime);
		virt_left = cputime_div_non_zero(virt_left, nthreads);
		if (sched_expires) {
			sched_left = sched_expires - sched_time;
			do_div(sched_left, nthreads);
			sched_left = max_t(unsigned long long, sched_left, 1);
		} else {
			sched_left = 0;
		}
		t = tsk;
		do {
			if (unlikely(t->flags & PF_EXITING))
				continue;

			ticks = cputime_add(cputime_add(t->utime, t->stime),
					    prof_left);
			if (!cputime_eq(prof_expires, cputime_zero) &&
			    (cputime_eq(t->it_prof_expires, cputime_zero) ||
			     cputime_gt(t->it_prof_expires, ticks))) {
				t->it_prof_expires = ticks;
			}

			ticks = cputime_add(t->utime, virt_left);
			if (!cputime_eq(virt_expires, cputime_zero) &&
			    (cputime_eq(t->it_virt_expires, cputime_zero) ||
			     cputime_gt(t->it_virt_expires, ticks))) {
				t->it_virt_expires = ticks;
			}

			sched = t->sched_time + sched_left;
			if (sched_expires && (t->it_sched_expires == 0 ||
					      t->it_sched_expires > sched)) {
				t->it_sched_expires = sched;
			}
		} while ((t = next_thread(t)) != tsk);
	}
}
/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count now;

	if (unlikely(p == NULL))
		/*
		 * The task was cleaned up already, no future firings.
		 */
		goto out;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state)) {
			clear_dead_task(timer, now);
			goto out;
		}
		read_lock(&tasklist_lock); /* arm_timer needs it.  */
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			put_task_struct(p);
			timer->it.cpu.task = p = NULL;
			timer->it.cpu.expires.sched = 0;
			goto out_unlock;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/*
			 * We've noticed that the thread is dead, but
			 * not yet reaped.  Take this opportunity to
			 * drop our task ref.
			 */
			clear_dead_task(timer, now);
			goto out_unlock;
		}
		cpu_clock_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the tasklist_lock locked for the call below.  */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer, now);

out_unlock:
	read_unlock(&tasklist_lock);

out:
	timer->it_overrun_last = timer->it_overrun;
	timer->it_overrun = -1;
	++timer->it_requeue_pending;
}
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;

	BUG_ON(!irqs_disabled());

#define UNEXPIRED(clock) \
		(cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
		 cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))

	if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
	    (tsk->it_sched_expires == 0 ||
	     tsk->sched_time < tsk->it_sched_expires))
		return;

#undef	UNEXPIRED

	/*
	 * Double-check with locks held.
	 */
	read_lock(&tasklist_lock);
	if (likely(tsk->signal != NULL)) {
		spin_lock(&tsk->sighand->siglock);

		/*
		 * Here we take all the firing timers off the
		 * tsk->cpu_timers[N] and tsk->signal->cpu_timers[N] lists
		 * and put them on the firing list.
		 */
		check_thread_timers(tsk, &firing);
		check_process_timers(tsk, &firing);

		/*
		 * We must release these locks before taking any timer's lock.
		 * There is a potential race with timer deletion here, as the
		 * siglock now protects our private firing list.  We have set
		 * the firing flag in each timer, so that a deletion attempt
		 * that gets the timer lock before we do will give it up and
		 * spin until we've taken care of that timer below.
		 */
		spin_unlock(&tsk->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int firing;
		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(firing >= 0)) {
			cpu_timer_fire(timer);
		}
		spin_unlock(&timer->it_lock);
	}
}
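/*
 * For reference, UNEXPIRED(prof) above expands to
 *
 *	(cputime_eq(tsk->it_prof_expires, cputime_zero) ||
 *	 cputime_lt(prof_ticks(tsk), tsk->it_prof_expires))
 *
 * so the common case, no armed expiry or an expiry still in the
 * future, returns without taking tasklist_lock or the siglock.
 */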
/*
 * Set one of the process-wide special case CPU timers.
 * The tasklist_lock and tsk->sighand->siglock must be held by the caller.
 * The oldval argument is null for the RLIMIT_CPU timer, where *newval is
 * absolute; non-null for ITIMER_*, where *newval is relative and we update
 * it to be absolute, *oldval is absolute and we update it to be relative.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   cputime_t *newval, cputime_t *oldval)
{
	union cpu_time_count now;
	struct list_head *head;

	BUG_ON(clock_idx == CPUCLOCK_SCHED);
	cpu_clock_sample_group_locked(clock_idx, tsk, &now);

	if (oldval) {
		if (!cputime_eq(*oldval, cputime_zero)) {
			if (cputime_le(*oldval, now.cpu)) {
				/* Just about to fire.  */
				*oldval = jiffies_to_cputime(1);
			} else {
				*oldval = cputime_sub(*oldval, now.cpu);
			}
		}

		if (cputime_eq(*newval, cputime_zero))
			return;
		*newval = cputime_add(*newval, now.cpu);

		/*
		 * If the RLIMIT_CPU timer will expire before the
		 * ITIMER_PROF timer, we have nothing else to do.
		 */
		if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
		    < cputime_to_secs(*newval))
			return;
	}

	/*
	 * Check whether there are any process timers already set to fire
	 * before this one.  If so, we don't have anything more to do.
	 */
	head = &tsk->signal->cpu_timers[clock_idx];
	if (list_empty(head) ||
	    cputime_ge(list_entry(head->next,
				  struct cpu_timer_list, entry)->expires.cpu,
		       *newval)) {
		/*
		 * Rejigger each thread's expiry time so that one will
		 * notice before we hit the process-cumulative expiry time.
		 */
		union cpu_time_count expires = { .sched = 0 };
		expires.cpu = *newval;
		process_timer_rebalance(tsk, clock_idx, expires, now);
	}
}
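/*
 * Example (illustrative): set_process_cpu_timer() is the path the
 * setitimer() syscall uses for ITIMER_PROF/ITIMER_VIRTUAL; e.g. a
 * 10 ms recurring profiling interval requested from userspace:
 *
 *	#include <sys/time.h>
 *
 *	struct itimerval itv = {
 *		.it_value    = { .tv_sec = 0, .tv_usec = 10000 },
 *		.it_interval = { .tv_sec = 0, .tv_usec = 10000 },
 *	};
 *
 *	setitimer(ITIMER_PROF, &itv, NULL);
 */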
static long posix_cpu_clock_nanosleep_restart(struct restart_block *);

int posix_cpu_nsleep(const clockid_t which_clock, int flags,
		     struct timespec *rqtp, struct timespec __user *rmtp)
{
	struct restart_block *restart_block =
	    &current_thread_info()->restart_block;
	struct k_itimer timer;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == current->pid))
		return -EINVAL;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec zero_it;
		struct itimerspec it = { .it_value = *rqtp,
					 .it_interval = {} };

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires.sched == 0) {
				/*
				 * Our timer fired and was reset.
				 */
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
		posix_cpu_timer_set(&timer, 0, &zero_it, &it);
		spin_unlock_irq(&timer.it_lock);

		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp != NULL && !(flags & TIMER_ABSTIME) &&
		    copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_clock_nanosleep_restart;
		/* Caller already set restart_block->arg1 */
		restart_block->arg0 = which_clock;
		restart_block->arg1 = (unsigned long) rmtp;
		restart_block->arg2 = rqtp->tv_sec;
		restart_block->arg3 = rqtp->tv_nsec;

		error = -ERESTART_RESTARTBLOCK;
	}

	return error;
}
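/*
 * Example (illustrative): the userspace entry point that lands here is
 * clock_nanosleep() on a CPU-time clock; e.g. blocking until this
 * process has consumed another 500 ms of CPU time:
 *
 *	struct timespec rq = { .tv_sec = 0, .tv_nsec = 500000000 };
 *	struct timespec rm;
 *
 *	while (clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &rq, &rm)
 *	       == EINTR)
 *		rq = rm;	(resume with the remaining CPU time)
 */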
static long
posix_cpu_clock_nanosleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->arg0;
	struct timespec __user *rmtp;
	struct timespec t;

	rmtp = (struct timespec __user *) restart_block->arg1;
	t.tv_sec = restart_block->arg2;
	t.tv_nsec = restart_block->arg3;

	restart_block->fn = do_no_restart_syscall;
	return posix_cpu_nsleep(which_clock, TIMER_ABSTIME, &t, rmtp);
}
#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      struct timespec *rqtp,
			      struct timespec __user *rmtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
			     struct timespec *rqtp, struct timespec __user *rmtp)
{
	return -EINVAL;
}
static __init int init_posix_cpu_timers(void)
{
	struct k_clock process = {
		.clock_getres = process_cpu_clock_getres,
		.clock_get = process_cpu_clock_get,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = process_cpu_timer_create,
		.nsleep = process_cpu_nsleep,
	};
	struct k_clock thread = {
		.clock_getres = thread_cpu_clock_getres,
		.clock_get = thread_cpu_clock_get,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = thread_cpu_timer_create,
		.nsleep = thread_cpu_nsleep,
	};

	register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
	register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

	return 0;
}
__initcall(init_posix_cpu_timers);