/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <asm/uaccess.h>
#include <linux/errno.h>
#include <linux/vs_cvirt.h>
static int check_clock(const clockid_t which_clock)
{
        int error = 0;
        struct task_struct *p;
        const pid_t pid = CPUCLOCK_PID(which_clock);

        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        if (pid == 0)
                return 0;

        read_lock(&tasklist_lock);
        p = find_task_by_pid(pid);
        if (!p || (CPUCLOCK_PERTHREAD(which_clock) ?
                   p->tgid != current->tgid : p->tgid != pid)) {
                error = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return error;
}
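
/*
 * For reference, the bit layout the CPUCLOCK_* macros above decode -- a
 * sketch of the encoding in <linux/posix-timers.h> (that header is
 * authoritative):
 *
 *      clockid = (~pid << 3) | (per-thread ? 4 : 0) | which
 *
 * where "which" is CPUCLOCK_PROF (0), CPUCLOCK_VIRT (1), or
 * CPUCLOCK_SCHED (2), and CPUCLOCK_MAX is 3.  For example, for pid 1234:
 *
 *      MAKE_PROCESS_CPUCLOCK(1234, CPUCLOCK_PROF) == (~1234 << 3) | 0
 *      MAKE_THREAD_CPUCLOCK(1234, CPUCLOCK_VIRT)  == (~1234 << 3) | 4 | 1
 *
 * The complemented pid keeps all CPU clockids negative, so they never
 * collide with the ordinary nonnegative CLOCK_* constants, and a
 * CPUCLOCK_PID() of 0 means "the calling task itself".
 */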
static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
        union cpu_time_count ret;
        ret.sched = 0;          /* high half always zero when .cpu used */
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
        } else {
                ret.cpu = timespec_to_cputime(tp);
        }
        return ret;
}
static void sample_to_timespec(const clockid_t which_clock,
                               union cpu_time_count cpu,
                               struct timespec *tp)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                tp->tv_sec = div_long_long_rem(cpu.sched,
                                               NSEC_PER_SEC, &tp->tv_nsec);
        } else {
                cputime_to_timespec(cpu.cpu, tp);
        }
}
static inline int cpu_time_before(const clockid_t which_clock,
                                  union cpu_time_count now,
                                  union cpu_time_count then)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                return now.sched < then.sched;
        } else {
                return cputime_lt(now.cpu, then.cpu);
        }
}
static inline void cpu_time_add(const clockid_t which_clock,
                                union cpu_time_count *acc,
                                union cpu_time_count val)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                acc->sched += val.sched;
        } else {
                acc->cpu = cputime_add(acc->cpu, val.cpu);
        }
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
                                                union cpu_time_count a,
                                                union cpu_time_count b)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                a.sched -= b.sched;
        } else {
                a.cpu = cputime_sub(a.cpu, b.cpu);
        }
        return a;
}
/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
                           union cpu_time_count now)
{
        int i;

        if (timer->it.cpu.incr.sched == 0)
                return;

        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                unsigned long long delta, incr;

                if (now.sched < timer->it.cpu.expires.sched)
                        return;
                incr = timer->it.cpu.incr.sched;
                delta = now.sched + incr - timer->it.cpu.expires.sched;
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; incr < delta - incr; i++)
                        incr = incr << 1;
                for (; i >= 0; incr >>= 1, i--) {
                        if (delta < incr)
                                continue;
                        timer->it.cpu.expires.sched += incr;
                        timer->it_overrun += 1 << i;
                        delta -= incr;
                }
        } else {
                cputime_t delta, incr;

                if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
                        return;
                incr = timer->it.cpu.incr.cpu;
                delta = cputime_sub(cputime_add(now.cpu, incr),
                                    timer->it.cpu.expires.cpu);
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
                        incr = cputime_add(incr, incr);
                for (; i >= 0; incr = cputime_halve(incr), i--) {
                        if (cputime_lt(delta, incr))
                                continue;
                        timer->it.cpu.expires.cpu =
                                cputime_add(timer->it.cpu.expires.cpu, incr);
                        timer->it_overrun += 1 << i;
                        delta = cputime_sub(delta, incr);
                }
        }
}
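
/*
 * A worked example of the doubling/halving scheme above (illustrative
 * numbers): expires = 10, incr = 3, now = 26, so
 * delta = now + incr - expires = 19.  The first loop doubles incr
 * while incr < delta - incr: 3 -> 6 -> 12, leaving i = 2.  The second
 * loop walks back down:
 *
 *      i = 2, incr = 12: delta >= incr, so expires += 12 (-> 22),
 *                        it_overrun += 1 << 2 (= 4), delta = 7
 *      i = 1, incr = 6:  delta >= incr, so expires += 6 (-> 28),
 *                        it_overrun += 1 << 1 (= 2), delta = 1
 *      i = 0, incr = 3:  delta < incr, skipped
 *
 * Result: expires = 28, the first period boundary past now, with six
 * overruns counted -- the same answer as the naive
 * "while (expires <= now) { expires += incr; overrun++; }" loop, but
 * in O(log n) steps and with no intermediate overflow.
 */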
static inline cputime_t prof_ticks(struct task_struct *p)
{
        return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
        return p->utime;
}
static inline unsigned long long sched_ns(struct task_struct *p)
{
        return (p == current) ? current_sched_time(p) : p->sched_time;
}
int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
        int error = check_clock(which_clock);
        if (!error) {
                tp->tv_sec = 0;
                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                        /*
                         * If sched_clock is using a cycle counter, we
                         * don't have its true resolution exported, but
                         * it is certainly much finer than 1s/HZ.
                         */
                        tp->tv_nsec = 1;
                }
        }
        return error;
}
int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
        /*
         * You can never reset a CPU clock, but we check for other errors
         * in the call before failing with EPERM.
         */
        int error = check_clock(which_clock);
        if (error == 0) {
                error = -EPERM;
        }
        return error;
}
/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
                            union cpu_time_count *cpu)
{
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = prof_ticks(p);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = sched_ns(p);
                break;
        }
        return 0;
}
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading, and
 * p->sighand->siglock held.
 */
static int cpu_clock_sample_group_locked(unsigned int clock_idx,
                                         struct task_struct *p,
                                         union cpu_time_count *cpu)
{
        struct task_struct *t = p;
        switch (clock_idx) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = cputime_add(p->signal->utime, p->signal->stime);
                do {
                        cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
                        t = next_thread(t);
                } while (t != p);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = p->signal->utime;
                do {
                        cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
                        t = next_thread(t);
                } while (t != p);
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = p->signal->sched_time;
                /* Add in each other live thread.  */
                while ((t = next_thread(t)) != p) {
                        cpu->sched += t->sched_time;
                }
                cpu->sched += sched_ns(p);
                break;
        }
        return 0;
}
/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  union cpu_time_count *cpu)
{
        int ret;
        unsigned long flags;
        spin_lock_irqsave(&p->sighand->siglock, flags);
        ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p,
                                            cpu);
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
        return ret;
}
int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
        const pid_t pid = CPUCLOCK_PID(which_clock);
        int error = -EINVAL;
        union cpu_time_count rtn;

        if (pid == 0) {
                /*
                 * Special case constant value for our own clocks.
                 * We don't have to do any lookup to find ourselves.
                 */
                if (CPUCLOCK_PERTHREAD(which_clock)) {
                        /*
                         * Sampling just ourselves we can do with no locking.
                         */
                        error = cpu_clock_sample(which_clock,
                                                 current, &rtn);
                } else {
                        read_lock(&tasklist_lock);
                        error = cpu_clock_sample_group(which_clock,
                                                       current, &rtn);
                        read_unlock(&tasklist_lock);
                }
        } else {
                /*
                 * Find the given PID, and validate that the caller
                 * should be able to see it.
                 */
                struct task_struct *p;
                read_lock(&tasklist_lock);
                p = find_task_by_pid(pid);
                if (p) {
                        if (CPUCLOCK_PERTHREAD(which_clock)) {
                                if (p->tgid == current->tgid) {
                                        error = cpu_clock_sample(which_clock,
                                                                 p, &rtn);
                                }
                        } else if (p->tgid == pid && p->signal) {
                                error = cpu_clock_sample_group(which_clock,
                                                               p, &rtn);
                        }
                }
                read_unlock(&tasklist_lock);
        }

        if (error)
                return error;
        sample_to_timespec(which_clock, rtn, tp);
        return 0;
}
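
/*
 * For reference, the userspace view of the lookup above -- a minimal
 * sketch (not compiled into the kernel) that samples the caller's own
 * clocks, i.e. the pid == 0 fast path:
 */
#if 0
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        /* Serviced by posix_cpu_clock_get() with CPUCLOCK_PID() == 0. */
        clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
        printf("process CPU time: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);

        clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
        printf("thread CPU time:  %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}
#endif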
/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create with the new timer already locked.
 */
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
        int ret = 0;
        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
        struct task_struct *p;

        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        INIT_LIST_HEAD(&new_timer->it.cpu.entry);
        new_timer->it.cpu.incr.sched = 0;
        new_timer->it.cpu.expires.sched = 0;

        read_lock(&tasklist_lock);
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
                if (pid == 0) {
                        p = current;
                } else {
                        p = find_task_by_pid(pid);
                        if (p && p->tgid != current->tgid)
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
                        p = find_task_by_pid(pid);
                        if (p && p->tgid != pid)
                                p = NULL;
                }
        }
        new_timer->it.cpu.task = p;
        if (p) {
                get_task_struct(p);
        } else {
                ret = -EINVAL;
        }
        read_unlock(&tasklist_lock);

        return ret;
}
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_del(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        int ret = 0;

        if (likely(p != NULL)) {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * We raced with the reaping of the task.
                         * The deletion should have cleared us off the list.
                         */
                        BUG_ON(!list_empty(&timer->it.cpu.entry));
                } else {
                        spin_lock(&p->sighand->siglock);
                        if (timer->it.cpu.firing)
                                ret = TIMER_RETRY;
                        else
                                list_del(&timer->it.cpu.entry);
                        spin_unlock(&p->sighand->siglock);
                }
                read_unlock(&tasklist_lock);

                if (!ret)
                        put_task_struct(p);
        }

        return ret;
}
/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
                           cputime_t utime, cputime_t stime,
                           unsigned long long sched_time)
{
        struct cpu_timer_list *timer, *next;
        cputime_t ptime = cputime_add(utime, stime);

        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, ptime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         ptime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (cputime_lt(timer->expires.cpu, utime)) {
                        timer->expires.cpu = cputime_zero;
                } else {
                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
                                                         utime);
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (timer->expires.sched < sched_time) {
                        timer->expires.sched = 0;
                } else {
                        timer->expires.sched -= sched_time;
                }
        }
}
/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
        cleanup_timers(tsk->cpu_timers,
                       tsk->utime, tsk->stime, tsk->sched_time);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
        cleanup_timers(tsk->signal->cpu_timers,
                       cputime_add(tsk->utime, tsk->signal->utime),
                       cputime_add(tsk->stime, tsk->signal->stime),
                       tsk->sched_time + tsk->signal->sched_time);
}
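
/*
 * For example (illustrative numbers): if an exiting thread has
 * accumulated utime + stime = 40 ticks and a profiling timer on its
 * list carries the absolute expiry 100, cleanup_timers rewrites the
 * expiry to the residual 60.  A later timer_gettime then reports 60
 * ticks remaining rather than an absolute value against a clock that
 * no longer advances.
 */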
/*
 * Set the expiry times of all the threads in the process so one of them
 * will go off before the process cumulative expiry total is reached.
 */
static void process_timer_rebalance(struct task_struct *p,
                                    unsigned int clock_idx,
                                    union cpu_time_count expires,
                                    union cpu_time_count val)
{
        cputime_t ticks, left;
        unsigned long long ns, nsleft;
        struct task_struct *t = p;
        unsigned int nthreads = atomic_read(&p->signal->live);

        switch (clock_idx) {
        default:
                BUG();
                break;
        case CPUCLOCK_PROF:
                left = cputime_div(cputime_sub(expires.cpu, val.cpu),
                                   nthreads);
                do {
                        if (likely(!(t->flags & PF_EXITING))) {
                                ticks = cputime_add(prof_ticks(t), left);
                                if (cputime_eq(t->it_prof_expires,
                                               cputime_zero) ||
                                    cputime_gt(t->it_prof_expires, ticks)) {
                                        t->it_prof_expires = ticks;
                                }
                        }
                        t = next_thread(t);
                } while (t != p);
                break;
        case CPUCLOCK_VIRT:
                left = cputime_div(cputime_sub(expires.cpu, val.cpu),
                                   nthreads);
                do {
                        if (likely(!(t->flags & PF_EXITING))) {
                                ticks = cputime_add(virt_ticks(t), left);
                                if (cputime_eq(t->it_virt_expires,
                                               cputime_zero) ||
                                    cputime_gt(t->it_virt_expires, ticks)) {
                                        t->it_virt_expires = ticks;
                                }
                        }
                        t = next_thread(t);
                } while (t != p);
                break;
        case CPUCLOCK_SCHED:
                nsleft = expires.sched - val.sched;
                do_div(nsleft, nthreads);
                do {
                        if (likely(!(t->flags & PF_EXITING))) {
                                ns = t->sched_time + nsleft;
                                if (t->it_sched_expires == 0 ||
                                    t->it_sched_expires > ns) {
                                        t->it_sched_expires = ns;
                                }
                        }
                        t = next_thread(t);
                } while (t != p);
                break;
        }
}
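
/*
 * A worked example of the split above (illustrative numbers): a
 * process-wide CPUCLOCK_PROF timer is armed at expires = 90 ticks
 * when the group total val is 30 ticks and 3 threads are live, so
 * left = (90 - 30) / 3 = 20 and each thread gets
 * it_prof_expires = prof_ticks(t) + 20 (unless it already has an
 * earlier expiry).  By the time the group total reaches 90, at least
 * one thread must have accumulated 20 more ticks itself, so some
 * thread's per-thread check trips no later than the true process-wide
 * expiry.
 */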
static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
        /*
         * That's all for this thread or process.
         * We leave our residual in expires to be reported.
         */
        put_task_struct(timer->it.cpu.task);
        timer->it.cpu.task = NULL;
        timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
                                             timer->it.cpu.expires,
                                             now);
}
/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, and interrupts disabled.
 */
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
        struct task_struct *p = timer->it.cpu.task;
        struct list_head *head, *listpos;
        struct cpu_timer_list *const nt = &timer->it.cpu;
        struct cpu_timer_list *next;
        unsigned long i;

        head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
                p->cpu_timers : p->signal->cpu_timers);
        head += CPUCLOCK_WHICH(timer->it_clock);

        BUG_ON(!irqs_disabled());
        spin_lock(&p->sighand->siglock);

        listpos = head;
        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                list_for_each_entry(next, head, entry) {
                        if (next->expires.sched > nt->expires.sched)
                                break;
                        listpos = &next->entry;
                }
        } else {
                list_for_each_entry(next, head, entry) {
                        if (cputime_gt(next->expires.cpu, nt->expires.cpu))
                                break;
                        listpos = &next->entry;
                }
        }
        list_add(&nt->entry, listpos);

        if (listpos == head) {
                /*
                 * We are the new earliest-expiring timer.
                 * If we are a thread timer, there can always
                 * be a process timer telling us to stop earlier.
                 */

                if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_PROF:
                                if (cputime_eq(p->it_prof_expires,
                                               cputime_zero) ||
                                    cputime_gt(p->it_prof_expires,
                                               nt->expires.cpu))
                                        p->it_prof_expires = nt->expires.cpu;
                                break;
                        case CPUCLOCK_VIRT:
                                if (cputime_eq(p->it_virt_expires,
                                               cputime_zero) ||
                                    cputime_gt(p->it_virt_expires,
                                               nt->expires.cpu))
                                        p->it_virt_expires = nt->expires.cpu;
                                break;
                        case CPUCLOCK_SCHED:
                                if (p->it_sched_expires == 0 ||
                                    p->it_sched_expires > nt->expires.sched)
                                        p->it_sched_expires = nt->expires.sched;
                                break;
                        }
                } else {
                        /*
                         * For a process timer, we must balance
                         * all the live threads' expirations.
                         */
                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
                        default:
                                BUG();
                        case CPUCLOCK_VIRT:
                                if (!cputime_eq(p->signal->it_virt_expires,
                                                cputime_zero) &&
                                    cputime_lt(p->signal->it_virt_expires,
                                               timer->it.cpu.expires.cpu))
                                        break;
                                goto rebalance;
                        case CPUCLOCK_PROF:
                                if (!cputime_eq(p->signal->it_prof_expires,
                                                cputime_zero) &&
                                    cputime_lt(p->signal->it_prof_expires,
                                               timer->it.cpu.expires.cpu))
                                        break;
                                i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
                                if (i != RLIM_INFINITY &&
                                    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
                                        break;
                                goto rebalance;
                        case CPUCLOCK_SCHED:
                        rebalance:
                                process_timer_rebalance(
                                        timer->it.cpu.task,
                                        CPUCLOCK_WHICH(timer->it_clock),
                                        timer->it.cpu.expires, now);
                                break;
                        }
                }
        }

        spin_unlock(&p->sighand->siglock);
}
/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
        if (unlikely(timer->sigq == NULL)) {
                /*
                 * This is a special case for clock_nanosleep,
                 * not a normal timer from sys_timer_create.
                 */
                wake_up_process(timer->it_process);
                timer->it.cpu.expires.sched = 0;
        } else if (timer->it.cpu.incr.sched == 0) {
                /*
                 * One-shot timer.  Clear it as soon as it's fired.
                 */
                posix_timer_event(timer, 0);
                timer->it.cpu.expires.sched = 0;
        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal did not get queued because the signal
                 * was ignored, so we won't get any callback to
                 * reload the timer.  But we need to keep it
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
        }
}
/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                        struct itimerspec *new, struct itimerspec *old)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count old_expires, new_expires, val;
        int ret;

        if (unlikely(p == NULL)) {
                /*
                 * Timer refers to a dead task's clock.
                 */
                return -ESRCH;
        }

        new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

        read_lock(&tasklist_lock);
        /*
         * We need the tasklist_lock to protect against reaping that
         * clears p->signal.  If p has just been reaped, we can no
         * longer get any information about it at all.
         */
        if (unlikely(p->signal == NULL)) {
                read_unlock(&tasklist_lock);
                put_task_struct(p);
                timer->it.cpu.task = NULL;
                return -ESRCH;
        }

        /*
         * Disarm any old timer after extracting its expiry time.
         */
        BUG_ON(!irqs_disabled());

        ret = 0;
        spin_lock(&p->sighand->siglock);
        old_expires = timer->it.cpu.expires;
        if (unlikely(timer->it.cpu.firing)) {
                timer->it.cpu.firing = -1;
                ret = TIMER_RETRY;
        } else
                list_del_init(&timer->it.cpu.entry);
        spin_unlock(&p->sighand->siglock);

        /*
         * We need to sample the current value to convert the new
         * value from relative to absolute, and to convert the
         * old value from absolute to relative.  To set a process
         * timer, we need a sample to balance the thread expiry
         * times (in arm_timer).  With an absolute time, we must
         * check if it's already passed.  In short, we need a sample.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
                cpu_clock_sample_group(timer->it_clock, p, &val);
        }

        if (old) {
                if (old_expires.sched == 0) {
                        old->it_value.tv_sec = 0;
                        old->it_value.tv_nsec = 0;
                } else {
                        /*
                         * Update the timer in case it has
                         * overrun already.  If it has,
                         * we'll report it as having overrun
                         * and with the next reloaded timer
                         * already ticking, though we are
                         * swallowing that pending
                         * notification here to install the
                         * new setting.
                         */
                        bump_cpu_timer(timer, val);
                        if (cpu_time_before(timer->it_clock, val,
                                            timer->it.cpu.expires)) {
                                old_expires = cpu_time_sub(
                                        timer->it_clock,
                                        timer->it.cpu.expires, val);
                                sample_to_timespec(timer->it_clock,
                                                   old_expires,
                                                   &old->it_value);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
                        }
                }
        }

        if (unlikely(ret)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                read_unlock(&tasklist_lock);
                goto out;
        }

        if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
                cpu_time_add(timer->it_clock, &new_expires, val);
        }

        /*
         * Install the new expiry time (or zero).
         * For a timer with no notification action, we don't actually
         * arm the timer (we'll just fake it for timer_gettime).
         */
        timer->it.cpu.expires = new_expires;
        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            cpu_time_before(timer->it_clock, val, new_expires)) {
                arm_timer(timer, val);
        }

        read_unlock(&tasklist_lock);

        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
        timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
                                                &new->it_interval);

        /*
         * This acts as a modification timestamp for the timer,
         * so any automatic reload attempt will punt on seeing
         * that we have reset the timer manually.
         */
        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;

        if (new_expires.sched != 0 &&
            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
            !cpu_time_before(timer->it_clock, val, new_expires)) {
                /*
                 * The designated time already passed, so we notify
                 * immediately, even if the thread never runs to
                 * accumulate more time on this clock.
                 */
                cpu_timer_fire(timer);
        }

        ret = 0;
 out:
        if (old) {
                sample_to_timespec(timer->it_clock,
                                   timer->it.cpu.incr, &old->it_interval);
        }
        return ret;
}
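
/*
 * For reference, the userspace path into posix_cpu_timer_set() -- a
 * minimal sketch (not compiled into the kernel; link with -lrt on
 * older glibc) that arms a periodic timer on the caller's own process
 * CPU clock:
 */
#if 0
#include <signal.h>
#include <time.h>

static void on_alarm(int sig)
{
        (void)sig;      /* one SIGALRM per second of process CPU time */
}

int main(void)
{
        timer_t tid;
        struct sigevent sev = {
                .sigev_notify = SIGEV_SIGNAL,
                .sigev_signo  = SIGALRM,
        };
        struct itimerspec its = {
                .it_value    = { .tv_sec = 1 }, /* first expiry */
                .it_interval = { .tv_sec = 1 }, /* reload (it.cpu.incr) */
        };

        signal(SIGALRM, on_alarm);
        timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid); /* posix_cpu_timer_create() */
        timer_settime(tid, 0, &its, NULL);                  /* posix_cpu_timer_set() */
        for (;;)
                ;       /* burn CPU so the clock advances */
        return 0;
}
#endif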
void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
        union cpu_time_count now;
        struct task_struct *p = timer->it.cpu.task;
        int clear_dead;

        /*
         * Easy part: convert the reload time.
         */
        sample_to_timespec(timer->it_clock,
                           timer->it.cpu.incr, &itp->it_interval);

        if (timer->it.cpu.expires.sched == 0) { /* Timer not armed at all.  */
                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                return;
        }

        if (unlikely(p == NULL)) {
                /*
                 * This task already died and the timer will never fire.
                 * In this case, expires is actually the dead value.
                 */
        dead:
                sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                   &itp->it_value);
                return;
        }

        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                clear_dead = p->exit_state;
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         * Call the timer disarmed, nothing else to do.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = NULL;
                        timer->it.cpu.expires.sched = 0;
                        read_unlock(&tasklist_lock);
                        goto dead;
                } else {
                        cpu_clock_sample_group(timer->it_clock, p, &now);
                        clear_dead = (unlikely(p->exit_state) &&
                                      thread_group_empty(p));
                }
                read_unlock(&tasklist_lock);
        }

        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                if (timer->it.cpu.incr.sched == 0 &&
                    cpu_time_before(timer->it_clock,
                                    timer->it.cpu.expires, now)) {
                        /*
                         * Do-nothing timer expired and has no reload,
                         * so it's as if it was never set.
                         */
                        timer->it.cpu.expires.sched = 0;
                        itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                        return;
                }
                /*
                 * Account for any expirations and reloads that should
                 * have happened.
                 */
                bump_cpu_timer(timer, now);
        }

        if (unlikely(clear_dead)) {
                /*
                 * We've noticed that the thread is dead, but
                 * not yet reaped.  Take this opportunity to
                 * drop our task ref.
                 */
                clear_dead_task(timer, now);
                goto dead;
        }

        if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
                sample_to_timespec(timer->it_clock,
                                   cpu_time_sub(timer->it_clock,
                                                timer->it.cpu.expires, now),
                                   &itp->it_value);
        } else {
                /*
                 * The timer should have expired already, but the firing
                 * hasn't taken place yet.  Say it's just about to expire.
                 */
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
}
/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
{
        int maxfire;
        struct list_head *timers = tsk->cpu_timers;

        maxfire = 20;
        tsk->it_prof_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
                        tsk->it_prof_expires = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->it_virt_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
                        tsk->it_virt_expires = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->it_sched_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || tsk->sched_time < t->expires.sched) {
                        tsk->it_sched_expires = t->expires.sched;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }
}
/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers list onto the firing list.  Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
{
        int maxfire;
        struct signal_struct *const sig = tsk->signal;
        cputime_t utime, stime, ptime, virt_expires, prof_expires;
        unsigned long long sched_time, sched_expires;
        struct task_struct *t;
        struct list_head *timers = sig->cpu_timers;

        /*
         * Don't sample the current process CPU clocks if there are no timers.
         */
        if (list_empty(&timers[CPUCLOCK_PROF]) &&
            cputime_eq(sig->it_prof_expires, cputime_zero) &&
            sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
            list_empty(&timers[CPUCLOCK_VIRT]) &&
            cputime_eq(sig->it_virt_expires, cputime_zero) &&
            list_empty(&timers[CPUCLOCK_SCHED]))
                return;

        /*
         * Collect the current process totals.
         */
        utime = sig->utime;
        stime = sig->stime;
        sched_time = sig->sched_time;
        t = tsk;
        do {
                utime = cputime_add(utime, t->utime);
                stime = cputime_add(stime, t->stime);
                sched_time += t->sched_time;
                t = next_thread(t);
        } while (t != tsk);
        ptime = cputime_add(utime, stime);

        maxfire = 20;
        prof_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
                        prof_expires = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        virt_expires = cputime_zero;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
                        virt_expires = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        sched_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_entry(timers->next,
                                                      struct cpu_timer_list,
                                                      entry);
                if (!--maxfire || sched_time < t->expires.sched) {
                        sched_expires = t->expires.sched;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        /*
         * Check for the special case process timers.
         */
        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
                if (cputime_ge(ptime, sig->it_prof_expires)) {
                        /* ITIMER_PROF fires and reloads.  */
                        sig->it_prof_expires = sig->it_prof_incr;
                        if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
                                sig->it_prof_expires = cputime_add(
                                        sig->it_prof_expires, ptime);
                        }
                        __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
                }
                if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
                    (cputime_eq(prof_expires, cputime_zero) ||
                     cputime_lt(sig->it_prof_expires, prof_expires))) {
                        prof_expires = sig->it_prof_expires;
                }
        }
        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
                if (cputime_ge(utime, sig->it_virt_expires)) {
                        /* ITIMER_VIRTUAL fires and reloads.  */
                        sig->it_virt_expires = sig->it_virt_incr;
                        if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
                                sig->it_virt_expires = cputime_add(
                                        sig->it_virt_expires, utime);
                        }
                        __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
                }
                if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
                    (cputime_eq(virt_expires, cputime_zero) ||
                     cputime_lt(sig->it_virt_expires, virt_expires))) {
                        virt_expires = sig->it_virt_expires;
                }
        }
        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
                cputime_t x;
                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                        if (sig->rlim[RLIMIT_CPU].rlim_cur
                            < sig->rlim[RLIMIT_CPU].rlim_max) {
                                sig->rlim[RLIMIT_CPU].rlim_cur++;
                        }
                }
                x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
                if (cputime_eq(prof_expires, cputime_zero) ||
                    cputime_lt(x, prof_expires)) {
                        prof_expires = x;
                }
        }

        if (!cputime_eq(prof_expires, cputime_zero) ||
            !cputime_eq(virt_expires, cputime_zero) ||
            sched_expires != 0) {
                /*
                 * Rebalance the threads' expiry times for the remaining
                 * process CPU timers.
                 */
                cputime_t prof_left, virt_left, ticks;
                unsigned long long sched_left, sched;
                const unsigned int nthreads = atomic_read(&sig->live);

                prof_left = cputime_sub(prof_expires, utime);
                prof_left = cputime_sub(prof_left, stime);
                prof_left = cputime_div(prof_left, nthreads);
                virt_left = cputime_sub(virt_expires, utime);
                virt_left = cputime_div(virt_left, nthreads);
                if (sched_expires) {
                        sched_left = sched_expires - sched_time;
                        do_div(sched_left, nthreads);
                } else {
                        sched_left = 0;
                }
                t = tsk;
                do {
                        if (unlikely(t->flags & PF_EXITING))
                                continue;

                        ticks = cputime_add(cputime_add(t->utime, t->stime),
                                            prof_left);
                        if (!cputime_eq(prof_expires, cputime_zero) &&
                            (cputime_eq(t->it_prof_expires, cputime_zero) ||
                             cputime_gt(t->it_prof_expires, ticks))) {
                                t->it_prof_expires = ticks;
                        }

                        ticks = cputime_add(t->utime, virt_left);
                        if (!cputime_eq(virt_expires, cputime_zero) &&
                            (cputime_eq(t->it_virt_expires, cputime_zero) ||
                             cputime_gt(t->it_virt_expires, ticks))) {
                                t->it_virt_expires = ticks;
                        }

                        sched = t->sched_time + sched_left;
                        if (sched_expires && (t->it_sched_expires == 0 ||
                                              t->it_sched_expires > sched)) {
                                t->it_sched_expires = sched;
                        }
                } while ((t = next_thread(t)) != tsk);
        }
}
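
/*
 * For reference, the soft/hard RLIMIT_CPU behavior implemented above,
 * seen from userspace -- a minimal sketch (not compiled into the
 * kernel; the limits are illustrative):
 */
#if 0
#include <sys/resource.h>
#include <signal.h>
#include <unistd.h>

static void on_xcpu(int sig)
{
        (void)sig;
        /* Sent once per CPU second past the soft limit, which the
         * kernel bumps by one each time until the hard limit. */
        write(1, "SIGXCPU\n", 8);
}

int main(void)
{
        struct rlimit rl = { .rlim_cur = 2, .rlim_max = 5 };    /* seconds */

        signal(SIGXCPU, on_xcpu);
        setrlimit(RLIMIT_CPU, &rl);
        for (;;)
                ;       /* SIGXCPU at 2s, 3s, 4s of CPU time; SIGKILL at 5s */
        return 0;
}
#endif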
/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count now;

        if (unlikely(p == NULL))
                /*
                 * The task was cleaned up already, no future firings.
                 */
                goto out;

        /*
         * Fetch the current sample and update the timer's expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state)) {
                        clear_dead_task(timer, now);
                        goto out;
                }
                read_lock(&tasklist_lock); /* arm_timer needs it.  */
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->signal == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = p = NULL;
                        timer->it.cpu.expires.sched = 0;
                        goto out_unlock;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                        /*
                         * We've noticed that the thread is dead, but
                         * not yet reaped.  Take this opportunity to
                         * drop our task ref.
                         */
                        clear_dead_task(timer, now);
                        goto out_unlock;
                }
                cpu_clock_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                /* Leave the tasklist_lock locked for the call below.  */
        }

        /*
         * Now re-arm for the new expiry time.
         */
        arm_timer(timer, now);

out_unlock:
        read_unlock(&tasklist_lock);

out:
        timer->it_overrun_last = timer->it_overrun;
        timer->it_overrun = -1;
        ++timer->it_requeue_pending;
}
/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
        LIST_HEAD(firing);
        struct k_itimer *timer, *next;

        BUG_ON(!irqs_disabled());

#define UNEXPIRED(clock) \
                (cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
                 cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))

        if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
            (tsk->it_sched_expires == 0 ||
             tsk->sched_time < tsk->it_sched_expires))
                return;

#undef  UNEXPIRED
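
        /*
         * For clarity, the UNEXPIRED(prof) test above expands to
         *
         *      (cputime_eq(tsk->it_prof_expires, cputime_zero) ||
         *       cputime_lt(prof_ticks(tsk), tsk->it_prof_expires))
         *
         * i.e. "no profiling expiry is cached, or it has not been
         * reached yet".  The fast path therefore reads only cached
         * per-thread fields and takes no locks, which is what makes it
         * cheap enough to run on every tick.
         */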
        /*
         * Double-check with locks held.
         */
        read_lock(&tasklist_lock);
        if (likely(tsk->signal != NULL)) {
                spin_lock(&tsk->sighand->siglock);

                /*
                 * Here we take all the firing timers off the
                 * tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
                 * lists and put them on the firing list.
                 */
                check_thread_timers(tsk, &firing);
                check_process_timers(tsk, &firing);

                /*
                 * We must release these locks before taking any timer's lock.
                 * There is a potential race with timer deletion here, as the
                 * siglock now protects our private firing list.  We have set
                 * the firing flag in each timer, so that a deletion attempt
                 * that gets the timer lock before we do will give it up and
                 * spin until we've taken care of that timer below.
                 */
                spin_unlock(&tsk->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us.  We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int firing;
                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
                if (likely(firing >= 0)) {
                        cpu_timer_fire(timer);
                }
                spin_unlock(&timer->it_lock);
        }
}
/*
 * Set one of the process-wide special case CPU timers.
 * The tasklist_lock and tsk->sighand->siglock must be held by the caller.
 * The oldval argument is null for the RLIMIT_CPU timer, where *newval is
 * absolute; non-null for ITIMER_*, where *newval is relative and we update
 * it to be absolute, *oldval is absolute and we update it to be relative.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
{
        union cpu_time_count now;
        struct list_head *head;

        BUG_ON(clock_idx == CPUCLOCK_SCHED);
        cpu_clock_sample_group_locked(clock_idx, tsk, &now);

        if (oldval) {
                if (!cputime_eq(*oldval, cputime_zero)) {
                        if (cputime_le(*oldval, now.cpu)) {
                                /* Just about to fire. */
                                *oldval = jiffies_to_cputime(1);
                        } else {
                                *oldval = cputime_sub(*oldval, now.cpu);
                        }
                }

                if (cputime_eq(*newval, cputime_zero))
                        return;
                *newval = cputime_add(*newval, now.cpu);

                /*
                 * If the RLIMIT_CPU timer will expire before the
                 * ITIMER_PROF timer, we have nothing else to do.
                 */
                if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
                    < cputime_to_secs(*newval))
                        return;
        }

        /*
         * Check whether there are any process timers already set to fire
         * before this one.  If so, we don't have anything more to do.
         */
        head = &tsk->signal->cpu_timers[clock_idx];
        if (list_empty(head) ||
            cputime_ge(list_entry(head->next,
                                  struct cpu_timer_list, entry)->expires.cpu,
                       *newval)) {
                /*
                 * Rejigger each thread's expiry time so that one will
                 * notice before we hit the process-cumulative expiry time.
                 */
                union cpu_time_count expires = { .sched = 0 };
                expires.cpu = *newval;
                process_timer_rebalance(tsk, clock_idx, expires, now);
        }
}
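
/*
 * For reference, the main caller of set_process_cpu_timer() is the
 * setitimer path: do_setitimer() converts the interval to cputime and
 * invokes it with CPUCLOCK_PROF or CPUCLOCK_VIRT.  A minimal userspace
 * sketch (not compiled into the kernel):
 */
#if 0
#include <sys/time.h>
#include <signal.h>

static void on_prof(int sig)
{
        (void)sig;      /* one SIGPROF per second of process CPU time */
}

int main(void)
{
        struct itimerval itv = {
                .it_value    = { .tv_sec = 1 },
                .it_interval = { .tv_sec = 1 },
        };

        signal(SIGPROF, on_prof);
        setitimer(ITIMER_PROF, &itv, NULL);
        for (;;)
                ;       /* burn CPU so the process clock advances */
        return 0;
}
#endif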
static long posix_cpu_clock_nanosleep_restart(struct restart_block *);

int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                     struct timespec *rqtp, struct timespec __user *rmtp)
{
        struct restart_block *restart_block =
            &current_thread_info()->restart_block;
        struct k_itimer timer;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == current->pid))
                return -EINVAL;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                static struct itimerspec zero_it;
                struct itimerspec it = { .it_value = *rqtp,
                                         .it_interval = {} };

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, &it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires.sched == 0) {
                                /*
                                 * Our timer fired and was reset.
                                 */
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
                posix_cpu_timer_set(&timer, 0, &zero_it, &it);
                spin_unlock_irq(&timer.it_lock);

                if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp != NULL && !(flags & TIMER_ABSTIME) &&
                    copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_clock_nanosleep_restart;
                restart_block->arg0 = which_clock;
                restart_block->arg1 = (unsigned long) rmtp;
                restart_block->arg2 = rqtp->tv_sec;
                restart_block->arg3 = rqtp->tv_nsec;

                error = -ERESTART_RESTARTBLOCK;
        }

        return error;
}
static long
posix_cpu_clock_nanosleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->arg0;
        struct timespec __user *rmtp;
        struct timespec t;

        rmtp = (struct timespec __user *) restart_block->arg1;
        t.tv_sec = restart_block->arg2;
        t.tv_nsec = restart_block->arg3;

        restart_block->fn = do_no_restart_syscall;
        return posix_cpu_nsleep(which_clock, TIMER_ABSTIME, &t, rmtp);
}
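
/*
 * For reference, the userspace entry to the sleep above -- a minimal
 * sketch (not compiled into the kernel; link with -lrt and -lpthread)
 * that blocks until the whole process has consumed two more seconds
 * of CPU time:
 */
#if 0
#include <pthread.h>
#include <time.h>

static void *burn(void *arg)
{
        (void)arg;
        for (;;)
                ;       /* accumulate process CPU time for the sleeper */
}

int main(void)
{
        pthread_t t;
        struct timespec rq = { .tv_sec = 2 }, rm;

        pthread_create(&t, NULL, burn, NULL);
        /*
         * A relative sleep on the process CPU clock.  If a signal
         * interrupts it, rm reports the CPU time still to be consumed
         * and the restart block re-enters with TIMER_ABSTIME.
         */
        clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &rq, &rm);
        return 0;
}
#endif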
#define PROCESS_CLOCK   MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK    MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
                                    struct timespec *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                                 struct timespec *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                              struct timespec *rqtp,
                              struct timespec __user *rmtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                                struct timespec *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
                             struct timespec *rqtp, struct timespec __user *rmtp)
{
        /*
         * A thread can never sleep on its own CPU clock: it would
         * never run again to accumulate the time being waited for.
         */
        return -EINVAL;
}

static __init int init_posix_cpu_timers(void)
{
        struct k_clock process = {
                .clock_getres = process_cpu_clock_getres,
                .clock_get = process_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = process_cpu_timer_create,
                .nsleep = process_cpu_nsleep,
        };
        struct k_clock thread = {
                .clock_getres = thread_cpu_clock_getres,
                .clock_get = thread_cpu_clock_get,
                .clock_set = do_posix_clock_nosettime,
                .timer_create = thread_cpu_timer_create,
                .nsleep = thread_cpu_nsleep,
        };

        register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
        register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

        return 0;
}
__initcall(init_posix_cpu_timers);
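
/*
 * Finally, the pid != 0 lookup paths above are reachable from userspace
 * through clockids manufactured for other tasks -- a minimal sketch
 * (not compiled into the kernel; link with -lrt and -lpthread),
 * assuming a glibc that implements pthread_getcpuclockid():
 */
#if 0
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static void *worker(void *arg)
{
        (void)arg;
        for (;;)
                ;       /* burn CPU so the sampled clock advances */
}

int main(void)
{
        pthread_t t;
        clockid_t cid;
        struct timespec ts;

        pthread_create(&t, NULL, worker, NULL);
        /* Encodes the worker's id a la MAKE_THREAD_CPUCLOCK(). */
        pthread_getcpuclockid(t, &cid);
        sleep(1);
        /* Serviced by posix_cpu_clock_get() with a nonzero pid. */
        clock_gettime(cid, &ts);
        printf("worker CPU time: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}
#endif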