X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=kernel%2Fposix-timers.c;h=72530f71e10efd9a2caa0f5eb808c7499c3161c7;hb=3ec04f3d2903fdf6d9849a8633af59b8628164a5;hp=663980afbc49670d007ebc7cc44653b604fa2e9d;hpb=8d40237c730b8be87c1b80a5d96b9c603fefa829;p=linux-2.6.git

diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 663980afb..72530f71e 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -43,10 +43,8 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
-#include 

 #ifndef div_long_long_rem
 #include 
@@ -135,10 +133,18 @@ static spinlock_t idr_lock = SPIN_LOCK_UNLOCKED;
 * resolution. Here we define the standard CLOCK_REALTIME as a
 * 1/HZ resolution clock.
 *
+ * CPUTIME & THREAD_CPUTIME: We are not, at this time, definding these
+ * two clocks (and the other process related clocks (Std
+ * 1003.1d-1999). The way these should be supported, we think,
+ * is to use large negative numbers for the two clocks that are
+ * pinned to the executing process and to use -pid for clocks
+ * pinned to particular pids. Calls which supported these clock
+ * ids would split early in the function.
+ *
 * RESOLUTION: Clock resolution is used to round up timer and interval
 * times, NOT to report clock times, which are reported with as
 * much resolution as the system can muster. In some cases this
- * resolution may depend on the underlying clock hardware and
+ * resolution may depend on the underlaying clock hardware and
 * may not be quantifiable until run time, and only then is the
 * necessary code is written. The standard says we should say
 * something about this issue in the documentation...
@@ -156,7 +162,7 @@ static spinlock_t idr_lock = SPIN_LOCK_UNLOCKED;
 *
 * At this time all functions EXCEPT clock_nanosleep can be
 * redirected by the CLOCKS structure. Clock_nanosleep is in
- * there, but the code ignores it.
+ * there, but the code ignors it.
 *
 * Permissions: It is assumed that the clock_settime() function defined
 * for each clock will take care of permission checks. Some
@@ -186,10 +192,12 @@ static struct k_clock_abs abs_list = {.list = LIST_HEAD_INIT(abs_list.list),
 #define p_timer_del(clock,a) \
 if_clock_do((clock)->timer_del, do_timer_delete, (a))

+void register_posix_clock(int clock_id, struct k_clock *new_clock);
 static int do_posix_gettime(struct k_clock *clock, struct timespec *tp);
 static u64 do_posix_clock_monotonic_gettime_parts(
 struct timespec *tp, struct timespec *mo);
 int do_posix_clock_monotonic_gettime(struct timespec *tp);
+int do_posix_clock_monotonic_settime(struct timespec *tp);
 static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags);

 static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
@@ -208,7 +216,7 @@ static __init int init_posix_timers(void)
 struct k_clock clock_monotonic = {.res = CLOCK_REALTIME_RES,
 .abs_struct = NULL,
 .clock_get = do_posix_clock_monotonic_gettime,
- .clock_set = do_posix_clock_nosettime
+ .clock_set = do_posix_clock_monotonic_settime
 };

 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
@@ -363,10 +371,32 @@ exit:
 unlock_timer(timr, flags);
 }

-int posix_timer_event(struct k_itimer *timr,int si_private)
+/*
+ * Notify the task and set up the timer for the next expiration (if
+ * applicable). This function requires that the k_itimer structure
+ * it_lock is taken. This code will requeue the timer only if we get
+ * either an error return or a flag (ret > 0) from send_seg_info
+ * indicating that the signal was either not queued or was queued
+ * without an info block. In this case, we will not get a call back to
+ * do_schedule_next_timer() so we do it here. This should be rare...
+
+ * An interesting problem can occur if, while a signal, and thus a call
+ * back is pending, the timer is rearmed, i.e. stopped and restarted.
+ * We then need to sort out the call back and do the right thing. What
+ * we do is to put a counter in the info block and match it with the
+ * timers copy on the call back. If they don't match, we just ignore
+ * the call back. The counter is local to the timer and we use odd to
+ * indicate a call back is pending. Note that we do allow the timer to
+ * be deleted while a signal is pending. The standard says we can
+ * allow that signal to be delivered, and we do.
+ */
+
+static void timer_notify_task(struct k_itimer *timr)
 {
+ int ret;
+
 memset(&timr->sigq->info, 0, sizeof(siginfo_t));
- timr->sigq->info.si_sys_private = si_private;
+

 /*
 * Send signal to the process that owns this timer.
@@ -381,6 +411,12 @@ int posix_timer_event(struct k_itimer *timr,int si_private)
 timr->sigq->info.si_code = SI_TIMER;
 timr->sigq->info.si_tid = timr->it_id;
 timr->sigq->info.si_value = timr->it_sigev_value;
+ if (timr->it_incr)
+ timr->sigq->info.si_sys_private = ++timr->it_requeue_pending;
+ else {
+ remove_from_abslist(timr);
+ }
+
 if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
 if (unlikely(timr->it_process->flags & PF_EXITING)) {
 timr->it_sigev_notify = SIGEV_SIGNAL;
@@ -388,20 +424,28 @@ int posix_timer_event(struct k_itimer *timr,int si_private)
 timr->it_process = timr->it_process->group_leader;
 goto group;
 }
- return send_sigqueue(timr->it_sigev_signo, timr->sigq,
+ ret = send_sigqueue(timr->it_sigev_signo, timr->sigq,
 timr->it_process);
 }
 else {
 group:
- return send_group_sigqueue(timr->it_sigev_signo, timr->sigq,
+ ret = send_group_sigqueue(timr->it_sigev_signo, timr->sigq,
 timr->it_process);
 }
+ if (ret) {
+ /*
+ * signal was not sent because of sig_ignor
+ * we will not get a call back to restart it AND
+ * it should be restarted.
+ */
+ schedule_next_timer(timr);
+ }
 }

 /*
 * This function gets called when a POSIX.1b interval timer expires. It
 * is used as a callback from the kernel internal timer. The
- * run_timer_list code ALWAYS calls with interrupts on.
+ * run_timer_list code ALWAYS calls with interrutps on.
 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
 */

@@ -444,23 +488,8 @@ static void posix_timer_fn(unsigned long __data)
 spin_unlock(&abs_list.lock);
 }

- if (do_notify) {
- int si_private=0;
-
- if (timr->it_incr)
- si_private = ++timr->it_requeue_pending;
- else {
- remove_from_abslist(timr);
- }
-
- if (posix_timer_event(timr, si_private))
- /*
- * signal was not sent because of sig_ignor
- * we will not get a call back to restart it AND
- * it should be restarted.
- */ - schedule_next_timer(timr); - } + if (do_notify) + timer_notify_task(timr); unlock_timer(timr, flags); /* hold thru abs lock to keep irq off */ } @@ -574,17 +603,11 @@ sys_timer_create(clockid_t which_clock, new_timer->it_clock = which_clock; new_timer->it_incr = 0; new_timer->it_overrun = -1; - if (posix_clocks[which_clock].timer_create) { - error = posix_clocks[which_clock].timer_create(new_timer); - if (error) - goto out; - } else { - init_timer(&new_timer->it_timer); - new_timer->it_timer.expires = 0; - new_timer->it_timer.data = (unsigned long) new_timer; - new_timer->it_timer.function = posix_timer_fn; - set_timer_inactive(new_timer); - } + init_timer(&new_timer->it_timer); + new_timer->it_timer.expires = 0; + new_timer->it_timer.data = (unsigned long) new_timer; + new_timer->it_timer.function = posix_timer_fn; + set_timer_inactive(new_timer); /* * return the timer_id now. The next step is hard to @@ -1194,25 +1217,11 @@ int do_posix_clock_monotonic_gettime(struct timespec *tp) return 0; } -int do_posix_clock_nosettime(struct timespec *tp) -{ - return -EINVAL; -} - -int do_posix_clock_notimer_create(struct k_itimer *timer) +int do_posix_clock_monotonic_settime(struct timespec *tp) { return -EINVAL; } -int do_posix_clock_nonanosleep(int which_clock, int flags, struct timespec *t) -{ -#ifndef ENOTSUP - return -EOPNOTSUPP; /* aka ENOTSUP in userland for POSIX */ -#else /* parisc does define it separately. */ - return -ENOTSUP; -#endif -} - asmlinkage long sys_clock_settime(clockid_t which_clock, const struct timespec __user *tp) { @@ -1229,23 +1238,19 @@ sys_clock_settime(clockid_t which_clock, const struct timespec __user *tp) return do_sys_settimeofday(&new_tp, NULL); } -static int do_clock_gettime(clockid_t which_clock, struct timespec *tp) +asmlinkage long +sys_clock_gettime(clockid_t which_clock, struct timespec __user *tp) { + struct timespec rtn_tp; + int error = 0; + if ((unsigned) which_clock >= MAX_CLOCKS || !posix_clocks[which_clock].res) return -EINVAL; - return do_posix_gettime(&posix_clocks[which_clock], tp); -} - -asmlinkage long -sys_clock_gettime(clockid_t which_clock, struct timespec __user *tp) -{ - struct timespec kernel_tp; - int error; + error = do_posix_gettime(&posix_clocks[which_clock], &rtn_tp); - error = do_clock_gettime(which_clock, &kernel_tp); - if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp))) + if (!error && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp))) error = -EFAULT; return error; @@ -1403,10 +1408,7 @@ sys_clock_nanosleep(clockid_t which_clock, int flags, if ((unsigned) t.tv_nsec >= NSEC_PER_SEC || t.tv_sec < 0) return -EINVAL; - if (posix_clocks[which_clock].nsleep) - ret = posix_clocks[which_clock].nsleep(which_clock, flags, &t); - else - ret = do_clock_nanosleep(which_clock, flags, &t); + ret = do_clock_nanosleep(which_clock, flags, &t); /* * Do this here as do_clock_nanosleep does not have the real address */
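
For reference only (not part of the patch above), here is a minimal userspace sketch of the interfaces this file implements. It assumes a Linux/glibc environment; the file name posix_timer_demo.c, the handler name on_timer, SIGRTMIN, the 100 ms interval, and the 42 cookie are arbitrary illustration choices. clock_settime() on CLOCK_MONOTONIC is expected to fail with EINVAL, which is what do_posix_clock_monotonic_settime() in the patched file returns, and each expiration of the CLOCK_REALTIME interval timer is delivered through the sigqueue path that posix_timer_fn()/timer_notify_task() drive.

/* posix_timer_demo.c -- illustrative only; not part of the kernel patch. */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static volatile sig_atomic_t expirations;

static void on_timer(int sig, siginfo_t *si, void *ctx)
{
	(void)sig;
	(void)ctx;
	(void)si;	/* si->si_value carries the sigev_value set at timer_create() time */
	expirations++;
}

int main(void)
{
	struct sigaction sa;
	struct sigevent sev;
	struct itimerspec its;
	struct timespec now;
	timer_t tid;

	/* CLOCK_MONOTONIC cannot be set from userspace; expect EINVAL. */
	if (clock_gettime(CLOCK_MONOTONIC, &now) == 0 &&
	    clock_settime(CLOCK_MONOTONIC, &now) == -1)
		printf("clock_settime(CLOCK_MONOTONIC): %s\n", strerror(errno));

	/* Deliver timer expirations as a queued signal (SIGEV_SIGNAL). */
	memset(&sa, 0, sizeof(sa));
	sa.sa_flags = SA_SIGINFO;
	sa.sa_sigaction = on_timer;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGRTMIN, &sa, NULL);

	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGRTMIN;
	sev.sigev_value.sival_int = 42;		/* arbitrary cookie, echoed back in si_value */
	if (timer_create(CLOCK_REALTIME, &sev, &tid) == -1) {
		perror("timer_create");
		return 1;
	}

	/* 100 ms initial expiration, then a 100 ms interval (it_incr != 0 in the kernel). */
	memset(&its, 0, sizeof(its));
	its.it_value.tv_nsec = 100 * 1000 * 1000;
	its.it_interval = its.it_value;
	timer_settime(tid, 0, &its, NULL);

	for (int i = 0; i < 10; i++)
		pause();	/* each caught SIGRTMIN wakes us up */

	printf("expirations: %d, overrun on last signal: %d\n",
	       (int)expirations, timer_getoverrun(tid));
	timer_delete(tid);
	return 0;
}

Build with something like "gcc -o posix_timer_demo posix_timer_demo.c -lrt"; the -lrt is only needed on older glibc, where the POSIX timer calls live in librt.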