/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 *            Changes to use preallocated sigqueue structures
 *            to allow signals to be sent reliably.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>

extern void k_getrusage(struct task_struct *, int, struct rusage *);

/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;
/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore     - Nothing Happens
 *   terminate  - kill the process, i.e. all threads in the group,
 *                similar to exit_group.  The group leader (only) reports
 *                WIFSIGNALED status to its parent.
 *   coredump   - write a core dump file describing all threads using
 *                the same mm and then kill all those threads
 *   stop       - stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
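
/*
 * Illustrative user-space sketch (not kernel code; error handling
 * omitted): the stop(*)/ignore(*) rules above are what make this
 * sequence report first a stop and then a continue.  Per POSIX,
 * WIFSTOPPED/WIFCONTINUED expose the transitions to the parent:
 *
 *	pid_t child = fork();
 *	if (child == 0)
 *		for (;;)
 *			pause();
 *	int status;
 *	kill(child, SIGSTOP);
 *	waitpid(child, &status, WUNTRACED);	WIFSTOPPED(status) is true
 *	kill(child, SIGCONT);
 *	waitpid(child, &status, WCONTINUED);	WIFCONTINUED(status) is true
 *	kill(child, SIGKILL);
 */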
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL) | M(SIGSTOP) )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT) | M(SIGILL)  | M(SIGTRAP) | M(SIGABRT) | \
	M(SIGFPE)  | M(SIGSEGV) | M(SIGBUS)  | M(SIGSYS)  | \
	M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )

#define sig_kernel_only(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))

#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)

#define sig_avoid_stop_race() \
	(sigtestsetmask(&current->pending.signal, M(SIGCONT) | M(SIGKILL)) || \
	 sigtestsetmask(&current->signal->shared_pending.signal, \
			M(SIGCONT) | M(SIGKILL)))
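
/*
 * Worked example of the classification macros above (values assume the
 * common Linux numbering where SIGTSTP == 20):
 *
 *	sig_kernel_stop(SIGTSTP)
 *	  == (SIGTSTP < SIGRTMIN) && T(SIGTSTP, SIG_KERNEL_STOP_MASK)
 *	  == 1 && ((1UL << 19) & (M(SIGSTOP)|M(SIGTSTP)|M(SIGTTIN)|M(SIGTTOU)))
 *
 * which is nonzero, so SIGTSTP classifies as a stop signal.  Real-time
 * signals (sig >= SIGRTMIN) always fail the "< SIGRTMIN" test, so they
 * never classify as kernel-only, stop, ignore, or coredump signals.
 */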
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return   handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
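
/*
 * Example of the arithmetic above: a pending SIGTERM (signal 15, bit 14
 * of word 0) that is also set in ->blocked contributes nothing to
 * "ready", so TIF_SIGPENDING stays clear until the signal is unblocked.
 */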
fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}

/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, int flags)
{
	struct sigqueue *q = NULL;

	if (atomic_read(&t->user->sigpending) <
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (q) {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->lock = NULL;
		q->user = get_uid(t->user);
		atomic_inc(&q->user->sigpending);
	}
	return q;
}

static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand = tsk->sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	spin_lock(&sighand->siglock);
	if (atomic_dec_and_test(&sig->count)) {
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime += tsk->utime;
		sig->stime += tsk->stime;
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below. */
	}
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.  We delayed
		 * calling exit_itimers until after flush_sigqueue, just in
		 * case our thread-local pending queue contained a queued
		 * timer signal that would have been cleared in
		 * exit_itimers.  When that called sigqueue_free, it would
		 * attempt to re-take the tasklist_lock and deadlock.  This
		 * can never happen if we ensure that all queues the
		 * timer's signal might be queued on have been flushed
		 * first.  The shared_pending queue, and our own pending
		 * queue are the only queues the timer could be on, since
		 * there are no other threads left in the group and timer
		 * signals are constrained to threads inside the group.
		 */
		exit_itimers(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}

void exit_signal(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}


/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
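
/*
 * Illustrative caller-side sketch (hypothetical driver and notifier,
 * not an in-tree user): a driver that must not have signals acted upon
 * during a critical window can install a notifier and remove it later:
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return dev->critical == 0;
 *	}
 *
 * (nonzero return: act on the signal; zero return: block it)
 *
 *	block_all_signals(my_notifier, dev, &dev->sigmask);
 *	... critical section ...
 *	unblock_all_signals();
 */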
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}
	recalc_sigpending();

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private){
		do_schedule_next_timer(info);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * If resume is set, we want to wake it up in the TASK_STOPPED case.
	 * We don't check for TASK_STOPPED because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By calling wake_up_process any time resume is set, we ensure
	 * the process will wake up and handle its stop or death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (sig < 0 || sig > _NSIG)
		return error;
	error = -EPERM;
	if ((!info || ((unsigned long)info != 1 &&
			(unsigned long)info != 2 && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;
	return security_task_kill(t, info, sig);
}

static void do_notify_parent_cldstop(struct task_struct *tsk,
				     struct task_struct *parent,
				     int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->stop_state = 1;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_STOPPED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
					CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->stop_state > 0) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->stop_state = -1;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_CONTINUED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
					CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		}
	}
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if ((unsigned long)info == 2)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC);
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else {
		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))


static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
#ifdef CONFIG_SMP
	if (!spin_is_locked(&t->sighand->siglock))
		BUG();
#endif

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
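
/*
 * The LEGACY_QUEUE rule is visible from user space.  Illustrative
 * sketch: with both signals blocked, a non-rt signal sent twice
 * coalesces into one pending instance, while an rt signal queued
 * twice with sigqueue() stays queued twice:
 *
 *	sigset_t s;
 *	union sigval v = { .sival_int = 1 };
 *	sigemptyset(&s);
 *	sigaddset(&s, SIGUSR1);
 *	sigaddset(&s, SIGRTMIN);
 *	sigprocmask(SIG_BLOCK, &s, NULL);
 *	kill(getpid(), SIGUSR1);
 *	kill(getpid(), SIGUSR1);	one SIGUSR1 instance pending
 *	sigqueue(getpid(), SIGRTMIN, v);
 *	sigqueue(getpid(), SIGRTMIN, v);	two SIGRTMIN instances pending
 */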
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
		sigdelset(&t->blocked, sig);
		recalc_sigpending_tsk(t);
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, (void *)2, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
#define wants_signal(sig, p, mask) 			\
	(!sigismember(&(p)->blocked, sig)		\
	 && !((p)->state & mask)			\
	 && !((p)->flags & PF_EXITING)			\
	 && (task_curr(p) || !signal_pending(p)))

static void
__group_complete_signal(int sig, struct task_struct *p)
{
	unsigned int mask;
	struct task_struct *t;

	/*
	 * Don't bother zombies and stopped tasks (but
	 * SIGKILL will punch through stopped state)
	 */
	mask = EXIT_DEAD | EXIT_ZOMBIE | TASK_TRACED;
	if (sig != SIGKILL)
		mask |= TASK_STOPPED;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p, mask))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t, mask)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !p->signal->group_exit &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->group_exit = 1;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

#ifdef CONFIG_SMP
	if (!spin_is_locked(&p->sighand->siglock))
		BUG();
#endif
	handle_stop_signal(sig, p);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state & (EXIT_ZOMBIE|EXIT_DEAD))
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		sigaddset(&t->pending.signal, SIGKILL);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		signal_wake_up(t, 1);
	}
}

/*
 * Must be called with the tasklist_lock held for reading!
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && p->sighand) {
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}

	return ret;
}

/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
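
/*
 * Summary of the pid interpretations above, as seen through kill(2):
 *
 *	pid > 0		the process (thread group) with that pid
 *	pid == 0	every process in the caller's process group
 *	pid < -1	every process in the process group -pid
 *	pid == -1	every process except init (pid 1) and the caller,
 *			subject to the usual permission checks
 */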
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (sig < 0 || sig > _NSIG)
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL)))
		q->flags |= SIGQUEUE_PREALLOC;
	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		read_lock(&tasklist_lock);
		spin_lock_irqsave(q->lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(q->lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
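
/*
 * Illustrative caller-side sketch (modeled loosely on the POSIX timer
 * code; names simplified and hypothetical): preallocate at creation
 * time so that later delivery can never fail for lack of memory:
 *
 *	at timer_create():
 *		timer->sigq = sigqueue_alloc();
 *		if (!timer->sigq)
 *			return -EAGAIN;		report failure up front
 *
 *	at timer expiry:
 *		timer->sigq->info.si_code = SI_TIMER;
 *		send_sigqueue(timer->signo, timer->sigq, timer->task);
 *
 *	at timer_delete():
 *		sigqueue_free(timer->sigq);
 */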
int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

/*
 * Joy. Or not. Pthread wants us to wake up every thread
 * in our parent group.
 */
static void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	struct task_struct *tsk = parent;

	/*
	 * Fortunately this is not necessary for thread groups:
	 */
	if (p->tgid == tsk->tgid) {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		return;
	}

	do {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		tsk = next_thread(tsk);
		if (tsk->signal != parent->signal)
			BUG();
	} while (tsk != parent);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime + tsk->signal->utime;
	info.si_stime = tsk->stime + tsk->signal->stime;

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (sig > 0 && sig <= _NSIG)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
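
/*
 * The SIG_IGN/SA_NOCLDWAIT autoreaping above is observable from user
 * space (illustrative sketch):
 *
 *	signal(SIGCHLD, SIG_IGN);
 *	if (fork() == 0)
 *		_exit(0);
 *
 * The child is reaped automatically without becoming a zombie, and a
 * blocked wait() eventually returns -1 with errno == ECHILD instead of
 * reporting the child's status.
 */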
static void
do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
			 int why)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime;
	info.si_stime = tsk->stime;

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 */
static void ptrace_stop(int exit_code, siginfo_t *info)
{
	BUG_ON(!(current->ptrace & PT_PTRACED));

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	do_notify_parent_cldstop(current, current->parent, CLD_TRAPPED);
	read_unlock(&tasklist_lock);
	schedule();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, current->parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
	else if (stop_count == 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader,
					 current->group_leader->real_parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 */
static void
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	/* spin_lock_irq(&sighand->siglock) is now done in caller */

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->stop_state = 1;
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = current->signal->group_exit_code = signr;
		set_current_state(TASK_STOPPED);
		sig->stop_state = 1;
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (unlikely(sig->group_exit)) {
			/*
			 * There is a group exit in progress now.
			 * We'll just ignore the stop and process the
			 * associated fatal signal.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (unlikely(sig_avoid_stop_race())) {
			/*
			 * Either a SIGCONT or a SIGKILL signal was
			 * posted in the siglock-not-held window.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (t->state < TASK_STOPPED) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->stop_state = 1;

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
}

/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump.
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->group_exit)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->stop_state = 1;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr == SIGSTOP) {
				do_signal_stop(signr); /* releases siglock */
				goto relock;
			}
			spin_unlock_irq(&current->sighand->siglock);

			/* signals can be posted during this window */

			if (is_orphaned_pgrp(process_group(current)))
				goto relock;

			spin_lock_irq(&current->sighand->siglock);
			if (unlikely(sig_avoid_stop_race())) {
				/*
				 * Either a SIGCONT or a SIGKILL signal was
				 * posted in the siglock-not-held window.
				 */
				continue;
			}

			do_signal_stop(signr); /* releases siglock */
			goto relock;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}

#endif

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
		break;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}

asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif

asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
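
/*
 * Illustrative user-space counterpart: the signal should be blocked
 * first, otherwise it may be delivered to a handler instead of being
 * picked up by sigtimedwait():
 *
 *	sigset_t s;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *	sigemptyset(&s);
 *	sigaddset(&s, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &s, NULL);
 *	int sig = sigtimedwait(&s, &si, &ts);
 *
 * On timeout the call returns -1 with errno set to EAGAIN, matching
 * the -EAGAIN/-EINTR distinction made above.
 */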
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process.  This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && (p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
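
/*
 * Illustrative user-space usage (C libraries of this era typically
 * have no wrapper, so the raw syscall is used):
 *
 *	#include <sys/syscall.h>
 *	syscall(__NR_tgkill, tgid, tid, SIGUSR1);
 *
 * If the thread tid has died and its pid was reused in another
 * process, the tgid check above makes this fail with -ESRCH instead
 * of signalling the wrong task.
 */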
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}

int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
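
/*
 * The POSIX 3.3.1.3 rule implemented above, from the user's side
 * (illustrative sketch): a pending, blocked SIGCHLD is discarded as
 * soon as its action becomes SIG_IGN:
 *
 *	sigset_t s;
 *	sigemptyset(&s);
 *	sigaddset(&s, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &s, NULL);
 *	raise(SIGCHLD);			now pending and blocked
 *	signal(SIGCHLD, SIG_IGN);	pending instance is discarded
 *	sigpending(&s);			SIGCHLD is no longer in the set
 */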
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (verify_area(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly;
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
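
/*
 * Illustrative user-space counterpart: an alternate stack only takes
 * effect for handlers installed with SA_ONSTACK, e.g. to catch a
 * stack-overflow SIGSEGV (segv_handler is a hypothetical handler
 * supplied by the caller):
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	sigaltstack(&ss, NULL);
 *
 *	struct sigaction sa = {
 *		.sa_handler = segv_handler,
 *		.sa_flags = SA_ONSTACK,
 *	};
 *	sigaction(SIGSEGV, &sa, NULL);
 */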
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}