/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/siginfo.h>

/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;

atomic_t nr_queued_signals;
int max_queued_signals = 1024;
/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals, when not blocked and set to SIG_DFL, behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
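
/*
 * Illustration (user-space, not part of this file): the stop/continue
 * interaction above can be observed with a small program.  SIGCONT both
 * resumes the stopped child and discards any queued stop signals, even
 * if the child blocks or ignores SIGCONT:
 *
 *	#include <signal.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		pid_t child = fork();
 *		if (child == 0)
 *			for (;;)
 *				pause();
 *		kill(child, SIGSTOP);
 *		waitpid(child, NULL, WUNTRACED);	// child is now TASK_STOPPED
 *		kill(child, SIGCONT);			// resumes, clears pending stops
 *		kill(child, SIGKILL);
 *		return 0;
 *	}
 */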
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL)  |  M(SIGSTOP))

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP)  |  M(SIGTSTP)  |  M(SIGTTIN)  |  M(SIGTTOU))

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT)  |  M(SIGILL)   |  M(SIGTRAP)  |  M(SIGABRT)  | \
	M(SIGFPE)   |  M(SIGSEGV)  |  M(SIGBUS)   |  M(SIGSYS)   | \
	M(SIGXCPU)  |  M(SIGXFSZ)  |  M_SIGEMT)

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT)  |  M(SIGCHLD)  |  M(SIGWINCH) |  M(SIGURG))

#define sig_kernel_only(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))

#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)

#define sig_avoid_stop_race() \
	(sigtestsetmask(&current->pending.signal, M(SIGCONT) | M(SIGKILL)) || \
	 sigtestsetmask(&current->signal->shared_pending.signal, \
			M(SIGCONT) | M(SIGKILL)))
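
/*
 * Worked expansion (illustration): on i386, where SIGTSTP == 20 and
 * SIGRTMIN == 32, sig_kernel_stop(SIGTSTP) becomes
 *
 *	(20 < 32) && ((1UL << 19) & SIG_KERNEL_STOP_MASK)
 *
 * which is nonzero since M(SIGTSTP) is one of the four stop bits.  Any
 * sig >= SIGRTMIN fails the first test, so the sig_kernel_* predicates
 * are never true for real-time signals.
 */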

static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return	handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;
	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;
	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;
	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}

/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;
	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;
	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}
	return sig;
}
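
/*
 * Worked illustration (not kernel code): next_signal() returns the
 * lowest-numbered deliverable signal.  With SIGINT (2) and SIGTERM (15)
 * both pending and neither blocked, the word scan computes
 *
 *	x = *s &~ *m;		// bits 1 and 14 set
 *	sig = ffz(~x) + 1;	// ffz(~x) == 1, lowest set bit => SIGINT (2)
 *
 * so standard signals are serviced in ascending numeric order.
 */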

struct sigqueue *__sigqueue_alloc(void)
{
	struct sigqueue *q = NULL;

	if (atomic_read(&nr_queued_signals) < max_queued_signals)
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
	if (q) {
		atomic_inc(&nr_queued_signals);
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->lock = NULL;
	}
	return q;
}

static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	kmem_cache_free(sigqueue_cachep, q);
	atomic_dec(&nr_queued_signals);
}

static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct *sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand = tsk->sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	spin_lock(&sighand->siglock);
	if (atomic_dec_and_test(&sig->count)) {
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below. */
	}
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.  We delayed
		 * calling exit_itimers until after flush_sigqueue, just in
		 * case our thread-local pending queue contained a queued
		 * timer signal that would have been cleared in
		 * exit_itimers.  When that called sigqueue_free, it would
		 * attempt to re-take the tasklist_lock and deadlock.  This
		 * can never happen if we ensure that all queues the
		 * timer's signal might be queued on have been flushed
		 * first.  The shared_pending queue, and our own pending
		 * queue are the only queues the timer could be on, since
		 * there are no other threads left in the group and timer
		 * signals are constrained to threads inside the group.
		 */
		exit_itimers(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}

void exit_signal(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
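
/*
 * Caller sketch (hypothetical, modelled on how a graphics driver might
 * use this interface): install a notifier that holds back delivery
 * while a hardware lock is owned.  my_notifier, my_dev and dev are
 * illustrative names, not kernel symbols:
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return !dev->lock_held;	// 0 => hold the signal back
 *	}
 *
 *	sigset_t mask;
 *	sigfillset(&mask);
 *	block_all_signals(my_notifier, dev, &mask);
 *	// ... critical section ...
 *	unblock_all_signals();
 */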

static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info. */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}
	recalc_sigpending();

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private)
		do_schedule_next_timer(info);
	return signr;
}
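
/*
 * Calling-pattern sketch (illustrative): all callers hold the siglock,
 * e.g. an arch signal-delivery loop does roughly
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */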

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * If resume is set, we want to wake it up in the TASK_STOPPED case.
	 * We don't check for TASK_STOPPED because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By calling wake_up_process any time resume is set, we ensure
	 * the process will wake up and handle its stop or death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (sig < 0 || sig > _NSIG)
		return error;
	error = -EPERM;
	if ((!info || ((unsigned long)info != 1 &&
		       (unsigned long)info != 2 && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;
	return security_task_kill(t, info, sig);
}

/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     struct task_struct *parent);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);
	}
}

static int send_signal(int sig, struct siginfo *info, struct sigpending *signals)
{
	struct sigqueue *q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if ((unsigned long)info == 2)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	if (atomic_read(&nr_queued_signals) < max_queued_signals)
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);

	if (q) {
		atomic_inc(&nr_queued_signals);
		q->flags = 0;
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else {
		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		    && info->si_code != SI_USER)
			/*
			 * Queue overflow, abort.  We may abort if the signal was rt
			 * and sent by user using something other than kill().
			 */
			return -EAGAIN;
		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
#ifdef CONFIG_SMP
	if (!spin_is_locked(&t->sighand->siglock))
		BUG();
#endif

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
		sigdelset(&t->blocked, sig);
		recalc_sigpending_tsk(t);
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, (void *)2, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
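
/*
 * Caller sketch (illustrative; details vary per architecture): fault
 * handlers use force_sig_info() so a task cannot block or ignore the
 * SIGSEGV raised against itself.  "addr" here is a hypothetical fault
 * address:
 *
 *	siginfo_t info;
 *
 *	info.si_signo = SIGSEGV;
 *	info.si_errno = 0;
 *	info.si_code = SEGV_MAPERR;
 *	info.si_addr = addr;
 *	force_sig_info(SIGSEGV, &info, current);
 */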

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
#define wants_signal(sig, p, mask)			\
	(!sigismember(&(p)->blocked, sig)		\
	 && !((p)->state & mask)			\
	 && !((p)->flags & PF_EXITING)			\
	 && (task_curr(p) || !signal_pending(p)))

static void
__group_complete_signal(int sig, struct task_struct *p, unsigned int mask)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p, mask))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t, mask)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !p->signal->group_exit &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->group_exit = 1;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned int mask;
	int ret = 0;

#ifdef CONFIG_SMP
	if (!spin_is_locked(&p->sighand->siglock))
		BUG();
#endif
	handle_stop_signal(sig, p);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Don't bother zombies and stopped tasks (but
	 * SIGKILL will punch through stopped state)
	 */
	mask = TASK_DEAD | TASK_ZOMBIE;
	if (sig != SIGKILL)
		mask |= TASK_STOPPED;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p, mask);
	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->state & (TASK_ZOMBIE|TASK_DEAD))
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		sigaddset(&t->pending.signal, SIGKILL);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		signal_wake_up(t, 1);
	}
}

/*
 * Must be called with the tasklist_lock held for reading!
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	if (!vx_check(vx_task_xid(p), VX_ADMIN|VX_WATCH|VX_IDENT))
		return -ESRCH;

	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && p->sighand) {
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}

	return ret;
}

/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p;
	struct list_head *l;
	struct pid *pid;
	int err, retval = 0;
	int found = 0;

	if (pgrp <= 0)
		return -EINVAL;

	for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) {
		found = 1;
		err = group_send_sig_info(sig, info, p);
		if (err && !retval)
			retval = err;
	}
	return found ? retval : -ESRCH;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

/*
 * kill_sl_info() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */

int
kill_sl_info(int sig, struct siginfo *info, pid_t sid)
{
	int err, retval = -EINVAL;
	struct pid *pid;
	struct list_head *l;
	struct task_struct *p;

	if (sid <= 0)
		goto out;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	for_each_task_pid(sid, PIDTYPE_SID, p, l, pid) {
		if (!p->signal->leader)
			continue;
		err = group_send_sig_info(sig, info, p);
		if (retval)
			retval = err;
	}
	read_unlock(&tasklist_lock);
out:
	return retval;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
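
/*
 * For reference, the pid encodings dispatched above mirror kill(2):
 *
 *	kill(0, sig)	 => signal the caller's process group
 *	kill(-1, sig)	 => signal everything except init and the caller's
 *			    own thread group
 *	kill(-pgrp, sig) => signal process group "pgrp"
 *	kill(pid, sig)	 => signal the single process "pid"
 */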

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_sl(pid_t sess, int sig, int priv)
{
	return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc()))
		q->flags |= SIGQUEUE_PREALLOC;
	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		read_lock(&tasklist_lock);
		spin_lock_irqsave(q->lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(q->lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
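
/*
 * Lifecycle sketch (illustrative, following the POSIX timer usage the
 * comment above describes): the sigqueue is allocated once at timer
 * creation and reused for every expiration, so expiry itself can never
 * fail with EAGAIN:
 *
 *	q = sigqueue_alloc();		// at timer_create(); NULL => -EAGAIN
 *	send_sigqueue(sig, q, task);	// at each expiration; no allocation
 *	sigqueue_free(q);		// at timer_delete()
 */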

int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	unsigned int mask;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/*
	 * Don't bother zombies and stopped tasks (but
	 * SIGKILL will punch through stopped state)
	 */
	mask = TASK_DEAD | TASK_ZOMBIE;
	if (sig != SIGKILL)
		mask |= TASK_STOPPED;

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p, mask);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

/*
 * Joy. Or not. Pthread wants us to wake up every thread
 * in our parent group.
 */
static void __wake_up_parent(struct task_struct *p,
			     struct task_struct *parent)
{
	struct task_struct *tsk = parent;

	/*
	 * Fortunately this is not necessary for thread groups:
	 */
	if (p->tgid == tsk->tgid) {
		wake_up_interruptible(&tsk->wait_chldexit);
		return;
	}

	do {
		wake_up_interruptible(&tsk->wait_chldexit);
		tsk = next_thread(tsk);
		if (tsk->signal != parent->signal)
			BUG();
	} while (tsk != parent);
}

/*
 * Let a parent know about a status change of a child.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	int why, status;
	struct sighand_struct *psig;

	if (sig == -1)
		BUG();

	BUG_ON(tsk->group_leader != tsk && tsk->group_leader->state != TASK_ZOMBIE && !tsk->ptrace);
	BUG_ON(tsk->group_leader == tsk && !thread_group_empty(tsk) && !tsk->ptrace);

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime;
	info.si_stime = tsk->stime;

	status = tsk->exit_code & 0x7f;
	why = SI_KERNEL;	/* shouldn't happen */
	switch (tsk->state) {
	case TASK_STOPPED:
		/* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
		if (tsk->ptrace & PT_PTRACED)
			why = CLD_TRAPPED;
		else
			why = CLD_STOPPED;
		break;

	default:
		if (tsk->exit_code & 0x80)
			why = CLD_DUMPED;
		else if (tsk->exit_code & 0x7f)
			why = CLD_KILLED;
		else {
			why = CLD_EXITED;
			status = tsk->exit_code >> 8;
		}
		break;
	}

	info.si_code = why;
	info.si_status = status;

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD && tsk->state != TASK_STOPPED &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (sig > 0 && sig <= _NSIG)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}

/*
 * We need the tasklist lock because it's the only
 * thing that protects our "parent" pointer.
 *
 * exit.c calls "do_notify_parent()" directly, because
 * it already has the tasklist lock.
 */
void
notify_parent(struct task_struct *tsk, int sig)
{
	if (sig != -1) {
		read_lock(&tasklist_lock);
		do_notify_parent(tsk, sig);
		read_unlock(&tasklist_lock);
	}
}

static void
do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime;
	info.si_stime = tsk->stime;

	info.si_status = tsk->exit_code & 0x7f;
	info.si_code = CLD_STOPPED;

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, current->parent);
		read_unlock(&tasklist_lock);
	}
	else if (stop_count == 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader,
					 current->group_leader->real_parent);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 */
static void
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	/* spin_lock_irq(&sighand->siglock) is now done in caller */

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (unlikely(sig->group_exit)) {
			/*
			 * There is a group exit in progress now.
			 * We'll just ignore the stop and process the
			 * associated fatal signal.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (unlikely(sig_avoid_stop_race())) {
			/*
			 * Either a SIGCONT or a SIGKILL signal was
			 * posted in the siglock-not-held window.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (t->state < TASK_STOPPED) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
}

/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->group_exit)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/*
			 * If there is a group stop in progress,
			 * we must participate in the bookkeeping.
			 */
			if (current->signal->group_stop_count > 0)
				--current->signal->group_stop_count;

			/* Let the debugger run.  */
			current->exit_code = signr;
			current->last_siginfo = info;
			set_current_state(TASK_STOPPED);
			spin_unlock_irq(&current->sighand->siglock);
			notify_parent(current, SIGCHLD);
			schedule();

			current->last_siginfo = NULL;

			/* We're back.  Did the debugger cancel the sig?  */
			spin_lock_irq(&current->sighand->siglock);
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) /* Run the handler.  */
			break; /* will return non-zero "signr" value */

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr == SIGSTOP) {
				do_signal_stop(signr); /* releases siglock */
				goto relock;
			}
			spin_unlock_irq(&current->sighand->siglock);

			/* signals can be posted during this window */

			if (is_orphaned_pgrp(process_group(current)))
				goto relock;

			spin_lock_irq(&current->sighand->siglock);
			if (unlikely(sig_avoid_stop_race())) {
				/*
				 * Either a SIGCONT or a SIGKILL signal was
				 * posted in the siglock-not-held window.
				 */
				continue;
			}

			do_signal_stop(signr); /* releases siglock */
			goto relock;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr) &&
		    do_coredump((long)signr, signr, regs)) {
			/*
			 * That killed all other threads in the group and
			 * synchronized with their demise, so there can't
			 * be any more left to kill now.  The group_exit
			 * flags are set by do_coredump.  Note that
			 * thread_group_empty won't always be true yet,
			 * because those threads were blocked in __exit_mm
			 * and we just let them go to finish dying.
			 */
			const int code = signr | 0x80;
			BUG_ON(!current->signal->group_exit);
			BUG_ON(current->signal->group_exit_code != code);
			do_exit(code);
			/* NOTREACHED */
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}

#endif

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(send_group_sig_info);
EXPORT_SYMBOL(sigqueue_alloc);
EXPORT_SYMBOL(sigqueue_free);
EXPORT_SYMBOL(send_sigqueue);
EXPORT_SYMBOL(send_group_sigqueue);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}
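
/*
 * Kernel-thread sketch (illustrative): a daemon that should sleep
 * uninterruptibly except for SIGKILL can block everything else with
 * this in-kernel interface:
 *
 *	sigset_t blocked;
 *
 *	siginitsetinv(&blocked, sigmask(SIGKILL));
 *	sigprocmask(SIG_SETMASK, &blocked, NULL);
 *	flush_signals(current);
 */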

asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif

asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we sleep, so that we'll
			 * be awakened when they arrive. */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
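
/*
 * User-space view (illustration): this syscall backs sigtimedwait(2).
 * Callers normally block the signal first so it stays pending instead of
 * being delivered asynchronously; handle_usr1() is a hypothetical helper:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { 5, 0 };		// five-second timeout
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		handle_usr1(&si);		// delivered synchronously
 */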

asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process.  This method
 *  solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && (p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}

/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}

int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (verify_area(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly;
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
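
/*
 * User-space view (illustration): do_sigaltstack() backs sigaltstack(2),
 * which lets e.g. a SIGSEGV handler run after the normal stack has
 * overflowed; segv_handler is a hypothetical handler:
 *
 *	stack_t ss = { malloc(SIGSTKSZ), 0, SIGSTKSZ };
 *	struct sigaction sa;
 *
 *	sigaltstack(&ss, NULL);
 *	sa.sa_handler = segv_handler;
 *	sa.sa_flags = SA_ONSTACK;		// run on the alternate stack
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */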

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#if !defined(__alpha__)
/* Alpha has its own versions with special arguments.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

#ifndef __sparc__
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __sparc__ */

#if !defined(__alpha__) && !defined(__ia64__) && \
    !defined(__arm__) && !defined(__s390__)
/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* !defined(__alpha__) */

#if !defined(__alpha__) && !defined(__ia64__) && !defined(__mips__) && \
    !defined(__arm__)
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* !alpha && !__ia64__ && !defined(__mips__) && !defined(__arm__) */
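
/*
 * Semantics note (illustration): because sys_signal() above passes
 * SA_ONESHOT, the handler is reset to SIG_DFL after one delivery, which
 * is why classic System V code re-arms from inside the handler:
 *
 *	void handler(int sig)
 *	{
 *		signal(sig, handler);	// re-install: one-shot semantics
 *		// ... handle the signal ...
 *	}
 */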

#ifndef HAVE_ARCH_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif /* HAVE_ARCH_SYS_PAUSE */

void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  0, NULL, NULL);
	if (!sigqueue_cachep)
		panic("signals_init(): cannot create sigqueue SLAB cache");
}