2 * linux/kernel/signal.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
13 #include <linux/config.h>
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/smp_lock.h>
17 #include <linux/init.h>
18 #include <linux/sched.h>
20 #include <linux/tty.h>
21 #include <linux/binfmts.h>
22 #include <linux/security.h>
23 #include <linux/ptrace.h>
24 #include <asm/param.h>
25 #include <asm/uaccess.h>
26 #include <asm/unistd.h>
27 #include <asm/siginfo.h>
30 * SLAB caches for signal bits.
33 static kmem_cache_t *sigqueue_cachep;
36 * In POSIX a signal is sent either to a specific thread (Linux task)
37 * or to the process as a whole (Linux thread group). How the signal
38 * is sent determines whether it's to one thread or the whole group,
39 * which determines which signal mask(s) are involved in blocking it
40 * from being delivered until later. When the signal is delivered,
41 * either it's caught or ignored by a user handler or it has a default
42 * effect that applies to the whole thread group (POSIX process).
44 * The possible effects an unblocked signal set to SIG_DFL can have are:
45 * ignore - Nothing Happens
46 * terminate - kill the process, i.e. all threads in the group,
47 * similar to exit_group. The group leader (only) reports
48 * WIFSIGNALED status to its parent.
49 * coredump - write a core dump file describing all threads using
50 * the same mm and then kill all those threads
51 * stop - stop all the threads in the group, i.e. TASK_STOPPED state
53 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
54 * Other signals when not blocked and set to SIG_DFL behave as follows.
55 * The job control signals also have other special effects.
57 * +--------------------+------------------+
58 * | POSIX signal | default action |
59 * +--------------------+------------------+
60 * | SIGHUP | terminate |
61 * | SIGINT | terminate |
62 * | SIGQUIT | coredump |
63 * | SIGILL | coredump |
64 * | SIGTRAP | coredump |
65 * | SIGABRT/SIGIOT | coredump |
66 * | SIGBUS | coredump |
67 * | SIGFPE | coredump |
68 * | SIGKILL | terminate(+) |
69 * | SIGUSR1 | terminate |
70 * | SIGSEGV | coredump |
71 * | SIGUSR2 | terminate |
72 * | SIGPIPE | terminate |
73 * | SIGALRM | terminate |
74 * | SIGTERM | terminate |
75 * | SIGCHLD | ignore |
76 * | SIGCONT | ignore(*) |
77 * | SIGSTOP | stop(*)(+) |
78 * | SIGTSTP | stop(*) |
79 * | SIGTTIN | stop(*) |
80 * | SIGTTOU | stop(*) |
82 * | SIGXCPU | coredump |
83 * | SIGXFSZ | coredump |
84 * | SIGVTALRM | terminate |
85 * | SIGPROF | terminate |
86 * | SIGPOLL/SIGIO | terminate |
87 * | SIGSYS/SIGUNUSED | coredump |
88 * | SIGSTKFLT | terminate |
89 * | SIGWINCH | ignore |
90 * | SIGPWR | terminate |
91 * | SIGRTMIN-SIGRTMAX | terminate |
92 * +--------------------+------------------+
93 * | non-POSIX signal | default action |
94 * +--------------------+------------------+
95 * | SIGEMT | coredump |
96 * +--------------------+------------------+
98 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
99 * (*) Special job control effects:
100 * When SIGCONT is sent, it resumes the process (all threads in the group)
101 * from TASK_STOPPED state and also clears any pending/queued stop signals
102 * (any of those marked with "stop(*)"). This happens regardless of blocking,
103 * catching, or ignoring SIGCONT. When any stop signal is sent, it clears
104 * any pending/queued SIGCONT signals; this happens regardless of blocking,
105 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
106 * default action of stopping the process may happen later or never.
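
/*
 * A hedged userspace illustration of the "(*)" rule above (not part of
 * this file; compile the guarded block separately as a normal program):
 * on Linux, a pending, blocked SIGTSTP disappears as soon as SIGCONT is
 * generated, even though SIGCONT itself has no visible effect here.
 */
#if 0 /* illustrative userspace example */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pend;

	sigemptyset(&block);
	sigaddset(&block, SIGTSTP);
	sigprocmask(SIG_BLOCK, &block, NULL);	/* keep SIGTSTP pending */

	raise(SIGTSTP);				/* generated, not yet delivered */
	sigpending(&pend);
	printf("SIGTSTP pending: %d\n", sigismember(&pend, SIGTSTP)); /* 1 */

	raise(SIGCONT);				/* side effect clears queued stops */
	sigpending(&pend);
	printf("SIGTSTP pending: %d\n", sigismember(&pend, SIGTSTP)); /* 0 */
	return 0;
}
#endif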
110 #define M_SIGEMT M(SIGEMT)
115 #if SIGRTMIN > BITS_PER_LONG
116 #define M(sig) (1ULL << ((sig)-1))
118 #define M(sig) (1UL << ((sig)-1))
120 #define T(sig, mask) (M(sig) & (mask))
122 #define SIG_KERNEL_ONLY_MASK (\
123 M(SIGKILL) | M(SIGSTOP) )
125 #define SIG_KERNEL_STOP_MASK (\
126 M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )
128 #define SIG_KERNEL_COREDUMP_MASK (\
129 M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \
130 M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \
131 M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )
133 #define SIG_KERNEL_IGNORE_MASK (\
134 M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )
136 #define sig_kernel_only(sig) \
137 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
138 #define sig_kernel_coredump(sig) \
139 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
140 #define sig_kernel_ignore(sig) \
141 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
142 #define sig_kernel_stop(sig) \
143 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
145 #define sig_user_defined(t, signr) \
146 (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
147 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
149 #define sig_fatal(t, signr) \
150 (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
151 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
153 #define sig_avoid_stop_race() \
154 sigtestsetmask(&current->pending.signal, M(SIGCONT) | M(SIGKILL)) || \
155 sigtestsetmask(&current->signal->shared_pending.signal, \
156 M(SIGCONT) | M(SIGKILL)))
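
/*
 * A minimal userspace sketch of the bitmask classification above. The
 * DEMO_* and demo_* names are invented for this sketch (compile it
 * separately): each low signal maps to one bit, and each sig_kernel_*()
 * test is a single AND against a constant mask.
 */
#if 0 /* illustrative userspace example */
#include <signal.h>
#include <stdio.h>

#define DEMO_M(sig)		(1UL << ((sig) - 1))
#define DEMO_T(sig, mask)	(DEMO_M(sig) & (mask))
#define DEMO_STOP_MASK		(DEMO_M(SIGSTOP) | DEMO_M(SIGTSTP) | \
				 DEMO_M(SIGTTIN) | DEMO_M(SIGTTOU))
#define demo_sig_kernel_stop(sig) \
	(((sig) < SIGRTMIN) && DEMO_T((sig), DEMO_STOP_MASK) != 0)

int main(void)
{
	printf("SIGTSTP is a stop signal: %d\n", demo_sig_kernel_stop(SIGTSTP));
	printf("SIGTERM is a stop signal: %d\n", demo_sig_kernel_stop(SIGTERM));
	return 0;
}
#endif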
158 static int sig_ignored(struct task_struct *t, int sig)
160 void __user * handler;
163 * Tracers always want to know about signals..
165 if (t->ptrace & PT_PTRACED)
169 * Blocked signals are never ignored, since the
170 * signal handler may change by the time it is unblocked.
173 if (sigismember(&t->blocked, sig))
176 /* Is it explicitly or implicitly ignored? */
177 handler = t->sighand->action[sig-1].sa.sa_handler;
178 return handler == SIG_IGN ||
179 (handler == SIG_DFL && sig_kernel_ignore(sig));
183 * Re-calculate pending state from the set of locally pending
184 * signals, globally pending signals, and blocked signals.
186 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
191 switch (_NSIG_WORDS) {
193 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
194 ready |= signal->sig[i] &~ blocked->sig[i];
197 case 4: ready = signal->sig[3] &~ blocked->sig[3];
198 ready |= signal->sig[2] &~ blocked->sig[2];
199 ready |= signal->sig[1] &~ blocked->sig[1];
200 ready |= signal->sig[0] &~ blocked->sig[0];
203 case 2: ready = signal->sig[1] &~ blocked->sig[1];
204 ready |= signal->sig[0] &~ blocked->sig[0];
207 case 1: ready = signal->sig[0] &~ blocked->sig[0];
212 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
214 fastcall void recalc_sigpending_tsk(struct task_struct *t)
216 if (t->signal->group_stop_count > 0 ||
217 PENDING(&t->pending, &t->blocked) ||
218 PENDING(&t->signal->shared_pending, &t->blocked))
219 set_tsk_thread_flag(t, TIF_SIGPENDING);
221 clear_tsk_thread_flag(t, TIF_SIGPENDING);
224 void recalc_sigpending(void)
226 recalc_sigpending_tsk(current);
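
/*
 * Hedged sketch of the invariant recalc_sigpending() maintains, as a
 * one-word userspace analogue (demo_* names are invented; compile
 * separately): a signal is deliverable iff it is pending and not blocked.
 */
#if 0 /* illustrative userspace example */
#include <stdio.h>

/* one-word analogue of has_pending_signals() */
static int demo_has_pending(unsigned long pending, unsigned long blocked)
{
	return (pending & ~blocked) != 0;
}

int main(void)
{
	printf("%d\n", demo_has_pending(0x5, 0x1));	/* 1: bit 2 deliverable */
	printf("%d\n", demo_has_pending(0x1, 0x1));	/* 0: all blocked */
	return 0;
}
#endif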
229 /* Given the mask, find the first available signal that should be serviced. */
232 next_signal(struct sigpending *pending, sigset_t *mask)
234 unsigned long i, *s, *m, x;
237 s = pending->signal.sig;
239 switch (_NSIG_WORDS) {
241 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
242 if ((x = *s &~ *m) != 0) {
243 sig = ffz(~x) + i*_NSIG_BPW + 1;
248 case 2: if ((x = s[0] &~ m[0]) != 0)
250 else if ((x = s[1] &~ m[1]) != 0)
257 case 1: if ((x = *s &~ *m) != 0)
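
/*
 * Userspace sketch of next_signal()'s word scan (demo_* names invented,
 * compile separately): ffz(~x) is "find first set bit of x", so the
 * lowest-numbered deliverable signal in a word wins.
 */
#if 0 /* illustrative userspace example */
#include <stdio.h>

static int demo_next_signal(unsigned long pending, unsigned long blocked)
{
	unsigned long x = pending & ~blocked;

	if (x == 0)
		return 0;			/* nothing deliverable */
	return __builtin_ctzl(x) + 1;		/* bit N <-> signal N+1 */
}

int main(void)
{
	/* bits for signals 2 (SIGINT) and 15 (SIGTERM) pending, none blocked */
	printf("next: %d\n", demo_next_signal((1UL << 1) | (1UL << 14), 0));
	return 0;
}
#endif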
265 static struct sigqueue *__sigqueue_alloc(void)
267 struct sigqueue *q = NULL;
269 if (atomic_read(&current->user->sigpending) <
270 current->rlim[RLIMIT_SIGPENDING].rlim_cur)
271 q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
273 INIT_LIST_HEAD(&q->list);
276 q->user = get_uid(current->user);
277 atomic_inc(&q->user->sigpending);
282 static inline void __sigqueue_free(struct sigqueue *q)
284 if (q->flags & SIGQUEUE_PREALLOC)
286 atomic_dec(&q->user->sigpending);
288 kmem_cache_free(sigqueue_cachep, q);
291 static void flush_sigqueue(struct sigpending *queue)
295 sigemptyset(&queue->signal);
296 while (!list_empty(&queue->list)) {
297 q = list_entry(queue->list.next, struct sigqueue , list);
298 list_del_init(&q->list);
304 * Flush all pending signals for a task.
308 flush_signals(struct task_struct *t)
312 spin_lock_irqsave(&t->sighand->siglock, flags);
313 clear_tsk_thread_flag(t,TIF_SIGPENDING);
314 flush_sigqueue(&t->pending);
315 flush_sigqueue(&t->signal->shared_pending);
316 spin_unlock_irqrestore(&t->sighand->siglock, flags);
320 * This function expects the tasklist_lock write-locked.
322 void __exit_sighand(struct task_struct *tsk)
324 struct sighand_struct * sighand = tsk->sighand;
326 /* Ok, we're done with the signal handlers */
328 if (atomic_dec_and_test(&sighand->count))
329 kmem_cache_free(sighand_cachep, sighand);
332 void exit_sighand(struct task_struct *tsk)
334 write_lock_irq(&tasklist_lock);
336 write_unlock_irq(&tasklist_lock);
340 * This function expects the tasklist_lock write-locked.
342 void __exit_signal(struct task_struct *tsk)
344 struct signal_struct * sig = tsk->signal;
345 struct sighand_struct * sighand = tsk->sighand;
349 if (!atomic_read(&sig->count))
351 spin_lock(&sighand->siglock);
352 if (atomic_dec_and_test(&sig->count)) {
353 if (tsk == sig->curr_target)
354 sig->curr_target = next_thread(tsk);
356 spin_unlock(&sighand->siglock);
357 flush_sigqueue(&sig->shared_pending);
360 * If there is any task waiting for the group exit then wake that task up now.
363 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
364 wake_up_process(sig->group_exit_task);
365 sig->group_exit_task = NULL;
367 if (tsk == sig->curr_target)
368 sig->curr_target = next_thread(tsk);
370 spin_unlock(&sighand->siglock);
371 sig = NULL; /* Marker for below. */
373 clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
374 flush_sigqueue(&tsk->pending);
377 * We are cleaning up the signal_struct here. We delayed
378 * calling exit_itimers until after flush_sigqueue, just in
379 * case our thread-local pending queue contained a queued
380 * timer signal that would have been cleared in
381 * exit_itimers. When that called sigqueue_free, it would
382 * attempt to re-take the tasklist_lock and deadlock. This
383 * can never happen if we ensure that all queues the
384 * timer's signal might be queued on have been flushed
385 * first. The shared_pending queue, and our own pending
386 * queue are the only queues the timer could be on, since
387 * there are no other threads left in the group and timer
388 * signals are constrained to threads inside the group.
391 kmem_cache_free(signal_cachep, sig);
395 void exit_signal(struct task_struct *tsk)
397 write_lock_irq(&tasklist_lock);
399 write_unlock_irq(&tasklist_lock);
403 * Flush all handlers for a task.
407 flush_signal_handlers(struct task_struct *t, int force_default)
410 struct k_sigaction *ka = &t->sighand->action[0];
411 for (i = _NSIG ; i != 0 ; i--) {
412 if (force_default || ka->sa.sa_handler != SIG_IGN)
413 ka->sa.sa_handler = SIG_DFL;
415 sigemptyset(&ka->sa.sa_mask);
420 EXPORT_SYMBOL_GPL(flush_signal_handlers);
422 /* Notify the system that a driver wants to block all signals for this
423 * process, and wants to be notified if any signals at all were to be
424 * sent/acted upon. If the notifier routine returns non-zero, then the
425 * signal will be acted upon after all. If the notifier routine returns 0,
426 * then the signal will be blocked. Only one block per process is
427 * allowed. priv is a pointer to private data that the notifier routine
428 * can use to determine if the signal should be blocked or not. */
431 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
435 spin_lock_irqsave(&current->sighand->siglock, flags);
436 current->notifier_mask = mask;
437 current->notifier_data = priv;
438 current->notifier = notifier;
439 spin_unlock_irqrestore(&current->sighand->siglock, flags);
442 /* Notify the system that blocking has ended. */
445 unblock_all_signals(void)
449 spin_lock_irqsave(&current->sighand->siglock, flags);
450 current->notifier = NULL;
451 current->notifier_data = NULL;
453 spin_unlock_irqrestore(&current->sighand->siglock, flags);
456 static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
458 struct sigqueue *q, *first = NULL;
459 int still_pending = 0;
461 if (unlikely(!sigismember(&list->signal, sig)))
465 * Collect the siginfo appropriate to this signal. Check if
466 * there is another siginfo for the same signal.
468 list_for_each_entry(q, &list->list, list) {
469 if (q->info.si_signo == sig) {
478 list_del_init(&first->list);
479 copy_siginfo(info, &first->info);
480 __sigqueue_free(first);
482 sigdelset(&list->signal, sig);
485 /* Ok, it wasn't in the queue. This must be
486 a fast-pathed signal or we must have been
487 out of queue space. So zero out the info.
489 sigdelset(&list->signal, sig);
490 info->si_signo = sig;
499 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
504 sig = next_signal(pending, mask);
506 if (current->notifier) {
507 if (sigismember(current->notifier_mask, sig)) {
508 if (!(current->notifier)(current->notifier_data)) {
509 clear_thread_flag(TIF_SIGPENDING);
515 if (!collect_signal(sig, pending, info))
525 * Dequeue a signal and return the element to the caller, which is
526 * expected to free it.
528 * All callers have to hold the siglock.
530 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
532 int signr = __dequeue_signal(&tsk->pending, mask, info);
534 signr = __dequeue_signal(&tsk->signal->shared_pending,
537 ((info->si_code & __SI_MASK) == __SI_TIMER) &&
538 info->si_sys_private){
539 do_schedule_next_timer(info);
545 * Tell a process that it has a new active signal..
547 * NOTE! we rely on the previous spin_lock to
548 * lock interrupts for us! We can only be called with
549 * "siglock" held, and the local interrupt must
550 * have been disabled when that got acquired!
552 * No need to set need_resched since signal event passing
553 * goes through ->blocked
555 void signal_wake_up(struct task_struct *t, int resume)
559 set_tsk_thread_flag(t, TIF_SIGPENDING);
562 * If resume is set, we want to wake it up in the TASK_STOPPED case.
563 * We don't check for TASK_STOPPED because there is a race with it
564 * executing on another processor and just now entering stopped state.
565 * By calling wake_up_process any time resume is set, we ensure
566 * the process will wake up and handle its stop or death signal.
568 mask = TASK_INTERRUPTIBLE;
570 mask |= TASK_STOPPED;
571 if (!wake_up_state(t, mask))
576 * Remove signals in mask from the pending set and queue.
577 * Returns 1 if any signals were found.
579 * All callers must be holding the siglock.
581 static int rm_from_queue(unsigned long mask, struct sigpending *s)
583 struct sigqueue *q, *n;
585 if (!sigtestsetmask(&s->signal, mask))
588 sigdelsetmask(&s->signal, mask);
589 list_for_each_entry_safe(q, n, &s->list, list) {
590 if (q->info.si_signo < SIGRTMIN &&
591 (mask & sigmask(q->info.si_signo))) {
592 list_del_init(&q->list);
600 * Bad permissions for sending the signal
602 static int check_kill_permission(int sig, struct siginfo *info,
603 struct task_struct *t)
608 if (sig < 0 || sig > _NSIG)
612 (info != SEND_SIG_PRIV &&
613 info != SEND_SIG_FORCED &&
617 if (user && (sig != SIGCONT ||
618 current->signal->session != t->signal->session)
619 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
620 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
621 && !capable(CAP_KILL))
625 if (user && !vx_check(vx_task_xid(t), VX_ADMIN|VX_IDENT))
628 return security_task_kill(t, info, sig);
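
/*
 * The checks above are what make the classic "null signal" probe work
 * from userspace; a hedged demo (separate program, pid 1 used only as an
 * example of a process that exists but is usually off limits):
 */
#if 0 /* illustrative userspace example */
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
	pid_t pid = 1;	/* init: exists, but unprivileged callers get EPERM */

	if (kill(pid, 0) == 0)
		printf("pid %d exists and we may signal it\n", (int)pid);
	else if (errno == EPERM)
		printf("pid %d exists but permission was denied\n", (int)pid);
	else if (errno == ESRCH)
		printf("pid %d does not exist\n", (int)pid);
	return 0;
}
#endif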
632 static void do_notify_parent_cldstop(struct task_struct *tsk,
633 struct task_struct *parent);
636 * Handle magic process-wide effects of stop/continue signals.
637 * Unlike the signal actions, these happen immediately at signal-generation
638 * time regardless of blocking, ignoring, or handling. This does the
639 * actual continuing for SIGCONT, but not the actual stopping for stop
640 * signals. The process stop is done as a signal action for SIG_DFL.
642 static void handle_stop_signal(int sig, struct task_struct *p)
644 struct task_struct *t;
646 if (sig_kernel_stop(sig)) {
648 * This is a stop signal. Remove SIGCONT from all queues.
650 rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
653 rm_from_queue(sigmask(SIGCONT), &t->pending);
656 } else if (sig == SIGCONT) {
658 * Remove all stop signals from all queues,
659 * and wake all threads.
661 if (unlikely(p->signal->group_stop_count > 0)) {
663 * There was a group stop in progress. We'll
664 * pretend it finished before we got here. We are
665 * obliged to report it to the parent: if the
666 * SIGSTOP happened "after" this SIGCONT, then it
667 * would have cleared this pending SIGCONT. If it
668 * happened "before" this SIGCONT, then the parent
669 * got the SIGCHLD about the stop finishing before
670 * the continue happened. We do the notification
671 * now, and it's as if the stop had finished and
672 * the SIGCHLD was pending on entry to this kill.
674 p->signal->group_stop_count = 0;
675 if (p->ptrace & PT_PTRACED)
676 do_notify_parent_cldstop(p, p->parent);
678 do_notify_parent_cldstop(
680 p->group_leader->real_parent);
682 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
686 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
689 * If there is a handler for SIGCONT, we must make
690 * sure that no thread returns to user mode before
691 * we post the signal, in case it was the only
692 * thread eligible to run the signal handler--then
693 * it must not do anything between resuming and
694 * running the handler. With the TIF_SIGPENDING
695 * flag set, the thread will pause and acquire the
696 * siglock that we hold now and until we've queued
697 * the pending signal.
699 * Wake up the stopped thread _after_ setting TIF_SIGPENDING.
702 state = TASK_STOPPED;
703 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
704 set_tsk_thread_flag(t, TIF_SIGPENDING);
705 state |= TASK_INTERRUPTIBLE;
707 wake_up_state(t, state);
714 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
715 struct sigpending *signals)
717 struct sigqueue * q = NULL;
721 * fast-pathed signals for kernel-internal things like SIGSTOP
724 if ((unsigned long)info == 2)
727 /* Real-time signals must be queued if sent by sigqueue, or
728 some other real-time mechanism. It is implementation
729 defined whether kill() does so. We attempt to do so, on
730 the principle of least surprise, but since kill is not
731 allowed to fail with EAGAIN when low on memory we just
732 make sure at least one signal gets delivered and don't
733 pass on the info struct. */
735 if (atomic_read(&t->user->sigpending) <
736 t->rlim[RLIMIT_SIGPENDING].rlim_cur)
737 q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
741 q->user = get_uid(t->user);
742 atomic_inc(&q->user->sigpending);
743 list_add_tail(&q->list, &signals->list);
744 switch ((unsigned long) info) {
746 q->info.si_signo = sig;
747 q->info.si_errno = 0;
748 q->info.si_code = SI_USER;
749 q->info.si_pid = current->pid;
750 q->info.si_uid = current->uid;
753 q->info.si_signo = sig;
754 q->info.si_errno = 0;
755 q->info.si_code = SI_KERNEL;
760 copy_siginfo(&q->info, info);
764 if (sig >= SIGRTMIN && info && (unsigned long)info != 1
765 && info->si_code != SI_USER)
767 * Queue overflow, abort. We may abort if the signal was rt
768 * and sent by user using something other than kill().
771 if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
773 * Set up a return to indicate that we dropped
776 ret = info->si_sys_private;
780 sigaddset(&signals->signal, sig);
784 #define LEGACY_QUEUE(sigptr, sig) \
785 (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
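
/*
 * The LEGACY_QUEUE() rule above is visible from userspace: sending a
 * non-RT signal twice while it is blocked delivers it once, while an RT
 * signal queues each instance. A hedged demo (separate program):
 */
#if 0 /* illustrative userspace example */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t hits;

static void count_handler(int sig)
{
	(void)sig;
	hits++;
}

int main(void)
{
	sigset_t set;

	signal(SIGUSR1, count_handler);
	signal(SIGRTMIN, count_handler);

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);
	raise(SIGUSR1);		/* non-RT: collapses into the first */
	raise(SIGRTMIN);
	raise(SIGRTMIN);	/* RT: both instances stay queued */

	sigprocmask(SIG_UNBLOCK, &set, NULL);
	printf("handler ran %d times\n", (int)hits);	/* 3 on Linux */
	return 0;
}
#endif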
789 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
793 if (!irqs_disabled())
796 if (!spin_is_locked(&t->sighand->siglock))
800 if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
802 * Set up a return to indicate that we dropped the signal.
804 ret = info->si_sys_private;
806 /* Short-circuit ignored signals. */
807 if (sig_ignored(t, sig))
810 /* Support queueing exactly one non-rt signal, so that we
811 can get more detailed information about the cause of the signal. */
813 if (LEGACY_QUEUE(&t->pending, sig))
816 ret = send_signal(sig, info, t, &t->pending);
817 if (!ret && !sigismember(&t->blocked, sig))
818 signal_wake_up(t, sig == SIGKILL);
824 * Force a signal that the process can't ignore: if necessary
825 * we unblock the signal and change any SIG_IGN to SIG_DFL.
829 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
831 unsigned long int flags;
834 spin_lock_irqsave(&t->sighand->siglock, flags);
835 if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
836 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
837 sigdelset(&t->blocked, sig);
838 recalc_sigpending_tsk(t);
840 ret = specific_send_sig_info(sig, info, t);
841 spin_unlock_irqrestore(&t->sighand->siglock, flags);
847 force_sig_specific(int sig, struct task_struct *t)
849 unsigned long int flags;
851 spin_lock_irqsave(&t->sighand->siglock, flags);
852 if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
853 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
854 sigdelset(&t->blocked, sig);
855 recalc_sigpending_tsk(t);
856 specific_send_sig_info(sig, (void *)2, t);
857 spin_unlock_irqrestore(&t->sighand->siglock, flags);
861 * Test if P wants to take SIG. After we've checked all threads with this,
862 * it's equivalent to finding no threads not blocking SIG. Any threads not
863 * blocking SIG were ruled out because they are not running and already
864 * have pending signals. Such threads will dequeue from the shared queue
865 * as soon as they're available, so putting the signal on the shared queue
866 * will be equivalent to sending it to one such thread.
868 #define wants_signal(sig, p, mask) \
869 (!sigismember(&(p)->blocked, sig) \
870 && !((p)->state & mask) \
871 && !((p)->flags & PF_EXITING) \
872 && (task_curr(p) || !signal_pending(p)))
876 __group_complete_signal(int sig, struct task_struct *p, unsigned int mask)
878 struct task_struct *t;
881 * Now find a thread we can wake up to take the signal off the queue.
883 * If the main thread wants the signal, it gets first crack.
884 * Probably the least surprising to the average bear.
886 if (wants_signal(sig, p, mask))
888 else if (thread_group_empty(p))
890 * There is just one thread and it does not need to be woken.
891 * It will dequeue unblocked signals before it runs again.
896 * Otherwise try to find a suitable thread.
898 t = p->signal->curr_target;
900 /* restart balancing at this thread */
901 t = p->signal->curr_target = p;
902 BUG_ON(t->tgid != p->tgid);
904 while (!wants_signal(sig, t, mask)) {
906 if (t == p->signal->curr_target)
908 * No thread needs to be woken.
909 * Any eligible threads will see
910 * the signal in the queue soon.
914 p->signal->curr_target = t;
918 * Found a killable thread. If the signal will be fatal,
919 * then start taking the whole group down immediately.
921 if (sig_fatal(p, sig) && !p->signal->group_exit &&
922 !sigismember(&t->real_blocked, sig) &&
923 (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
925 * This signal will be fatal to the whole group.
927 if (!sig_kernel_coredump(sig)) {
929 * Start a group exit and wake everybody up.
930 * This way we don't have other threads
931 * running and doing things after a slower
932 * thread has the fatal signal pending.
934 p->signal->group_exit = 1;
935 p->signal->group_exit_code = sig;
936 p->signal->group_stop_count = 0;
939 sigaddset(&t->pending.signal, SIGKILL);
940 signal_wake_up(t, 1);
947 * There will be a core dump. We make all threads other
948 * than the chosen one go into a group stop so that nothing
949 * happens until it gets scheduled, takes the signal off
950 * the shared queue, and does the core dump. This is a
951 * little more complicated than strictly necessary, but it
952 * keeps the signal state that winds up in the core dump
953 * unchanged from the death state, e.g. which thread had
954 * the core-dump signal unblocked.
956 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
957 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
958 p->signal->group_stop_count = 0;
959 p->signal->group_exit_task = t;
962 p->signal->group_stop_count++;
963 signal_wake_up(t, 0);
966 wake_up_process(p->signal->group_exit_task);
971 * The signal is already in the shared-pending queue.
972 * Tell the chosen thread to wake up and dequeue it.
974 signal_wake_up(t, sig == SIGKILL);
979 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
985 if (!spin_is_locked(&p->sighand->siglock))
988 handle_stop_signal(sig, p);
990 if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
992 * Set up a return to indicate that we dropped the signal.
994 ret = info->si_sys_private;
996 /* Short-circuit ignored signals. */
997 if (sig_ignored(p, sig))
1000 if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
1001 /* This is a non-RT signal and we already have one queued. */
1005 * Don't bother zombies and stopped tasks (but
1006 * SIGKILL will punch through stopped state)
1008 mask = TASK_DEAD | TASK_ZOMBIE;
1010 mask |= TASK_STOPPED;
1013 * Put this signal on the shared-pending queue, or fail with EAGAIN.
1014 * We always use the shared queue for process-wide signals,
1015 * to avoid several races.
1017 ret = send_signal(sig, info, p, &p->signal->shared_pending);
1021 __group_complete_signal(sig, p, mask);
1026 * Nuke all other threads in the group.
1028 void zap_other_threads(struct task_struct *p)
1030 struct task_struct *t;
1032 p->signal->group_stop_count = 0;
1034 if (thread_group_empty(p))
1037 for (t = next_thread(p); t != p; t = next_thread(t)) {
1039 * Don't bother with already dead threads
1041 if (t->state & (TASK_ZOMBIE|TASK_DEAD))
1045 * We don't want to notify the parent, since we are
1046 * killed as part of a thread group due to another
1047 * thread doing an execve() or similar. So set the
1048 * exit signal to -1 to allow immediate reaping of
1049 * the process. But don't detach the thread group leader.
1052 if (t != p->group_leader)
1053 t->exit_signal = -1;
1055 sigaddset(&t->pending.signal, SIGKILL);
1056 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1057 signal_wake_up(t, 1);
1062 * Must be called with the tasklist_lock held for reading!
1064 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1066 unsigned long flags;
1069 ret = check_kill_permission(sig, info, p);
1070 if (!ret && sig && p->sighand) {
1071 spin_lock_irqsave(&p->sighand->siglock, flags);
1072 ret = __group_send_sig_info(sig, info, p);
1073 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1080 * kill_pg_info() sends a signal to a process group: this is what the tty
1081 * control characters do (^C, ^Z etc)
1084 int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1086 struct task_struct *p;
1087 struct list_head *l;
1089 int retval, success;
1096 for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) {
1097 int err = group_send_sig_info(sig, info, p);
1101 return success ? 0 : retval;
1105 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1109 read_lock(&tasklist_lock);
1110 retval = __kill_pg_info(sig, info, pgrp);
1111 read_unlock(&tasklist_lock);
1117 * kill_sl_info() sends a signal to the session leader: this is used
1118 * to send SIGHUP to the controlling process of a terminal when
1119 * the connection is lost.
1124 kill_sl_info(int sig, struct siginfo *info, pid_t sid)
1126 int err, retval = -EINVAL;
1128 struct list_head *l;
1129 struct task_struct *p;
1135 read_lock(&tasklist_lock);
1136 for_each_task_pid(sid, PIDTYPE_SID, p, l, pid) {
1137 if (!p->signal->leader)
1139 err = group_send_sig_info(sig, info, p);
1143 read_unlock(&tasklist_lock);
1149 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1152 struct task_struct *p;
1154 read_lock(&tasklist_lock);
1155 p = find_task_by_pid(pid);
1158 error = group_send_sig_info(sig, info, p);
1159 read_unlock(&tasklist_lock);
1165 * kill_something_info() interprets pid in interesting ways just like kill(2).
1167 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1168 * is probably wrong. Should make it like BSD or SYSV.
1171 static int kill_something_info(int sig, struct siginfo *info, int pid)
1174 return kill_pg_info(sig, info, process_group(current));
1175 } else if (pid == -1) {
1176 int retval = 0, count = 0;
1177 struct task_struct * p;
1179 read_lock(&tasklist_lock);
1180 for_each_process(p) {
1181 if (p->pid > 1 && p->tgid != current->tgid) {
1182 int err = group_send_sig_info(sig, info, p);
1188 read_unlock(&tasklist_lock);
1189 return count ? retval : -ESRCH;
1190 } else if (pid < 0) {
1191 return kill_pg_info(sig, info, -pid);
1193 return kill_proc_info(sig, info, pid);
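
/*
 * Sketch of the pid conventions kill_something_info() implements, seen
 * from the kill(2) side (hedged, separate program; the null signal keeps
 * this harmless): pid > 0 is one process, pid == 0 the caller's process
 * group, pid == -1 everything we may signal, pid < -1 group -pid.
 */
#if 0 /* illustrative userspace example */
#include <signal.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	/* sig 0: exercise each lookup path without killing anything */
	printf("self:  %d\n", kill(getpid(), 0));
	printf("pgrp:  %d\n", kill(0, 0));
	printf("-pgrp: %d\n", kill(-getpgrp(), 0));
	return 0;
}
#endif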
1198 * These are for backward compatibility with the rest of the kernel source.
1202 * These two are the most common entry points. They send a signal
1203 * just to the specific thread.
1206 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1209 unsigned long flags;
1212 * Make sure legacy kernel users don't send in bad values
1213 * (normal paths check this in check_kill_permission).
1215 if (sig < 0 || sig > _NSIG)
1219 * We need the tasklist lock even for the specific
1220 * thread case (when we don't need to follow the group
1221 * lists) in order to avoid races with "p->sighand"
1222 * going away or changing from under us.
1224 read_lock(&tasklist_lock);
1225 spin_lock_irqsave(&p->sighand->siglock, flags);
1226 ret = specific_send_sig_info(sig, info, p);
1227 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1228 read_unlock(&tasklist_lock);
1233 send_sig(int sig, struct task_struct *p, int priv)
1235 return send_sig_info(sig, (void*)(long)(priv != 0), p);
1239 * This is the entry point for "process-wide" signals.
1240 * They will go to an appropriate thread in the thread group.
1243 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1246 read_lock(&tasklist_lock);
1247 ret = group_send_sig_info(sig, info, p);
1248 read_unlock(&tasklist_lock);
1253 force_sig(int sig, struct task_struct *p)
1255 force_sig_info(sig, (void*)1L, p);
1259 kill_pg(pid_t pgrp, int sig, int priv)
1261 return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
1265 kill_sl(pid_t sess, int sig, int priv)
1267 return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
1271 kill_proc(pid_t pid, int sig, int priv)
1273 return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
1277 * These functions support sending signals using preallocated sigqueue
1278 * structures. This is needed "because realtime applications cannot
1279 * afford to lose notifications of asynchronous events, like timer
1280 * expirations or I/O completions". In the case of POSIX timers
1281 * we allocate the sigqueue structure from the timer_create. If this
1282 * allocation fails we are able to report the failure to the application
1283 * with an EAGAIN error.
1286 struct sigqueue *sigqueue_alloc(void)
1290 if ((q = __sigqueue_alloc()))
1291 q->flags |= SIGQUEUE_PREALLOC;
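
/*
 * Userspace counterpart of the preallocated-sigqueue idea above (hedged,
 * separate program): sigqueue(2) attaches a payload that arrives in
 * si_value, and failure is reported up front as EAGAIN rather than the
 * notification being silently dropped.
 */
#if 0 /* illustrative userspace example */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void payload_handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)ctx;
	printf("payload: %d\n", info->si_value.sival_int);
}

int main(void)
{
	struct sigaction sa;
	union sigval v;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = payload_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGRTMIN, &sa, NULL);

	v.sival_int = 42;
	if (sigqueue(getpid(), SIGRTMIN, v) < 0)
		perror("sigqueue");	/* e.g. EAGAIN once the rlimit is hit */
	return 0;	/* handler has already run by now */
}
#endif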
1295 void sigqueue_free(struct sigqueue *q)
1297 unsigned long flags;
1298 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1300 * If the signal is still pending remove it from the pending queue.
1303 if (unlikely(!list_empty(&q->list))) {
1304 read_lock(&tasklist_lock);
1305 spin_lock_irqsave(q->lock, flags);
1306 if (!list_empty(&q->list))
1307 list_del_init(&q->list);
1308 spin_unlock_irqrestore(q->lock, flags);
1309 read_unlock(&tasklist_lock);
1311 q->flags &= ~SIGQUEUE_PREALLOC;
1316 send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1318 unsigned long flags;
1322 * We need the tasklist lock even for the specific
1323 * thread case (when we don't need to follow the group
1324 * lists) in order to avoid races with "p->sighand"
1325 * going away or changing from under us.
1327 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1328 read_lock(&tasklist_lock);
1329 spin_lock_irqsave(&p->sighand->siglock, flags);
1331 if (unlikely(!list_empty(&q->list))) {
1333 * If an SI_TIMER entry is already queued, just increment
1334 * the overrun count.
1336 if (q->info.si_code != SI_TIMER)
1338 q->info.si_overrun++;
1341 /* Short-circuit ignored signals. */
1342 if (sig_ignored(p, sig)) {
1347 q->lock = &p->sighand->siglock;
1348 list_add_tail(&q->list, &p->pending.list);
1349 sigaddset(&p->pending.signal, sig);
1350 if (!sigismember(&p->blocked, sig))
1351 signal_wake_up(p, sig == SIGKILL);
1354 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1355 read_unlock(&tasklist_lock);
1360 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1362 unsigned long flags;
1366 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1367 read_lock(&tasklist_lock);
1368 spin_lock_irqsave(&p->sighand->siglock, flags);
1369 handle_stop_signal(sig, p);
1371 /* Short-circuit ignored signals. */
1372 if (sig_ignored(p, sig)) {
1377 if (unlikely(!list_empty(&q->list))) {
1379 * If an SI_TIMER entry is already queued, just increment
1380 * the overrun count. Other uses should not try to
1381 * send the signal multiple times.
1383 if (q->info.si_code != SI_TIMER)
1385 q->info.si_overrun++;
1389 * Don't bother zombies and stopped tasks (but
1390 * SIGKILL will punch through stopped state)
1392 mask = TASK_DEAD | TASK_ZOMBIE;
1394 mask |= TASK_STOPPED;
1397 * Put this signal on the shared-pending queue.
1398 * We always use the shared queue for process-wide signals,
1399 * to avoid several races.
1401 q->lock = &p->sighand->siglock;
1402 list_add_tail(&q->list, &p->signal->shared_pending.list);
1403 sigaddset(&p->signal->shared_pending.signal, sig);
1405 __group_complete_signal(sig, p, mask);
1407 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1408 read_unlock(&tasklist_lock);
1413 * Joy. Or not. Pthread wants us to wake up every thread
1414 * in our parent group.
1416 static void __wake_up_parent(struct task_struct *p,
1417 struct task_struct *parent)
1419 struct task_struct *tsk = parent;
1422 * Fortunately this is not necessary for thread groups:
1424 if (p->tgid == tsk->tgid) {
1425 wake_up_interruptible_sync(&tsk->wait_chldexit);
1430 wake_up_interruptible_sync(&tsk->wait_chldexit);
1431 tsk = next_thread(tsk);
1432 if (tsk->signal != parent->signal)
1434 } while (tsk != parent);
1438 * Let a parent know about a status change of a child.
1441 void do_notify_parent(struct task_struct *tsk, int sig)
1443 struct siginfo info;
1444 unsigned long flags;
1446 struct sighand_struct *psig;
1451 BUG_ON(tsk->group_leader != tsk && tsk->group_leader->state != TASK_ZOMBIE && !tsk->ptrace);
1452 BUG_ON(tsk->group_leader == tsk && !thread_group_empty(tsk) && !tsk->ptrace);
1454 info.si_signo = sig;
1456 info.si_pid = tsk->pid;
1457 info.si_uid = tsk->uid;
1459 /* FIXME: find out whether or not this is supposed to be c*time. */
1460 info.si_utime = tsk->utime;
1461 info.si_stime = tsk->stime;
1463 status = tsk->exit_code & 0x7f;
1464 why = SI_KERNEL; /* shouldn't happen */
1465 switch (tsk->state) {
1467 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
1468 if (tsk->ptrace & PT_PTRACED)
1475 if (tsk->exit_code & 0x80)
1477 else if (tsk->exit_code & 0x7f)
1481 status = tsk->exit_code >> 8;
1486 info.si_status = status;
1488 psig = tsk->parent->sighand;
1489 spin_lock_irqsave(&psig->siglock, flags);
1490 if (sig == SIGCHLD && tsk->state != TASK_STOPPED &&
1491 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1492 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1494 * We are exiting and our parent doesn't care. POSIX.1
1495 * defines special semantics for setting SIGCHLD to SIG_IGN
1496 * or setting the SA_NOCLDWAIT flag: we should be reaped
1497 * automatically and not left for our parent's wait4 call.
1498 * Rather than having the parent do it as a magic kind of
1499 * signal handler, we just set this to tell do_exit that we
1500 * can be cleaned up without becoming a zombie. Note that
1501 * we still call __wake_up_parent in this case, because a
1502 * blocked sys_wait4 might now return -ECHILD.
1504 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1505 * is implementation-defined: we do (if you don't want
1506 * it, just use SIG_IGN instead).
1508 tsk->exit_signal = -1;
1509 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1512 if (sig > 0 && sig <= _NSIG)
1513 __group_send_sig_info(sig, &info, tsk->parent);
1514 __wake_up_parent(tsk, tsk->parent);
1515 spin_unlock_irqrestore(&psig->siglock, flags);
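
/*
 * The SIG_IGN/SA_NOCLDWAIT branch above is the kernel side of this
 * userspace behavior (hedged demo, separate program): with SIGCHLD set
 * to SIG_IGN, children are reaped automatically and wait() fails.
 */
#if 0 /* illustrative userspace example */
#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* POSIX: do not leave zombies */
	if (fork() == 0)
		_exit(0);		/* child exits immediately */
	sleep(1);			/* let the child be auto-reaped */
	if (wait(NULL) < 0 && errno == ECHILD)
		printf("child already reaped, wait() -> ECHILD\n");
	return 0;
}
#endif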
1520 * We need the tasklist lock because it's the only
1521 * thing that protects our "parent" pointer.
1523 * exit.c calls "do_notify_parent()" directly, because
1524 * it already has the tasklist lock.
1527 notify_parent(struct task_struct *tsk, int sig)
1530 read_lock(&tasklist_lock);
1531 do_notify_parent(tsk, sig);
1532 read_unlock(&tasklist_lock);
1537 do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent)
1539 struct siginfo info;
1540 unsigned long flags;
1541 struct sighand_struct *sighand;
1543 info.si_signo = SIGCHLD;
1545 info.si_pid = tsk->pid;
1546 info.si_uid = tsk->uid;
1548 /* FIXME: find out whether or not this is supposed to be c*time. */
1549 info.si_utime = tsk->utime;
1550 info.si_stime = tsk->stime;
1552 info.si_status = tsk->exit_code & 0x7f;
1553 info.si_code = CLD_STOPPED;
1555 sighand = parent->sighand;
1556 spin_lock_irqsave(&sighand->siglock, flags);
1557 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1558 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1559 __group_send_sig_info(SIGCHLD, &info, parent);
1561 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1563 __wake_up_parent(tsk, parent);
1564 spin_unlock_irqrestore(&sighand->siglock, flags);
1567 int print_fatal_signals = 0;
1569 static void print_fatal_signal(struct pt_regs *regs, int signr)
1573 printk("%s/%d: potentially unexpected fatal signal %d.\n",
1574 current->comm, current->pid, signr);
1577 printk("code at %08lx: ", regs->eip);
1578 for (i = 0; i < 16; i++) {
1579 __get_user(insn, (unsigned char *)(regs->eip + i));
1580 printk("%02x ", insn);
1587 static int __init setup_print_fatal_signals(char *str)
1589 get_option (&str, &print_fatal_signals);
1594 __setup("print-fatal-signals=", setup_print_fatal_signals);
1596 #ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER
1599 finish_stop(int stop_count)
1602 * If there are no other threads in the group, or if there is
1603 * a group stop in progress and we are the last to stop,
1604 * report to the parent. When ptraced, every thread reports itself.
1606 if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
1607 read_lock(&tasklist_lock);
1608 do_notify_parent_cldstop(current, current->parent);
1609 read_unlock(&tasklist_lock);
1611 else if (stop_count == 0) {
1612 read_lock(&tasklist_lock);
1613 do_notify_parent_cldstop(current->group_leader,
1614 current->group_leader->real_parent);
1615 read_unlock(&tasklist_lock);
1620 * Now we don't run again until continued.
1622 current->exit_code = 0;
1626 * This performs the stopping for SIGSTOP and other stop signals.
1627 * We have to stop all threads in the thread group.
1630 do_signal_stop(int signr)
1632 struct signal_struct *sig = current->signal;
1633 struct sighand_struct *sighand = current->sighand;
1634 int stop_count = -1;
1636 /* spin_lock_irq(&sighand->siglock) is now done in caller */
1638 if (sig->group_stop_count > 0) {
1640 * There is a group stop in progress. We don't need to
1641 * start another one.
1643 signr = sig->group_exit_code;
1644 stop_count = --sig->group_stop_count;
1645 current->exit_code = signr;
1646 set_current_state(TASK_STOPPED);
1647 spin_unlock_irq(&sighand->siglock);
1649 else if (thread_group_empty(current)) {
1651 * Lock must be held through transition to stopped state.
1653 current->exit_code = signr;
1654 set_current_state(TASK_STOPPED);
1655 spin_unlock_irq(&sighand->siglock);
1659 * There is no group stop already in progress.
1660 * We must initiate one now, but that requires
1661 * dropping siglock to get both the tasklist lock
1662 * and siglock again in the proper order. Note that
1663 * this allows an intervening SIGCONT to be posted.
1664 * We need to check for that and bail out if necessary.
1666 struct task_struct *t;
1668 spin_unlock_irq(&sighand->siglock);
1670 /* signals can be posted during this window */
1672 read_lock(&tasklist_lock);
1673 spin_lock_irq(&sighand->siglock);
1675 if (unlikely(sig->group_exit)) {
1677 * There is a group exit in progress now.
1678 * We'll just ignore the stop and process the
1679 * associated fatal signal.
1681 spin_unlock_irq(&sighand->siglock);
1682 read_unlock(&tasklist_lock);
1686 if (unlikely(sig_avoid_stop_race())) {
1688 * Either a SIGCONT or a SIGKILL signal was
1689 * posted in the siglock-not-held window.
1691 spin_unlock_irq(&sighand->siglock);
1692 read_unlock(&tasklist_lock);
1696 if (sig->group_stop_count == 0) {
1697 sig->group_exit_code = signr;
1699 for (t = next_thread(current); t != current;
1702 * Setting state to TASK_STOPPED for a group
1703 * stop is always done with the siglock held,
1704 * so this check has no races.
1706 if (t->state < TASK_STOPPED) {
1708 signal_wake_up(t, 0);
1710 sig->group_stop_count = stop_count;
1713 /* A race with another thread while unlocked. */
1714 signr = sig->group_exit_code;
1715 stop_count = --sig->group_stop_count;
1718 current->exit_code = signr;
1719 set_current_state(TASK_STOPPED);
1721 spin_unlock_irq(&sighand->siglock);
1722 read_unlock(&tasklist_lock);
1725 finish_stop(stop_count);
1729 * Do appropriate magic when group_stop_count > 0.
1730 * We return nonzero if we stopped, after releasing the siglock.
1731 * We return zero if we still hold the siglock and should look
1732 * for another signal without checking group_stop_count again.
1734 static inline int handle_group_stop(void)
1738 if (current->signal->group_exit_task == current) {
1740 * Group stop is so we can do a core dump,
1741 * We are the initiating thread, so get on with it.
1743 current->signal->group_exit_task = NULL;
1747 if (current->signal->group_exit)
1749 * Group stop is so another thread can do a core dump,
1750 * or else we are racing against a death signal.
1751 * Just punt the stop so we can get the next signal.
1756 * There is a group stop in progress. We stop
1757 * without any associated signal being in our queue.
1759 stop_count = --current->signal->group_stop_count;
1760 current->exit_code = current->signal->group_exit_code;
1761 set_current_state(TASK_STOPPED);
1762 spin_unlock_irq(&current->sighand->siglock);
1763 finish_stop(stop_count);
1767 int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs, void *cookie)
1769 sigset_t *mask = &current->blocked;
1773 spin_lock_irq(&current->sighand->siglock);
1775 struct k_sigaction *ka;
1777 if (unlikely(current->signal->group_stop_count > 0) &&
1778 handle_group_stop())
1781 signr = dequeue_signal(current, mask, info);
1784 break; /* will return 0 */
1786 if ((signr == SIGSEGV) && print_fatal_signals) {
1787 spin_unlock_irq(&current->sighand->siglock);
1788 print_fatal_signal(regs, signr);
1789 spin_lock_irq(&current->sighand->siglock);
1791 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1792 ptrace_signal_deliver(regs, cookie);
1795 * If there is a group stop in progress,
1796 * we must participate in the bookkeeping.
1798 if (current->signal->group_stop_count > 0)
1799 --current->signal->group_stop_count;
1801 /* Let the debugger run. */
1802 current->exit_code = signr;
1803 current->last_siginfo = info;
1804 set_current_state(TASK_STOPPED);
1805 spin_unlock_irq(&current->sighand->siglock);
1806 notify_parent(current, SIGCHLD);
1809 current->last_siginfo = NULL;
1811 /* We're back. Did the debugger cancel the sig? */
1812 spin_lock_irq(&current->sighand->siglock);
1813 signr = current->exit_code;
1817 current->exit_code = 0;
1819 /* Update the siginfo structure if the signal has
1820 changed. If the debugger wanted something
1821 specific in the siginfo structure then it should
1822 have updated *info via PTRACE_SETSIGINFO. */
1823 if (signr != info->si_signo) {
1824 info->si_signo = signr;
1826 info->si_code = SI_USER;
1827 info->si_pid = current->parent->pid;
1828 info->si_uid = current->parent->uid;
1831 /* If the (new) signal is now blocked, requeue it. */
1832 if (sigismember(&current->blocked, signr)) {
1833 specific_send_sig_info(signr, info, current);
1838 ka = &current->sighand->action[signr-1];
1839 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1841 if (ka->sa.sa_handler != SIG_DFL) /* Run the handler. */
1842 break; /* will return non-zero "signr" value */
1845 * Now we are doing the default action for this signal.
1847 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1850 /* Init gets no signals it doesn't want. */
1851 if (current->pid == 1)
1854 if (sig_kernel_stop(signr)) {
1856 * The default action is to stop all threads in
1857 * the thread group. The job control signals
1858 * do nothing in an orphaned pgrp, but SIGSTOP
1859 * always works. Note that siglock needs to be
1860 * dropped during the call to is_orphaned_pgrp()
1861 * because of lock ordering with tasklist_lock.
1862 * This allows an intervening SIGCONT to be posted.
1863 * We need to check for that and bail out if necessary.
1865 if (signr == SIGSTOP) {
1866 do_signal_stop(signr); /* releases siglock */
1869 spin_unlock_irq(&current->sighand->siglock);
1871 /* signals can be posted during this window */
1873 if (is_orphaned_pgrp(process_group(current)))
1876 spin_lock_irq(&current->sighand->siglock);
1877 if (unlikely(sig_avoid_stop_race())) {
1879 * Either a SIGCONT or a SIGKILL signal was
1880 * posted in the siglock-not-held window.
1885 do_signal_stop(signr); /* releases siglock */
1889 spin_unlock_irq(&current->sighand->siglock);
1892 * Anything else is fatal, maybe with a core dump.
1894 current->flags |= PF_SIGNALED;
1895 if (print_fatal_signals)
1896 print_fatal_signal(regs, signr);
1897 if (sig_kernel_coredump(signr) &&
1898 do_coredump((long)signr, signr, regs)) {
1900 * That killed all other threads in the group and
1901 * synchronized with their demise, so there can't
1902 * be any more left to kill now. The group_exit
1903 * flags are set by do_coredump. Note that
1904 * thread_group_empty won't always be true yet,
1905 * because those threads were blocked in __exit_mm
1906 * and we just let them go to finish dying.
1908 const int code = signr | 0x80;
1909 BUG_ON(!current->signal->group_exit);
1910 BUG_ON(current->signal->group_exit_code != code);
1916 * Death signals, no core dump.
1918 do_group_exit(signr);
1921 spin_unlock_irq(&current->sighand->siglock);
1927 EXPORT_SYMBOL(recalc_sigpending);
1928 EXPORT_SYMBOL_GPL(dequeue_signal);
1929 EXPORT_SYMBOL(flush_signals);
1930 EXPORT_SYMBOL(force_sig);
1931 EXPORT_SYMBOL(force_sig_info);
1932 EXPORT_SYMBOL(kill_pg);
1933 EXPORT_SYMBOL(kill_pg_info);
1934 EXPORT_SYMBOL(kill_proc);
1935 EXPORT_SYMBOL(kill_proc_info);
1936 EXPORT_SYMBOL(kill_sl);
1937 EXPORT_SYMBOL(kill_sl_info);
1938 EXPORT_SYMBOL(notify_parent);
1939 EXPORT_SYMBOL(send_sig);
1940 EXPORT_SYMBOL(send_sig_info);
1941 EXPORT_SYMBOL(send_group_sig_info);
1942 EXPORT_SYMBOL(sigqueue_alloc);
1943 EXPORT_SYMBOL(sigqueue_free);
1944 EXPORT_SYMBOL(send_sigqueue);
1945 EXPORT_SYMBOL(send_group_sigqueue);
1946 EXPORT_SYMBOL(sigprocmask);
1947 EXPORT_SYMBOL(block_all_signals);
1948 EXPORT_SYMBOL(unblock_all_signals);
1952 * System call entry points.
1955 asmlinkage long sys_restart_syscall(void)
1957 struct restart_block *restart = &current_thread_info()->restart_block;
1958 return restart->fn(restart);
1961 long do_no_restart_syscall(struct restart_block *param)
1967 * We don't need to get the kernel lock - this is all local to this
1968 * particular thread.. (and that's good, because this is _heavily_
1969 * used by various programs)
1973 * This is also useful for kernel threads that want to temporarily
1974 * (or permanently) block certain signals.
1976 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1977 * interface happily blocks "unblockable" signals like SIGKILL
1980 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1985 spin_lock_irq(&current->sighand->siglock);
1986 old_block = current->blocked;
1990 sigorsets(&current->blocked, &current->blocked, set);
1993 signandsets(&current->blocked, &current->blocked, set);
1996 current->blocked = *set;
2001 recalc_sigpending();
2002 spin_unlock_irq(&current->sighand->siglock);
2004 *oldset = old_block;
2009 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2011 int error = -EINVAL;
2012 sigset_t old_set, new_set;
2014 /* XXX: Don't preclude handling different sized sigset_t's. */
2015 if (sigsetsize != sizeof(sigset_t))
2020 if (copy_from_user(&new_set, set, sizeof(*set)))
2022 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2024 error = sigprocmask(how, &new_set, &old_set);
2030 spin_lock_irq(&current->sighand->siglock);
2031 old_set = current->blocked;
2032 spin_unlock_irq(&current->sighand->siglock);
2036 if (copy_to_user(oset, &old_set, sizeof(*oset)))
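
/*
 * Hedged userspace mirror of the how-switch and the SIGKILL/SIGSTOP
 * stripping above (separate program): SIG_BLOCK ORs into the mask,
 * SIG_SETMASK replaces it, and the unblockable signals quietly never
 * make it into the stored mask.
 */
#if 0 /* illustrative userspace example */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, cur;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	sigaddset(&set, SIGKILL);	/* this part is silently dropped */
	sigprocmask(SIG_BLOCK, &set, NULL);

	sigprocmask(SIG_SETMASK, NULL, &cur);	/* read the mask back */
	printf("SIGINT blocked:  %d\n", sigismember(&cur, SIGINT));	/* 1 */
	printf("SIGKILL blocked: %d\n", sigismember(&cur, SIGKILL));	/* 0 */
	return 0;
}
#endif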
2044 long do_sigpending(void __user *set, unsigned long sigsetsize)
2046 long error = -EINVAL;
2049 if (sigsetsize > sizeof(sigset_t))
2052 spin_lock_irq(&current->sighand->siglock);
2053 sigorsets(&pending, &current->pending.signal,
2054 &current->signal->shared_pending.signal);
2055 spin_unlock_irq(&current->sighand->siglock);
2057 /* Outside the lock because only this thread touches it. */
2058 sigandsets(&pending, &current->blocked, &pending);
2061 if (!copy_to_user(set, &pending, sigsetsize))
2069 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2071 return do_sigpending(set, sigsetsize);
2074 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2076 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2080 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2082 if (from->si_code < 0)
2083 return __copy_to_user(to, from, sizeof(siginfo_t))
2086 * If you change siginfo_t structure, please be sure
2087 * this code is fixed accordingly.
2088 * It should never copy any pad contained in the structure
2089 * to avoid security leaks, but must copy the generic
2090 * 3 ints plus the relevant union member.
2092 err = __put_user(from->si_signo, &to->si_signo);
2093 err |= __put_user(from->si_errno, &to->si_errno);
2094 err |= __put_user((short)from->si_code, &to->si_code);
2095 switch (from->si_code & __SI_MASK) {
2097 err |= __put_user(from->si_pid, &to->si_pid);
2098 err |= __put_user(from->si_uid, &to->si_uid);
2101 err |= __put_user(from->si_tid, &to->si_tid);
2102 err |= __put_user(from->si_overrun, &to->si_overrun);
2103 err |= __put_user(from->si_ptr, &to->si_ptr);
2106 err |= __put_user(from->si_band, &to->si_band);
2107 err |= __put_user(from->si_fd, &to->si_fd);
2110 err |= __put_user(from->si_addr, &to->si_addr);
2111 #ifdef __ARCH_SI_TRAPNO
2112 err |= __put_user(from->si_trapno, &to->si_trapno);
2116 err |= __put_user(from->si_pid, &to->si_pid);
2117 err |= __put_user(from->si_uid, &to->si_uid);
2118 err |= __put_user(from->si_status, &to->si_status);
2119 err |= __put_user(from->si_utime, &to->si_utime);
2120 err |= __put_user(from->si_stime, &to->si_stime);
2122 case __SI_RT: /* This is not generated by the kernel as of now. */
2123 case __SI_MESGQ: /* But this is */
2124 err |= __put_user(from->si_pid, &to->si_pid);
2125 err |= __put_user(from->si_uid, &to->si_uid);
2126 err |= __put_user(from->si_ptr, &to->si_ptr);
2128 default: /* this is just in case for now ... */
2129 err |= __put_user(from->si_pid, &to->si_pid);
2130 err |= __put_user(from->si_uid, &to->si_uid);
2139 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2140 siginfo_t __user *uinfo,
2141 const struct timespec __user *uts,
2150 /* XXX: Don't preclude handling different sized sigset_t's. */
2151 if (sigsetsize != sizeof(sigset_t))
2154 if (copy_from_user(&these, uthese, sizeof(these)))
2158 * Invert the set of allowed signals to get those we want to block.
2161 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2165 if (copy_from_user(&ts, uts, sizeof(ts)))
2167 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2172 spin_lock_irq(&current->sighand->siglock);
2173 sig = dequeue_signal(current, &these, &info);
2175 timeout = MAX_SCHEDULE_TIMEOUT;
2177 timeout = (timespec_to_jiffies(&ts)
2178 + (ts.tv_sec || ts.tv_nsec));
2181 /* None ready -- temporarily unblock those we're
2182 * interested in while we are sleeping, so that we'll
2183 * be awakened when they arrive. */
2184 current->real_blocked = current->blocked;
2185 sigandsets(&current->blocked, &current->blocked, &these);
2186 recalc_sigpending();
2187 spin_unlock_irq(&current->sighand->siglock);
2189 current->state = TASK_INTERRUPTIBLE;
2190 timeout = schedule_timeout(timeout);
2192 spin_lock_irq(&current->sighand->siglock);
2193 sig = dequeue_signal(current, &these, &info);
2194 current->blocked = current->real_blocked;
2195 siginitset(&current->real_blocked, 0);
2196 recalc_sigpending();
2199 spin_unlock_irq(&current->sighand->siglock);
2204 if (copy_siginfo_to_user(uinfo, &info))
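
/*
 * Hedged userspace demo of the dequeue-or-sleep loop above (separate
 * program): block a signal, send it, then fetch it synchronously with
 * sigtimedwait() instead of running a handler.
 */
#if 0 /* illustrative userspace example */
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { 1, 0 };	/* wait at most one second */

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* must be blocked first */

	kill(getpid(), SIGUSR1);
	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
		printf("got SIGUSR1 from pid %d\n", (int)info.si_pid);
	return 0;
}
#endif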
2217 sys_kill(int pid, int sig)
2219 struct siginfo info;
2221 info.si_signo = sig;
2223 info.si_code = SI_USER;
2224 info.si_pid = current->tgid;
2225 info.si_uid = current->uid;
2227 return kill_something_info(sig, &info, pid);
2231 * sys_tgkill - send signal to one specific thread
2232 * @tgid: the thread group ID of the thread
2233 * @pid: the PID of the thread
2234 * @sig: signal to be sent
2236 * This syscall also checks the tgid and returns -ESRCH even if the PID
2237 * exists but no longer belongs to the target process. This
2238 * method solves the problem of threads exiting and PIDs getting reused.
2240 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2242 struct siginfo info;
2244 struct task_struct *p;
2246 /* This is only valid for single tasks */
2247 if (pid <= 0 || tgid <= 0)
2250 info.si_signo = sig;
2252 info.si_code = SI_TKILL;
2253 info.si_pid = current->tgid;
2254 info.si_uid = current->uid;
2256 read_lock(&tasklist_lock);
2257 p = find_task_by_pid(pid);
2259 if (p && (p->tgid == tgid)) {
2260 error = check_kill_permission(sig, &info, p);
2262 * The null signal is a permissions and process existence
2263 * probe. No signal is actually delivered.
2265 if (!error && sig && p->sighand) {
2266 spin_lock_irq(&p->sighand->siglock);
2267 handle_stop_signal(sig, p);
2268 error = specific_send_sig_info(sig, &info, p);
2269 spin_unlock_irq(&p->sighand->siglock);
2272 read_unlock(&tasklist_lock);
2277 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2280 sys_tkill(int pid, int sig)
2282 struct siginfo info;
2284 struct task_struct *p;
2286 /* This is only valid for single tasks */
2290 info.si_signo = sig;
2292 info.si_code = SI_TKILL;
2293 info.si_pid = current->tgid;
2294 info.si_uid = current->uid;
2296 read_lock(&tasklist_lock);
2297 p = find_task_by_pid(pid);
2300 error = check_kill_permission(sig, &info, p);
2302 * The null signal is a permissions and process existence
2303 * probe. No signal is actually delivered.
2305 if (!error && sig && p->sighand) {
2306 spin_lock_irq(&p->sighand->siglock);
2307 handle_stop_signal(sig, p);
2308 error = specific_send_sig_info(sig, &info, p);
2309 spin_unlock_irq(&p->sighand->siglock);
2312 read_unlock(&tasklist_lock);
2317 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2321 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2324 /* Not even root can pretend to send signals from the kernel.
2325 Nor can they impersonate a kill(), which adds source info. */
2326 if (info.si_code >= 0)
2328 info.si_signo = sig;
2330 /* POSIX.1b doesn't mention process groups. */
2331 return kill_proc_info(sig, &info, pid);
2335 do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
2337 struct k_sigaction *k;
2339 if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
2342 k = &current->sighand->action[sig-1];
2344 spin_lock_irq(&current->sighand->siglock);
2345 if (signal_pending(current)) {
2347 * If there might be a fatal signal pending on multiple
2348 * threads, make sure we take it before changing the action.
2350 spin_unlock_irq(&current->sighand->siglock);
2351 return -ERESTARTNOINTR;
2360 * "Setting a signal action to SIG_IGN for a signal that is
2361 * pending shall cause the pending signal to be discarded,
2362 * whether or not it is blocked."
2364 * "Setting a signal action to SIG_DFL for a signal that is
2365 * pending and whose default action is to ignore the signal
2366 * (for example, SIGCHLD), shall cause the pending signal to
2367 * be discarded, whether or not it is blocked"
2369 if (act->sa.sa_handler == SIG_IGN ||
2370 (act->sa.sa_handler == SIG_DFL &&
2371 sig_kernel_ignore(sig))) {
2373 * This is a fairly rare case, so we only take the
2374 * tasklist_lock once we're sure we'll need it.
2375 * Now we must do this little unlock and relock
2376 * dance to maintain the lock hierarchy.
2378 struct task_struct *t = current;
2379 spin_unlock_irq(&t->sighand->siglock);
2380 read_lock(&tasklist_lock);
2381 spin_lock_irq(&t->sighand->siglock);
2383 sigdelsetmask(&k->sa.sa_mask,
2384 sigmask(SIGKILL) | sigmask(SIGSTOP));
2385 rm_from_queue(sigmask(sig), &t->signal->shared_pending);
2387 rm_from_queue(sigmask(sig), &t->pending);
2388 recalc_sigpending_tsk(t);
2390 } while (t != current);
2391 spin_unlock_irq(&current->sighand->siglock);
2392 read_unlock(&tasklist_lock);
2397 sigdelsetmask(&k->sa.sa_mask,
2398 sigmask(SIGKILL) | sigmask(SIGSTOP));
2401 spin_unlock_irq(&current->sighand->siglock);
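
/*
 * Userspace view of the POSIX rule quoted above (hedged demo, separate
 * program): setting SIG_IGN discards an already-pending blocked signal.
 */
#if 0 /* illustrative userspace example */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pend;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);			/* now pending and blocked */
	sigpending(&pend);
	printf("pending before: %d\n", sigismember(&pend, SIGUSR1)); /* 1 */

	signal(SIGUSR1, SIG_IGN);	/* discards the pending instance */
	sigpending(&pend);
	printf("pending after:  %d\n", sigismember(&pend, SIGUSR1)); /* 0 */
	return 0;
}
#endif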
2406 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2412 oss.ss_sp = (void __user *) current->sas_ss_sp;
2413 oss.ss_size = current->sas_ss_size;
2414 oss.ss_flags = sas_ss_flags(sp);
2423 if (verify_area(VERIFY_READ, uss, sizeof(*uss))
2424 || __get_user(ss_sp, &uss->ss_sp)
2425 || __get_user(ss_flags, &uss->ss_flags)
2426 || __get_user(ss_size, &uss->ss_size))
2430 if (on_sig_stack(sp))
2436 * Note - this code used to test ss_flags incorrectly:
2437 * old code may have been written using ss_flags==0
2438 * to mean ss_flags==SS_ONSTACK (as this was the only
2439 * way that worked) - this fix preserves that older
2440 * mechanism.
2442 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2445 if (ss_flags == SS_DISABLE) {
2450 if (ss_size < MINSIGSTKSZ)
2454 current->sas_ss_sp = (unsigned long) ss_sp;
2455 current->sas_ss_size = ss_size;
2460 if (copy_to_user(uoss, &oss, sizeof(oss)))
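
/*
 * Hedged demo of the ss_flags rules above (separate program): install an
 * alternate signal stack and read the state back; a handler installed
 * with SA_ONSTACK would then run on it.
 */
#if 0 /* illustrative userspace example */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	stack_t ss, oss;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;	/* 0 means "enable", per the note above */
	if (sigaltstack(&ss, NULL) < 0)
		perror("sigaltstack");

	sigaltstack(NULL, &oss);	/* query only */
	printf("currently on the alt stack: %d\n",
	       (oss.ss_flags & SS_ONSTACK) != 0);	/* 0: not in a handler */
	return 0;
}
#endif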
2469 #ifdef __ARCH_WANT_SYS_SIGPENDING
2472 sys_sigpending(old_sigset_t __user *set)
2474 return do_sigpending(set, sizeof(*set));
2479 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2480 /* Some platforms have their own version with special arguments;
2481 others support only sys_rt_sigprocmask. */
2484 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2487 old_sigset_t old_set, new_set;
2491 if (copy_from_user(&new_set, set, sizeof(*set)))
2493 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2495 spin_lock_irq(&current->sighand->siglock);
2496 old_set = current->blocked.sig[0];
2504 sigaddsetmask(&current->blocked, new_set);
2507 sigdelsetmask(&current->blocked, new_set);
2510 current->blocked.sig[0] = new_set;
2514 recalc_sigpending();
2515 spin_unlock_irq(&current->sighand->siglock);
2521 old_set = current->blocked.sig[0];
2524 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2531 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2533 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2535 sys_rt_sigaction(int sig,
2536 const struct sigaction __user *act,
2537 struct sigaction __user *oact,
2540 struct k_sigaction new_sa, old_sa;
2543 /* XXX: Don't preclude handling different sized sigset_t's. */
2544 if (sigsetsize != sizeof(sigset_t))
2548 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2552 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2555 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2561 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2563 #ifdef __ARCH_WANT_SYS_SGETMASK
2566 * For backwards compatibility. Functionality superseded by sigprocmask.
2572 return current->blocked.sig[0];
2576 sys_ssetmask(int newmask)
2580 spin_lock_irq(&current->sighand->siglock);
2581 old = current->blocked.sig[0];
2583 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2585 recalc_sigpending();
2586 spin_unlock_irq(&current->sighand->siglock);
2590 #endif /* __ARCH_WANT_SYS_SGETMASK */
2592 #ifdef __ARCH_WANT_SYS_SIGNAL
2594 * For backwards compatibility. Functionality superseded by sigaction.
2596 asmlinkage unsigned long
2597 sys_signal(int sig, __sighandler_t handler)
2599 struct k_sigaction new_sa, old_sa;
2602 new_sa.sa.sa_handler = handler;
2603 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2605 ret = do_sigaction(sig, &new_sa, &old_sa);
2607 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2609 #endif /* __ARCH_WANT_SYS_SIGNAL */
2611 #ifdef __ARCH_WANT_SYS_PAUSE
2616 current->state = TASK_INTERRUPTIBLE;
2618 return -ERESTARTNOHAND;
2623 void __init signals_init(void)
2626 kmem_cache_create("sigqueue",
2627 sizeof(struct sigqueue),
2628 __alignof__(struct sigqueue),
2629 SLAB_PANIC, NULL, NULL);