/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tracehook.h>
#include <linux/signal.h>
#include <linux/capability.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;

/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */

#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL)   |  M(SIGSTOP)                                   )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
	M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
	M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )

#define sig_kernel_only(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))

#define sig_needs_tasklist(sig)	((sig) == SIGCONT)

#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)

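/*
 * Illustrative sketch (not part of the original file): how the
 * classification macros above carve up the signal range.  Each
 * sig_kernel_*() predicate matches only classic signals below SIGRTMIN;
 * real-time signals always fall through to the "terminate" default.
 */
static inline const char *sig_default_action_name(int sig)
{
	if (sig_kernel_ignore(sig))
		return "ignore";	/* e.g. SIGCHLD, SIGWINCH */
	if (sig_kernel_stop(sig))
		return "stop";		/* e.g. SIGSTOP, SIGTSTP */
	if (sig_kernel_coredump(sig))
		return "coredump";	/* e.g. SIGQUIT, SIGSEGV */
	return "terminate";		/* everything else, incl. rt signals */
}
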
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	if (handler != SIG_IGN &&
	    (handler != SIG_DFL || !sig_kernel_ignore(sig)))
		return 0;

	/* It's ignored, we can short-circuit unless a debugger wants it. */
	return !tracehook_consider_ignored_signal(t, sig, handler);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    tracehook_induce_sigpending(t))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}

/*
 * Given the mask, find the first available signal that should be serviced.
 */
static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}

static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;

	atomic_inc(&t->user->sigpending);
	if (override_rlimit ||
	    atomic_read(&t->user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&t->user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(t->user);
	}
	return(q);
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
EXPORT_SYMBOL_GPL(flush_signal_handlers);

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

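/*
 * Usage sketch (not from this file; all names here are hypothetical):
 * a driver that must not be interrupted mid-transaction could use the
 * notifier interface above like this.  Note the notifier runs with the
 * siglock held, so it must not sleep.
 */
struct example_dev_state {
	int in_critical_section;
};

static int example_signal_notifier(void *priv)
{
	struct example_dev_state *st = priv;
	/* Non-zero return: let the signal be acted upon after all. */
	return !st->in_critical_section;
}

static void example_dev_transaction(struct example_dev_state *st)
{
	sigset_t all;

	sigfillset(&all);
	st->in_critical_section = 1;
	block_all_signals(example_signal_notifier, st, &all);
	/* ... poke the hardware ... */
	st->in_critical_section = 0;
	unblock_all_signals();
}
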
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	*/
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}

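/*
 * Calling-convention sketch (illustrative, not part of the original
 * file): the caller holds the siglock around dequeue_signal(), typically
 * passing its own blocked set as the mask, as the syscalls further down
 * in this file do.
 */
static int example_dequeue_one(siginfo_t *info)
{
	int signr;

	spin_lock_irq(&current->sighand->siglock);
	signr = dequeue_signal(current, &current->blocked, info);
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
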
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int user;
	int error = -EINVAL;

	if (!valid_signal(sig))
		return error;

	user = ((info == SEND_SIG_NOINFO) ||
		(!is_si_special(info) && SI_FROMUSER(info)));

	error = -EPERM;
	if (user && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	error = -ESRCH;
	if (user && !vx_check(vx_task_xid(t), VX_ADMIN|VX_IDENT))
		return error;

	error = security_task_kill(t, info, sig, 0);
	if (!error)
		audit_signal_info(sig, t); /* Let audit system see the signal */
	return error;
}

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))

int print_fatal_signals = 0;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, current->pid, signr);

#ifdef __i386__
	printk("code at %08lx: ", regs->eip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->eip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

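/*
 * Usage note (added): the diagnostic above is off by default; booting
 * with "print-fatal-signals=1" on the kernel command line enables it
 * via the __setup() hook registered just above.
 */
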
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	BUG_ON(!irqs_disabled());
	assert_spin_locked(&t->sighand->siglock);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_tsk(t);
		}
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (p->state & (TASK_STOPPED | TASK_TRACED))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !tracehook_consider_fatal_signal(t, sig))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}

int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}

/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	int acquired_tasklist_lock = 0;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_needs_tasklist(sig))) {
		read_lock(&tasklist_lock);
		acquired_tasklist_lock = 1;
	}
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && vx_check(vx_task_xid(p), VX_IDENT))
		error = group_send_sig_info(sig, info, p);
	if (unlikely(acquired_tasklist_lock))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();
	return error;
}

/* like kill_proc_info(), but doesn't use uid/euid of "current" */
int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2):
 * pid > 0 signals that process, pid == 0 the caller's process group,
 * pid == -1 every process the caller may signal (except init and itself),
 * and pid < -1 the process group -pid.
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (vx_check(vx_task_xid(p), VX_ADMIN|VX_IDENT) &&
				p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, __si_special(priv), pgrp);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, __si_special(priv), pid);
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		spinlock_t *lock = &current->sighand->siglock;
		read_lock(&tasklist_lock);
		spin_lock_irqsave(lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}

int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	rcu_read_lock();

	if (!likely(lock_task_sighand(p, &flags))) {
		ret = -1;
		goto out_err;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	unlock_task_sighand(p, &flags);
out_err:
	rcu_read_unlock();

	return ret;
}

int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

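/*
 * Sketch (illustrative, not part of the original file): the posix-timers
 * code preallocates the sigqueue at timer_create() time and reuses it on
 * every expiry, roughly as below.  The struct and field names here are
 * simplified assumptions, not the real posix-timers data structures.
 */
struct example_timer {
	struct sigqueue *sigq;
	struct task_struct *target;
	int sig;
	int shared;		/* process-wide vs. thread-directed */
};

static int example_timer_create(struct example_timer *tmr)
{
	tmr->sigq = sigqueue_alloc();
	if (!tmr->sigq)
		return -EAGAIN;	/* fail at create time, not at expiry */
	return 0;
}

static int example_timer_fire(struct example_timer *tmr)
{
	tmr->sigq->info.si_signo = tmr->sig;
	tmr->sigq->info.si_code = SI_TIMER;
	/* Re-sending while still queued just bumps si_overrun above. */
	return tmr->shared
		? send_group_sigqueue(tmr->sig, tmr->sigq, tmr->target)
		: send_sigqueue(tmr->sig, tmr->sigq, tmr->target);
}
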
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(tsk->group_leader != tsk || !thread_group_empty(tsk));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}

void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	/*
	 * Tracing can decide that we should not do the normal notification.
	 */
	if (tracehook_notify_cldstop(tsk, &info))
		return;

	tsk = tsk->group_leader;
	parent = tsk->parent;

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (!tracehook_finish_stop(stop_count <= 0) && stop_count <= 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		struct task_struct *t;

		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!t->exit_state &&
			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		/*
		 * Tracing can induce an artifical signal and choose sigaction.
		 * The return value in signr determines the default action,
		 * but info->si_signo is the signal number we will report.
		 */
		signr = tracehook_get_signal(current, regs, info, return_ka);
		if (unlikely(signr < 0))
			goto relock;
		if (unlikely(signr != 0))
			ka = return_ka;
		else {
			signr = dequeue_signal(current, mask, info);

			if (!signr)
				break; /* will return 0 */
			ka = &current->sighand->action[signr-1];
		}

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current == child_reaper)
			continue;

		/* virtual init is protected against user signals */
		if ((info->si_code == SI_USER) &&
			vx_current_initpid(current->pid))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_orphaned_pgrp(process_group(current)))
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (print_fatal_signals)
			print_fatal_signal(regs, signr);
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(info->si_signo, info->si_signo, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}

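/*
 * Usage sketch (illustrative, not part of the original file): a kernel
 * thread that wants to honour only SIGKILL could block everything else
 * like this.  Because the kernel-internal sigprocmask() above will
 * happily block SIGKILL too, the mask is built explicitly to exclude it.
 */
static void example_kthread_block_most_signals(void)
{
	sigset_t blocked;

	siginitsetinv(&blocked, sigmask(SIGKILL));	/* all but SIGKILL */
	sigprocmask(SIG_BLOCK, &blocked, NULL);
}
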
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif

asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout == MAX_SCHEDULE_TIMEOUT)
			ret = -ERESTARTNOHAND;
	}

	return ret;
}

asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (p && (tgid <= 0 || p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);

	return error;
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process.  This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

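/*
 * Userspace usage note (added, hedged): glibc's NPTL implements
 * pthread_kill() roughly on top of tgkill(getpid(), tid, sig), so a TID
 * recycled into another process cannot be signalled by mistake.
 */
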
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		   (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
			struct task_struct *t = current;
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly
		 *  	  old code may have been written using ss_flags==0
		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
		 *	  way that worked) - this fix preserves that older
		 *	  mechanism
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}

#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */

void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}