/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tracehook.h>
#include <linux/signal.h>
#include <linux/capability.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */
#include <linux/vs_base.h>
/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;
/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))
#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL)  |  M(SIGSTOP)  )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP)  |  M(SIGTSTP)  |  M(SIGTTIN)  |  M(SIGTTOU)  )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT)  |  M(SIGILL)   |  M(SIGTRAP)  |  M(SIGABRT)  | \
	M(SIGFPE)   |  M(SIGSEGV)  |  M(SIGBUS)   |  M(SIGSYS)   | \
	M(SIGXCPU)  |  M(SIGXFSZ)  |  M_SIGEMT  )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT)  |  M(SIGCHLD)  |  M(SIGWINCH) |  M(SIGURG)   )

#define sig_kernel_only(sig) \
		(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
		(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
		(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
		(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))

#define sig_needs_tasklist(sig)	((sig) == SIGCONT)

#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
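
/*
 * Illustrative sketch (not part of the original source, compiled out):
 * how the classification macros above partition a signal number.  Any
 * signal >= SIGRTMIN falls through every mask test and so, with SIG_DFL,
 * is fatal by default.  classify_sig is a hypothetical helper.
 */
#if 0
static const char *classify_sig(struct task_struct *t, int sig)
{
	if (sig_kernel_ignore(sig))
		return "ignored by default";
	if (sig_kernel_stop(sig))
		return "stops the group by default";
	if (sig_kernel_coredump(sig))
		return "dumps core by default";
	if (sig_fatal(t, sig))
		return "fatal with SIG_DFL";
	return "handled, or not fatal";
}
#endif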
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	if (handler != SIG_IGN &&
	    (handler != SIG_DFL || !sig_kernel_ignore(sig)))
		return 0;

	/* It's ignored, we can short-circuit unless a debugger wants it. */
	return !tracehook_consider_ignored_signal(t, sig, handler);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    (freezing(t)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    tracehook_induce_sigpending(t))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;

	atomic_inc(&t->user->sigpending);
	if (override_rlimit ||
	    atomic_read(&t->user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&t->user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(t->user);
	}
	return(q);
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

EXPORT_SYMBOL_GPL(flush_signal_handlers);
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
	recalc_sigpending();
}
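
/*
 * Illustrative sketch (not part of the original source, compiled out):
 * the calling pattern the comment above describes, as a driver might
 * use it.  my_notifier, my_dev and allow_signals are hypothetical names.
 */
#if 0
static int my_notifier(void *priv)
{
	struct my_dev *dev = priv;
	/* Return nonzero to let the signal be acted upon, 0 to hold it. */
	return dev->allow_signals;
}

static void my_critical_section(struct my_dev *dev, sigset_t *mask)
{
	block_all_signals(my_notifier, dev, mask);
	/* ... signals in *mask are now gated through my_notifier ... */
	unblock_all_signals();
}
#endif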
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	     info->si_sys_private){
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int user;
	int error = -EINVAL;

	if (!valid_signal(sig))
		return error;

	user = ((info == SEND_SIG_NOINFO) ||
		(!is_si_special(info) && SI_FROMUSER(info)));

	error = -EPERM;
	if (user && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	error = -ESRCH;
	if (user && !vx_check(vx_task_xid(t), VX_ADMIN|VX_IDENT))
		return error;

	error = security_task_kill(t, info, sig, 0);
	if (!error)
		audit_signal_info(sig, t); /* Let audit system see the signal */
	return error;
}
/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))

int print_fatal_signals = 0;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, current->pid, signr);

#ifdef __i386__
	printk("code at %08lx: ", regs->eip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->eip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
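
/*
 * Usage note (added): the diagnostic above is switched on from the
 * kernel command line, e.g.
 *
 *	print-fatal-signals=1
 */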
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	BUG_ON(!irqs_disabled());
	assert_spin_locked(&t->sighand->siglock);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_tsk(t);
		}
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (p->state & (TASK_STOPPED | TASK_TRACED))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !tracehook_consider_fatal_signal(t, sig))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}
/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}

int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}
/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	int acquired_tasklist_lock = 0;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_needs_tasklist(sig))) {
		read_lock(&tasklist_lock);
		acquired_tasklist_lock = 1;
	}
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && vx_check(vx_task_xid(p), VX_IDENT))
		error = group_send_sig_info(sig, info, p);
	if (unlikely(acquired_tasklist_lock))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();
	return error;
}
/* like kill_proc_info(), but doesn't use uid/euid of "current" */
int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (vx_check(vx_task_xid(p), VX_ADMIN|VX_IDENT) &&
				p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
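
/*
 * Illustrative user-space sketch (not part of the original source,
 * compiled out): the pid encodings kill_something_info() implements
 * for kill(2).
 */
#if 0
#include <signal.h>
#include <unistd.h>

static void kill_examples(pid_t pid, pid_t pgrp)
{
	kill(pid, SIGTERM);	/* pid > 0: one process (thread group)   */
	kill(0, SIGTERM);	/* pid == 0: the caller's process group  */
	kill(-pgrp, SIGTERM);	/* pid < -1: the process group -pid      */
	kill(-1, SIGTERM);	/* pid == -1: everything we may signal   */
}
#endif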
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, __si_special(priv), pgrp);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, __si_special(priv), pid);
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		spinlock_t *lock = &current->sighand->siglock;
		read_lock(&tasklist_lock);
		spin_lock_irqsave(lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
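
/*
 * Illustrative sketch (not part of the original source, compiled out):
 * the preallocation pattern the comment block above describes.  A POSIX
 * timer grabs its sigqueue at creation time, so firing can never fail
 * for lack of memory.  my_timer_create and my_timer_fire are
 * hypothetical names.
 */
#if 0
static struct sigqueue *timer_sigq;

static int my_timer_create(void)
{
	timer_sigq = sigqueue_alloc();
	if (!timer_sigq)
		return -EAGAIN;		/* report failure at create time */
	return 0;
}

static void my_timer_fire(struct task_struct *target, int sig)
{
	/* Cannot fail for lack of memory: the queue entry already exists. */
	send_sigqueue(sig, timer_sigq, target);
}
#endif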
int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	rcu_read_lock();

	if (!likely(lock_task_sighand(p, &flags))) {
		ret = -1;
		goto out_err;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	unlock_task_sighand(p, &flags);
out_err:
	rcu_read_unlock();

	return ret;
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(tsk->group_leader != tsk || !thread_group_empty(tsk));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	/*
	 * Tracing can decide that we should not do the normal notification.
	 */
	if (tracehook_notify_cldstop(tsk, &info))
		return;

	tsk = tsk->group_leader;
	parent = tsk->parent;

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (!tracehook_finish_stop(stop_count <= 0) && stop_count <= 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		struct task_struct *t;

		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!t->exit_state &&
			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

	try_to_freeze();

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		/*
		 * Tracing can induce an artificial signal and choose sigaction.
		 * The return value in signr determines the default action,
		 * but info->si_signo is the signal number we will report.
		 */
		signr = tracehook_get_signal(current, regs, info, return_ka);
		if (unlikely(signr < 0))
			goto relock;
		if (unlikely(signr != 0))
			ka = return_ka;
		else {
			signr = dequeue_signal(current, mask, info);

			if (!signr)
				break; /* will return 0 */
			ka = &current->sighand->action[signr-1];
		}

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current == child_reaper)
			continue;

		/* virtual init is protected against user signals */
		if ((info->si_code == SI_USER) &&
			vx_current_initpid(current->pid))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_orphaned_pgrp(process_group(current)))
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (print_fatal_signals)
			print_fatal_signal(regs, signr);
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(info->si_signo, info->si_signo, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
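
/*
 * Illustrative sketch (not part of the original source, compiled out):
 * a kernel thread using the in-kernel sigprocmask() above to block
 * everything except SIGKILL while it works, as the comment describes.
 * my_kthread_block_signals is a hypothetical name.
 */
#if 0
static void my_kthread_block_signals(void)
{
	sigset_t all, old;

	sigfillset(&all);
	sigdelset(&all, SIGKILL);	/* stay killable */
	sigprocmask(SIG_BLOCK, &all, &old);
	/* ... do work without being interrupted by other signals ... */
	sigprocmask(SIG_SETMASK, &old, NULL);
}
#endif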
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
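
/*
 * Illustrative user-space sketch (not part of the original source,
 * compiled out): synchronous signal handling through sigtimedwait(2),
 * the libc entry point for sys_rt_sigtimedwait above.  wait_for_usr1
 * is a hypothetical name.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <time.h>

static int wait_for_usr1(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { 5, 0 };		/* 5 second timeout */

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* must be blocked first */

	if (sigtimedwait(&set, &info, &ts) < 0)
		return -1;			/* errno EAGAIN on timeout */
	printf("SIGUSR1 from pid %d\n", (int)info.si_pid);
	return 0;
}
#endif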
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (p && (tgid <= 0 || p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);

	return error;
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process.  This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
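
/*
 * Illustrative user-space sketch (not part of the original source,
 * compiled out): directing a signal at one specific thread with tgkill,
 * as the comment above describes.  Older libcs lack a wrapper, hence
 * syscall().  signal_one_thread is a hypothetical name.
 */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int signal_one_thread(pid_t tgid, pid_t tid)
{
	/* Fails with ESRCH if tid has died or no longer belongs to tgid. */
	return syscall(SYS_tgkill, tgid, tid, SIGUSR1);
}
#endif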
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
			struct task_struct *t = current;
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
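
/*
 * Illustrative user-space sketch (not part of the original source,
 * compiled out): installing an alternate stack so a SIGSEGV handler can
 * still run after the main stack overflows; pairs with SA_ONSTACK in
 * sigaction().  install_altstack is a hypothetical name.
 */
#if 0
#include <signal.h>
#include <stdlib.h>

static int install_altstack(void)
{
	stack_t ss;

	ss.ss_sp = malloc(SIGSTKSZ);
	if (!ss.ss_sp)
		return -1;
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;	/* 0, not SS_ONSTACK -- see comment above */
	return sigaltstack(&ss, NULL);
}
#endif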
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
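
/*
 * Illustrative user-space sketch (not part of the original source,
 * compiled out): the classic race-free wait that sigsuspend(2) exists
 * for -- atomically unblock a signal and sleep until it arrives.  A
 * SIGUSR1 handler that sets got_usr1 is assumed to be installed
 * elsewhere; wait_race_free is a hypothetical name.
 */
#if 0
#include <signal.h>

static volatile sig_atomic_t got_usr1;

static void wait_race_free(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);	/* close the race window */
	while (!got_usr1)
		sigsuspend(&old);		/* unblock + sleep atomically */
	sigprocmask(SIG_SETMASK, &old, NULL);
}
#endif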
void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}