/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 * Changes to use preallocated sigqueue structures
 * to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tracehook.h>
#include <linux/signal.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/vs_context.h>
#include <linux/vs_pid.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;
/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop 	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	| POSIX signal       | default action   |
 *	+--------------------+------------------+
 *	| SIGHUP             | terminate        |
 *	| SIGINT             | terminate        |
 *	| SIGQUIT            | coredump         |
 *	| SIGILL             | coredump         |
 *	| SIGTRAP            | coredump         |
 *	| SIGABRT/SIGIOT     | coredump         |
 *	| SIGBUS             | coredump         |
 *	| SIGFPE             | coredump         |
 *	| SIGKILL            | terminate(+)     |
 *	| SIGUSR1            | terminate        |
 *	| SIGSEGV            | coredump         |
 *	| SIGUSR2            | terminate        |
 *	| SIGPIPE            | terminate        |
 *	| SIGALRM            | terminate        |
 *	| SIGTERM            | terminate        |
 *	| SIGCHLD            | ignore           |
 *	| SIGCONT            | ignore(*)        |
 *	| SIGSTOP            | stop(*)(+)       |
 *	| SIGTSTP            | stop(*)          |
 *	| SIGTTIN            | stop(*)          |
 *	| SIGTTOU            | stop(*)          |
 *	| SIGURG             | ignore           |
 *	| SIGXCPU            | coredump         |
 *	| SIGXFSZ            | coredump         |
 *	| SIGVTALRM          | terminate        |
 *	| SIGPROF            | terminate        |
 *	| SIGPOLL/SIGIO      | terminate        |
 *	| SIGSYS/SIGUNUSED   | coredump         |
 *	| SIGSTKFLT          | terminate        |
 *	| SIGWINCH           | ignore           |
 *	| SIGPWR             | terminate        |
 *	| SIGRTMIN-SIGRTMAX  | terminate        |
 *	+--------------------+------------------+
 *	| non-POSIX signal   | default action   |
 *	+--------------------+------------------+
 *	| SIGEMT             | coredump         |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))
#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL) | M(SIGSTOP) )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT) | M(SIGILL)  | M(SIGTRAP) | M(SIGABRT) | \
	M(SIGFPE)  | M(SIGSEGV) | M(SIGBUS)  | M(SIGSYS)  | \
	M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )

#define sig_kernel_only(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))

#define sig_needs_tasklist(sig)	((sig) == SIGCONT)

#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
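/*
 * Editor's note: an illustrative sketch (compiled out, not part of the
 * original file) of what the classification macros above compute.  The
 * hypothetical demo_classify() only restates the default-action table
 * from the top of this file.
 */
#if 0
static void demo_classify(void)
{
	BUG_ON(!sig_kernel_only(SIGKILL));	/* can't be caught or blocked */
	BUG_ON(!sig_kernel_stop(SIGTSTP));	/* job-control stop */
	BUG_ON(!sig_kernel_coredump(SIGQUIT));	/* SIG_DFL dumps core */
	BUG_ON(!sig_kernel_ignore(SIGCHLD));	/* SIG_DFL is ignore */
	BUG_ON(sig_kernel_stop(SIGRTMIN));	/* rt signals never classify */
}
#endif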
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	if (handler != SIG_IGN &&
	    (handler != SIG_DFL || !sig_kernel_ignore(sig)))
		return 0;

	/* It's ignored, we can short-circuit unless a debugger wants it. */
	return !tracehook_consider_ignored_signal(t, sig, handler);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    (freezing(t)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    tracehook_induce_sigpending(t))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return(q);
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
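/*
 * Editor's note: an illustrative sketch (compiled out, not part of the
 * original file) of the notifier API above, in the style of the old DRM
 * lock code.  The names my_driver_notifier/my_driver_data and the
 * hardware_locked field are hypothetical.
 */
#if 0
static int my_driver_notifier(void *priv)
{
	struct my_driver_data *data = priv;

	/* Return non-zero to let the signal be acted upon after all. */
	return !data->hardware_locked;
}

static void my_driver_critical_section(struct my_driver_data *data)
{
	sigset_t mask;

	sigfillset(&mask);
	block_all_signals(my_driver_notifier, data, &mask);
	/* ... touch the hardware ... */
	unblock_all_signals();
}
#endif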
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info. */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	recalc_sigpending_tsk(tsk);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped
	 * state.  By using wake_up_state, we ensure the process will wake
	 * up and handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;

	if (!valid_signal(sig))
		return error;

	if ((info != SEND_SIG_NOINFO) &&
	    (is_si_special(info) || !SI_FROMUSER(info)))
		return 0;

	vxdprintk(VXD_CBIT(misc, 7),
		"check_kill_permission(%d,%p,%p[#%u,%u])",
		sig, info, t, vx_task_xid(t), t->pid);

	error = -EPERM;
	if (((sig != SIGCONT) ||
		(process_session(current) != process_session(t)))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	error = -ESRCH;
	if (!vx_check(vx_task_xid(t), VS_WATCH_P|VS_IDENT)) {
		vxdprintk(current->xid || VXD_CBIT(misc, 7),
			"signal %d[%p] xid mismatch %p[#%u,%u] xid=#%u",
			sig, info, t, vx_task_xid(t), t->pid, current->xid);
		return error;
	}

	error = security_task_kill(t, info, sig, 0);
	if (!error)
		audit_signal_info(sig, t); /* Let audit system see the signal */
	return error;
}
/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))

int print_fatal_signals = 0;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, current->pid, signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->eip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->eip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
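/*
 * Editor's note: given the __setup() above, booting with
 * "print-fatal-signals=1" on the kernel command line turns the
 * diagnostic printk on.
 */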
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	BUG_ON(!irqs_disabled());
	assert_spin_locked(&t->sighand->siglock);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_tsk(t);
		}
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (p->state & (TASK_STOPPED | TASK_TRACED))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !tracehook_consider_fatal_signal(t, sig))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}
/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}
/*
 * kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pgrp_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	if (pgrp <= 0)
		return -EINVAL;

	return __kill_pgrp_info(sig, info, find_pid(pgrp));
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error;
	int acquired_tasklist_lock = 0;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_needs_tasklist(sig))) {
		read_lock(&tasklist_lock);
		acquired_tasklist_lock = 1;
	}
	p = pid_task(pid, PIDTYPE_PID);
	error = -ESRCH;
	if (p && vx_check(vx_task_xid(p), VS_IDENT))
		error = group_send_sig_info(sig, info, p);
	if (unlikely(acquired_tasklist_lock))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();
	return error;
}

static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_pid(vx_rmap_pid(pid)));
	rcu_read_unlock();
	return error;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (vx_check(vx_task_xid(p), VS_ADMIN_P|VS_IDENT) &&
				p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);

	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	return kill_pgrp_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, __si_special(priv), pgrp);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, __si_special(priv), pid);
}
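/*
 * Editor's note: an illustrative sketch (compiled out, not part of the
 * original file) of these compatibility entry points.  A driver or
 * kernel thread typically uses send_sig()/force_sig() on a task pointer
 * it already holds a reference to; demo_send() and "task" are
 * hypothetical.
 */
#if 0
static void demo_send(struct task_struct *task)
{
	/* Deliver a kernel-originated (SEND_SIG_PRIV) SIGTERM: */
	send_sig(SIGTERM, task, 1);

	/* Deliver a SIGSEGV that cannot be blocked or ignored: */
	force_sig(SIGSEGV, task);
}
#endif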
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		spinlock_t *lock = &current->sighand->siglock;
		read_lock(&tasklist_lock);
		spin_lock_irqsave(lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	rcu_read_lock();

	if (!likely(lock_task_sighand(p, &flags))) {
		ret = -1;
		goto out_err;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	unlock_task_sighand(p, &flags);
out_err:
	rcu_read_unlock();

	return ret;
}
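/*
 * Editor's note: an illustrative sketch (compiled out, not part of the
 * original file) of the preallocated-sigqueue lifecycle described
 * above, roughly what the posix-timers code does at timer_create()/
 * timer-expiry/timer_delete time.  demo_preallocated_signal() and its
 * error handling are hypothetical and simplified.
 */
#if 0
static void demo_preallocated_signal(struct task_struct *target)
{
	struct sigqueue *q;

	q = sigqueue_alloc();		/* at creation time; may sleep */
	if (!q)
		return;			/* report -EAGAIN to the caller */

	q->info.si_code = SI_TIMER;	/* required by send_sigqueue() */

	if (send_sigqueue(SIGRTMIN, q, target) < 0)
		/* target is exiting; redirect to the group leader */
		;

	sigqueue_free(q);		/* at deletion time */
}
#endif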
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(tsk->group_leader != tsk || !thread_group_empty(tsk));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
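/*
 * Editor's note: a standalone user-space illustration (compiled out,
 * not kernel code) of the POSIX SIG_IGN/SA_NOCLDWAIT semantics the
 * block above implements: the child is reaped automatically and a
 * blocked wait() returns -1 with errno == ECHILD.
 */
#if 0
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* children will not become zombies */
	if (fork() == 0)
		_exit(0);
	if (wait(NULL) < 0 && errno == ECHILD)
		puts("child was auto-reaped");
	return 0;
}
#endif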
void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	/*
	 * Tracing can decide that we should not do the normal notification.
	 */
	if (tracehook_notify_cldstop(tsk, &info))
		return;

	tsk = tsk->group_leader;
	parent = tsk->parent;

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (!tracehook_finish_stop(stop_count <= 0) && stop_count <= 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		struct task_struct *t;

		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!t->exit_state &&
			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	try_to_freeze();
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		/*
		 * Tracing can induce an artificial signal and choose sigaction.
		 * The return value in signr determines the default action,
		 * but info->si_signo is the signal number we will report.
		 */
		signr = tracehook_get_signal(current, regs, info, return_ka);
		if (unlikely(signr < 0))
			goto relock;
		if (unlikely(signr != 0))
			ka = return_ka;
		else {
			signr = dequeue_signal(current, mask, info);

			if (!signr)
				break; /* will return 0 */
			ka = &current->sighand->action[signr-1];
		}

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Init of a pid space gets no signals it doesn't want from
		 * within that pid space. It can of course get signals from
		 * its parent pid space.
		 */
		if (current == child_reaper(current))
			continue;

		/* virtual init is protected against user signals */
		if ((info->si_code == SI_USER) &&
			vx_current_initpid(current->pid))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_orphaned_pgrp(process_group(current)))
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (print_fatal_signals)
			print_fatal_signal(regs, signr);
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(info->si_signo, info->si_signo, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
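/*
 * Editor's note: an illustrative sketch (compiled out, not part of the
 * original file) of the in-kernel sigprocmask() interface the comment
 * above mentions, e.g. a kernel thread that wants to see nothing but
 * SIGKILL.  demo_kthread_block() is hypothetical.
 */
#if 0
static void demo_kthread_block(void)
{
	sigset_t blocked;

	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, NULL);
	/* flush anything that was already pending */
	flush_signals(current);
}
#endif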
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we sleep, so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout == MAX_SCHEDULE_TIMEOUT)
			ret = -ERESTARTNOHAND;
	}

	return ret;
}
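/*
 * Editor's note: a standalone user-space illustration (compiled out,
 * not kernel code) of the synchronous-wait semantics implemented
 * above: block the signal, then pull it off the queue with a timeout.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* must be blocked first */

	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
		printf("SIGUSR1 from pid %d\n", (int)info.si_pid);
	return 0;
}
#endif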
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (p && (tgid <= 0 || p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);

	return error;
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process.  This method
 *  solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
			struct task_struct *t = current;
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
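/*
 * Editor's note: a standalone user-space illustration (compiled out,
 * not kernel code) of the POSIX 3.3.1.3 rule handled above: setting
 * the action to SIG_IGN discards a pending instance of the signal,
 * even while it is blocked.
 */
#if 0
#include <assert.h>
#include <signal.h>

int main(void)
{
	sigset_t set, pending;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	raise(SIGUSR1);				/* now pending */

	signal(SIGUSR1, SIG_IGN);		/* pending instance discarded */
	sigpending(&pending);
	assert(!sigismember(&pending, SIGUSR1));
	return 0;
}
#endif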
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
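/*
 * Editor's note: a standalone user-space illustration (compiled out,
 * not kernel code) of why sigsuspend/rt_sigsuspend swaps the mask
 * atomically, as implemented above: check a flag with the signal
 * blocked, then wait with it unblocked, with no window in between.
 */
#if 0
#include <signal.h>

static volatile sig_atomic_t got_usr1;
static void on_usr1(int sig) { got_usr1 = 1; }

int main(void)
{
	sigset_t block, waitmask;

	signal(SIGUSR1, on_usr1);
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &waitmask);
	sigdelset(&waitmask, SIGUSR1);

	while (!got_usr1)
		sigsuspend(&waitmask);	/* unblock + sleep, atomically */
	return 0;
}
#endif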
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}