/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>

extern void k_getrusage(struct task_struct *, int, struct rusage *);

/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;

/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */

#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL) | M(SIGSTOP))

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU))

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT) | M(SIGILL)  | M(SIGTRAP) | M(SIGABRT) | \
	M(SIGFPE)  | M(SIGSEGV) | M(SIGBUS)  | M(SIGSYS)  | \
	M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT)

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG))

#define sig_kernel_only(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))

#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)

#define sig_avoid_stop_race() \
	(sigtestsetmask(&current->pending.signal, M(SIGCONT) | M(SIGKILL)) || \
	 sigtestsetmask(&current->signal->shared_pending.signal, \
			M(SIGCONT) | M(SIGKILL)))
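/*
 * Hedged illustration (not part of the kernel's logic, kept compiled
 * out): how the classification macros above partition the signal
 * space.  Each assertion follows directly from the mask definitions.
 */
#if 0
static void classification_examples(void)
{
	BUG_ON(!sig_kernel_stop(SIGTSTP));	/* job-control stop */
	BUG_ON(sig_kernel_only(SIGTSTP));	/* but it can be caught */
	BUG_ON(!sig_kernel_only(SIGKILL));	/* uncatchable, unblockable */
	BUG_ON(!sig_kernel_ignore(SIGCHLD));	/* default action: ignore */
	BUG_ON(!sig_kernel_coredump(SIGQUIT));	/* default action: coredump */
}
#endif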
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return	handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}

/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}
	return sig;
}
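/*
 * Hedged arithmetic note on next_signal(), compiled out: within one
 * word, x holds the deliverable bits and ffz(~x) is the index of the
 * lowest set bit, so the lowest-numbered signal is always picked first.
 */
#if 0
static int lowest_signal_in_word(unsigned long x)
{
	return x ? ffz(~x) + 1 : 0;	/* e.g. x = 0x2 -> 2 (SIGINT) */
}
#endif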
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, int flags)
{
	struct sigqueue *q = NULL;

	if (atomic_read(&t->user->sigpending) <
	    t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (q) {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->lock = NULL;
		q->user = get_uid(t->user);
		atomic_inc(&q->user->sigpending);
	}
	return q;
}

static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand = tsk->sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	spin_lock(&sighand->siglock);
	if (atomic_dec_and_test(&sig->count)) {
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime += tsk->utime;
		sig->stime += tsk->stime;
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below.  */
	}
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.  We delayed
		 * calling exit_itimers until after flush_sigqueue, just in
		 * case our thread-local pending queue contained a queued
		 * timer signal that would have been cleared in
		 * exit_itimers.  When that called sigqueue_free, it would
		 * attempt to re-take the tasklist_lock and deadlock.  This
		 * can never happen if we ensure that all queues the
		 * timer's signal might be queued on have been flushed
		 * first.  The shared_pending queue, and our own pending
		 * queue are the only queues the timer could be on, since
		 * there are no other threads left in the group and timer
		 * signals are constrained to threads inside the group.
		 */
		exit_itimers(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}

void exit_signal(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

EXPORT_SYMBOL_GPL(flush_signal_handlers);

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
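/*
 * Hedged usage sketch for block_all_signals()/unblock_all_signals(),
 * compiled out.  The device structure and callback are hypothetical;
 * the notifier contract (non-zero: deliver after all, zero: keep the
 * signal blocked) is the one documented above.
 */
#if 0
struct example_dev {
	int in_critical_section;
};

static int example_notifier(void *priv)
{
	struct example_dev *dev = priv;

	return !dev->in_critical_section;
}

static void example_enter_critical(struct example_dev *dev)
{
	sigset_t mask;

	sigfillset(&mask);	/* intercept every signal */
	dev->in_critical_section = 1;
	block_all_signals(example_notifier, dev, &mask);
}

static void example_leave_critical(struct example_dev *dev)
{
	dev->in_critical_section = 0;
	unblock_all_signals();
}
#endif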
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private) {
		do_schedule_next_timer(info);
	}
	return signr;
}
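/*
 * Hedged caller sketch, compiled out: dequeue_signal() runs under the
 * siglock and takes &current->blocked as the mask, so only unblocked
 * pending signals are returned -- the same pattern used by
 * get_signal_to_deliver() below.
 */
#if 0
static int example_dequeue_one(siginfo_t *info)
{
	int signr;

	spin_lock_irq(&current->sighand->siglock);
	signr = dequeue_signal(current, &current->blocked, info);
	spin_unlock_irq(&current->sighand->siglock);
	return signr;	/* 0 when nothing deliverable was pending */
}
#endif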
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * If resume is set, we want to wake it up in the TASK_STOPPED case.
	 * We don't check for TASK_STOPPED because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By calling wake_up_process any time resume is set, we ensure
	 * the process will wake up and handle its stop or death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int user;
	int error = -EINVAL;

	if (sig < 0 || sig > _NSIG)
		return error;

	user = (!info ||
		(info != SEND_SIG_PRIV &&
		 info != SEND_SIG_FORCED &&
		 SI_FROMUSER(info)));

	error = -EPERM;
	if (user && (sig != SIGCONT ||
		current->signal->session != t->signal->session)
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	error = -ESRCH;
	if (user && !vx_check(vx_task_xid(t), VX_ADMIN|VX_IDENT))
		return error;

	return security_task_kill(t, info, sig);
}
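/*
 * Hedged note, compiled out: the (a ^ b) chains in
 * check_kill_permission() are a branch-free "a != b"; the whole uid
 * test passes when any of the four uid pairings match, so it is
 * equivalent to this helper.
 */
#if 0
static int example_uids_match(struct task_struct *t)
{
	return current->euid == t->suid || current->euid == t->uid ||
	       current->uid  == t->suid || current->uid  == t->uid;
}
#endif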
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     struct task_struct *parent,
				     int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->stop_state = 1;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_STOPPED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
					CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->stop_state > 0) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->stop_state = -1;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_CONTINUED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
					CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		}
	}
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if ((unsigned long)info == 2)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC);
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else {
		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		    && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
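/*
 * Hedged illustration of the LEGACY_QUEUE() rule, compiled out: a
 * non-RT signal that is already pending coalesces (the new instance is
 * dropped), while RT signals queue every instance.
 */
#if 0
static int example_would_coalesce(struct sigpending *pending, int sig)
{
	return LEGACY_QUEUE(pending, sig);	/* always 0 for sig >= SIGRTMIN */
}
#endif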
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
#ifdef CONFIG_SMP
	if (!spin_is_locked(&t->sighand->siglock))
		BUG();
#endif

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
		sigdelset(&t->blocked, sig);
		recalc_sigpending_tsk(t);
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, (void *)2, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
#define wants_signal(sig, p, mask)			\
	(!sigismember(&(p)->blocked, sig)		\
	 && !((p)->state & mask)			\
	 && !((p)->flags & PF_EXITING)			\
	 && (task_curr(p) || !signal_pending(p)))

static void
__group_complete_signal(int sig, struct task_struct *p)
{
	unsigned int mask;
	struct task_struct *t;

	/*
	 * Don't bother zombies and stopped tasks (but
	 * SIGKILL will punch through stopped state)
	 */
	mask = EXIT_DEAD | EXIT_ZOMBIE | TASK_TRACED;
	if (sig != SIGKILL)
		mask |= TASK_STOPPED;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p, mask))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t, mask)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !p->signal->group_exit &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->group_exit = 1;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

#ifdef CONFIG_SMP
	if (!spin_is_locked(&p->sighand->siglock))
		BUG();
#endif
	handle_stop_signal(sig, p);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state & (EXIT_ZOMBIE|EXIT_DEAD))
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		sigaddset(&t->pending.signal, SIGKILL);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		signal_wake_up(t, 1);
	}
}

/*
 * Must be called with the tasklist_lock held for reading!
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && p->sighand) {
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}

	return ret;
}

/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (sig < 0 || sig > _NSIG)
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL)))
		q->flags |= SIGQUEUE_PREALLOC;
	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		read_lock(&tasklist_lock);
		spin_lock_irqsave(q->lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(q->lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}

int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
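/*
 * Hedged lifecycle sketch for the preallocated-sigqueue API, compiled
 * out.  The structure and function names are hypothetical stand-ins
 * for the posix-timers code described above sigqueue_alloc(): allocate
 * at timer creation (so EAGAIN can be reported then), send on every
 * expiry, free at deletion.
 */
#if 0
struct example_timer {
	struct sigqueue *sigq;
	struct task_struct *target;
	int signo;
};

static int example_timer_create(struct example_timer *tmr)
{
	tmr->sigq = sigqueue_alloc();
	if (!tmr->sigq)
		return -EAGAIN;		/* report failure at create time */
	tmr->sigq->info.si_signo = tmr->signo;
	tmr->sigq->info.si_code = SI_TIMER;
	return 0;
}

static void example_timer_expire(struct example_timer *tmr)
{
	/* Cannot fail with EAGAIN: the queue entry already exists. */
	send_group_sigqueue(tmr->signo, tmr->sigq, tmr->target);
}

static void example_timer_delete(struct example_timer *tmr)
{
	sigqueue_free(tmr->sigq);
}
#endif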
/*
 * Joy. Or not. Pthread wants us to wake up every thread
 * in our parent group.
 */
static void __wake_up_parent(struct task_struct *p,
			     struct task_struct *parent)
{
	struct task_struct *tsk = parent;

	/*
	 * Fortunately this is not necessary for thread groups:
	 */
	if (p->tgid == tsk->tgid) {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		return;
	}

	do {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		tsk = next_thread(tsk);
		if (tsk->signal != parent->signal)
			BUG();
	} while (tsk != parent);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime + tsk->signal->utime;
	info.si_stime = tsk->stime + tsk->signal->stime;

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (sig > 0 && sig <= _NSIG)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}

static void
do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
			 int why)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime;
	info.si_stime = tsk->stime;

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 */
static void ptrace_stop(int exit_code, siginfo_t *info)
{
	BUG_ON(!(current->ptrace & PT_PTRACED));

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	do_notify_parent_cldstop(current, current->parent, CLD_TRAPPED);
	read_unlock(&tasklist_lock);
	schedule();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

int print_fatal_signals = 0;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	int i;
	unsigned char insn;

	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, current->pid, signr);

#ifdef __i386__
	printk("code at %08lx: ", regs->eip);
	for (i = 0; i < 16; i++) {
		__get_user(insn, (unsigned char *)(regs->eip + i));
		printk("%02x ", insn);
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
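/*
 * Usage note: boot with "print-fatal-signals=1" on the kernel command
 * line to enable the diagnostic above.
 */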
#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, current->parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
	else if (stop_count == 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader,
					 current->group_leader->real_parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 */
static void
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	/* spin_lock_irq(&sighand->siglock) is now done in caller */

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->stop_state = 1;
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = current->signal->group_exit_code = signr;
		set_current_state(TASK_STOPPED);
		sig->stop_state = 1;
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (unlikely(sig->group_exit)) {
			/*
			 * There is a group exit in progress now.
			 * We'll just ignore the stop and process the
			 * associated fatal signal.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (unlikely(sig_avoid_stop_race())) {
			/*
			 * Either a SIGCONT or a SIGKILL signal was
			 * posted in the siglock-not-held window.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (t->state < TASK_STOPPED) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->stop_state = 1;

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
}

/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->group_exit)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->stop_state = 1;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((signr == SIGSEGV) && print_fatal_signals) {
			spin_unlock_irq(&current->sighand->siglock);
			print_fatal_signal(regs, signr);
			spin_lock_irq(&current->sighand->siglock);
		}

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr == SIGSTOP) {
				do_signal_stop(signr); /* releases siglock */
				goto relock;
			}
			spin_unlock_irq(&current->sighand->siglock);

			/* signals can be posted during this window */

			if (is_orphaned_pgrp(process_group(current)))
				goto relock;

			spin_lock_irq(&current->sighand->siglock);
			if (unlikely(sig_avoid_stop_race())) {
				/*
				 * Either a SIGCONT or a SIGKILL signal was
				 * posted in the siglock-not-held window.
				 */
				continue;
			}

			do_signal_stop(signr); /* releases siglock */
			goto relock;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (print_fatal_signals)
			print_fatal_signal(regs, signr);
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}

#endif /* HAVE_ARCH_GET_SIGNAL_TO_DELIVER */

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
		break;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}
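/*
 * Hedged sketch, compiled out: a kernel thread using the in-kernel
 * sigprocmask() to shield itself from everything except SIGKILL, per
 * the note above.
 */
#if 0
static void example_kthread_block_most(void)
{
	sigset_t blocked;

	sigfillset(&blocked);
	sigdelset(&blocked, SIGKILL);	/* stay killable */
	sigprocmask(SIG_SETMASK, &blocked, NULL);
}
#endif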
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif

asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested while we are sleeping in so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
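/*
 * Hedged userspace sketch (not kernel code, compiled out) of the
 * sigtimedwait() contract implemented above: block the signal first,
 * then receive it synchronously with a timeout.
 */
#if 0
#include <signal.h>

static int wait_for_usr1(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { 5, 0 };		/* five seconds */

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* avoid async delivery */
	return sigtimedwait(&set, &info, &ts);	/* -1/EAGAIN on timeout */
}
#endif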
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process.  This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && (p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
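/*
 * Hedged userspace sketch (not kernel code, compiled out): the tgid
 * check above means a recycled PID in some other process cannot be
 * signalled by mistake.
 */
#if 0
#include <sys/syscall.h>
#include <unistd.h>
#include <signal.h>

static int tgkill_usr1(pid_t tgid, pid_t tid)
{
	return syscall(SYS_tgkill, tgid, tid, SIGUSR1);
}
#endif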
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}

int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (verify_area(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
		error = 0;
	}
out:
	return error;
}
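/*
 * Hedged userspace sketch (not kernel code, compiled out) of the
 * sigaltstack() interface handled above: install an alternate stack so
 * a SIGSEGV handler can still run after overflowing the normal stack.
 */
#if 0
#include <signal.h>
#include <stdlib.h>

static int install_altstack(void)
{
	stack_t ss;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	return ss.ss_sp ? sigaltstack(&ss, NULL) : -1;
}
#endif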
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
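/*
 * Note: SA_ONESHOT | SA_NOMASK above give signal() its historic SysV
 * semantics -- the handler is reset to SIG_DFL on delivery and is not
 * blocked while it runs -- which is why portable code prefers
 * sigaction().
 */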
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}