/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>

/*
 * SLAB caches for signal bits.
 */
static kmem_cache_t *sigqueue_cachep;

/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
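
/*
 * A worked example of the (*) rules above: if a task has SIGTSTP pending
 * (even blocked), a SIGCONT sent to the group discards that SIGTSTP and
 * resumes the stopped threads, no matter how SIGCONT itself is blocked,
 * caught, or ignored.  Symmetrically, a later SIGSTOP discards any pending
 * SIGCONT before (possibly much later, or never) stopping the group.  See
 * handle_stop_signal() below for the implementation.
 */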
#define M_SIGEMT	M(SIGEMT)

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL) | M(SIGSTOP) )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT) | M(SIGILL)  | M(SIGTRAP) | M(SIGABRT) | \
	M(SIGFPE)  | M(SIGSEGV) | M(SIGBUS)  | M(SIGSYS)  | \
	M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )

#define sig_kernel_only(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
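
/*
 * For example, sig_kernel_stop(SIGTSTP) expands to
 *	(SIGTSTP < SIGRTMIN) && T(SIGTSTP, SIG_KERNEL_STOP_MASK)
 * which is nonzero, while sig_kernel_stop(SIGRTMIN) is 0: the
 * (sig) < SIGRTMIN guard keeps all of these kernel-internal semantics
 * away from the real-time signals, which always get the default
 * "terminate" treatment per the table above.
 */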
#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)

#define sig_avoid_stop_race() \
	(sigtestsetmask(&current->pending.signal, M(SIGCONT) | M(SIGKILL)) || \
	 sigtestsetmask(&current->signal->shared_pending.signal, \
			M(SIGCONT) | M(SIGKILL)))
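
/*
 * sig_avoid_stop_race() is consulted after the siglock has been dropped
 * and re-acquired on the stop paths (see do_signal_stop() and
 * get_signal_to_deliver() below): a SIGCONT or SIGKILL posted in the
 * unlocked window must cancel the stop in progress.
 */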
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return   handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}
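
/*
 * Example: when sigprocmask() below unblocks a signal that is already
 * sitting in ->pending, its call to recalc_sigpending() notices the now
 * deliverable bit and sets TIF_SIGPENDING, so the task enters the
 * signal-handling path on its way back to user mode.
 */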
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
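
/*
 * Note the return convention above: signals are numbered from 1, so 0
 * means "nothing deliverable".  The ffz(~x) + i*_NSIG_BPW + 1 arithmetic
 * turns a bit position in word i back into a signal number, and scanning
 * from word 0 upward means lower-numbered signals are serviced first.
 */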
static struct sigqueue *__sigqueue_alloc(void)
{
	struct sigqueue *q = NULL;

	if (atomic_read(&current->user->sigpending) <
			current->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
	if (q) {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->lock = NULL;
#warning MEF PLANETLAB: q->user = get_uid(current->user); is something new in Fedora Core.
		q->user = get_uid(current->user);
		atomic_inc(&q->user->sigpending);
	}
	return(q);
}

static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand = tsk->sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	spin_lock(&sighand->siglock);
	if (atomic_dec_and_test(&sig->count)) {
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * notification, wake it up.
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below. */
	}
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.  We delayed
		 * calling exit_itimers until after flush_sigqueue, just in
		 * case our thread-local pending queue contained a queued
		 * timer signal that would have been cleared in
		 * exit_itimers.  When that called sigqueue_free, it would
		 * attempt to re-take the tasklist_lock and deadlock.  This
		 * can never happen if we ensure that all queues the
		 * timer's signal might be queued on have been flushed
		 * first.  The shared_pending queue, and our own pending
		 * queue are the only queues the timer could be on, since
		 * there are no other threads left in the group and timer
		 * signals are constrained to threads inside the group.
		 */
		exit_itimers(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}
void exit_signal(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
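
/*
 * Typical use of the notifier pair above, as a sketch (the names dev and
 * my_notifier are illustrative, not taken from this file):
 *
 *	block_all_signals(my_notifier, dev, &blocked_mask);
 *	... critical section: for each signal in blocked_mask that is
 *	    generated, my_notifier(dev) decides whether delivery may
 *	    proceed (non-zero) or the signal stays blocked (0) ...
 *	unblock_all_signals();
 */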
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private){
		do_schedule_next_timer(info);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * If resume is set, we want to wake it up in the TASK_STOPPED case.
	 * We don't check for TASK_STOPPED because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By calling wake_up_process any time resume is set, we ensure
	 * the process will wake up and handle its stop or death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;

	if (sig < 0 || sig > _NSIG)
		return error;
	error = -EPERM;
	if ((!info || ((unsigned long)info != 1 &&
			(unsigned long)info != 2 && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;
	return security_task_kill(t, info, sig);
}

/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     struct task_struct *parent);
/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if ((unsigned long)info == 2)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	if (atomic_read(&t->user->sigpending) <
			t->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);

	if (q) {
		q->flags = 0;
#warning MEF PLANETLAB: q->user = get_uid(t->user); is something new in Fedora Core.
		q->user = get_uid(t->user);
		atomic_inc(&q->user->sigpending);
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else {
		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
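
/*
 * LEGACY_QUEUE() is how classic non-real-time signals coalesce: a second
 * SIGINT sent while one is already pending finds the bit set in
 * sigpending->signal and is dropped, so at most one instance is ever
 * delivered.  Signals >= SIGRTMIN never take this shortcut and queue
 * individually.
 */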
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
	if (!spin_is_locked(&t->sighand->siglock))
		BUG();

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal.  */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
		sigdelset(&t->blocked, sig);
		recalc_sigpending_tsk(t);
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, (void *)2, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
#define wants_signal(sig, p, mask)			\
	(!sigismember(&(p)->blocked, sig)		\
	 && !((p)->state & mask)			\
	 && !((p)->flags & PF_EXITING)			\
	 && (task_curr(p) || !signal_pending(p)))
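
/*
 * Example: a thread sleeping in TASK_INTERRUPTIBLE with SIG unblocked
 * and no signal already pending "wants" SIG and can be woken to dequeue
 * it from the shared queue; a stopped, exiting, or already-signalled
 * (and not currently running) thread does not.
 */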
static void
__group_complete_signal(int sig, struct task_struct *p, unsigned int mask)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p, mask))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t, mask)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !p->signal->group_exit &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->group_exit = 1;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned int mask;
	int ret = 0;

	if (!spin_is_locked(&p->sighand->siglock))
		BUG();
	handle_stop_signal(sig, p);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Don't bother zombies and stopped tasks (but
	 * SIGKILL will punch through stopped state)
	 */
	mask = TASK_DEAD | TASK_ZOMBIE;
	if (sig != SIGKILL)
		mask |= TASK_STOPPED;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p, mask);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->state & (TASK_ZOMBIE|TASK_DEAD))
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		sigaddset(&t->pending.signal, SIGKILL);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		signal_wake_up(t, 1);
	}
}
/*
 * Must be called with the tasklist_lock held for reading!
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	if (!vx_check(vx_task_xid(p), VX_ADMIN|VX_WATCH|VX_IDENT))
		return -ESRCH;

	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && p->sighand) {
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}

	return ret;
}
/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p;
	struct list_head *l;
	struct pid *pid;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	}
	return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}
/*
 * kill_sl_info() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */

int
kill_sl_info(int sig, struct siginfo *info, pid_t sid)
{
	int err, retval = -EINVAL;
	struct pid *pid;
	struct list_head *l;
	struct task_struct *p;

	if (sid <= 0)
		goto out;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	for_each_task_pid(sid, PIDTYPE_SID, p, l, pid) {
		if (!p->signal->leader)
			continue;
		err = group_send_sig_info(sig, info, p);
		if (retval)
			retval = err;
	}
	read_unlock(&tasklist_lock);
out:
	return retval;
}
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (sig < 0 || sig > _NSIG)
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_sl(pid_t sess, int sig, int priv)
{
	return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
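
/*
 * The POSIX timer flow, roughly (the callers live in kernel/posix-timers.c):
 * timer_create() calls sigqueue_alloc() and can fail with EAGAIN up front;
 * each expiry then passes the preallocated entry to send_sigqueue() or
 * send_group_sigqueue(), so expiry itself never has to allocate; and
 * timer_delete() releases the entry with sigqueue_free().
 */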
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc()))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		read_lock(&tasklist_lock);
		spin_lock_irqsave(q->lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(q->lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	unsigned int mask;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/*
	 * Don't bother zombies and stopped tasks (but
	 * SIGKILL will punch through stopped state)
	 */
	mask = TASK_DEAD | TASK_ZOMBIE;
	if (sig != SIGKILL)
		mask |= TASK_STOPPED;

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p, mask);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}
/*
 * Joy. Or not. Pthread wants us to wake up every thread
 * in our parent group.
 */
static void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	struct task_struct *tsk = parent;

	/*
	 * Fortunately this is not necessary for thread groups:
	 */
	if (p->tgid == tsk->tgid) {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		return;
	}

	do {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		tsk = next_thread(tsk);
		if (tsk->signal != parent->signal)
			BUG();
	} while (tsk != parent);
}
/*
 * Let a parent know about a status change of a child.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	int why, status;
	struct sighand_struct *psig;

	if (sig == -1)
		BUG();

	BUG_ON(tsk->group_leader != tsk && tsk->group_leader->state != TASK_ZOMBIE && !tsk->ptrace);
	BUG_ON(tsk->group_leader == tsk && !thread_group_empty(tsk) && !tsk->ptrace);

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime;
	info.si_stime = tsk->stime;

	status = tsk->exit_code & 0x7f;
	why = SI_KERNEL;	/* shouldn't happen */
	switch (tsk->state) {
	case TASK_STOPPED:
		/* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
		if (tsk->ptrace & PT_PTRACED)
			why = CLD_TRAPPED;
		else
			why = CLD_STOPPED;
		break;

	default:
		if (tsk->exit_code & 0x80)
			why = CLD_DUMPED;
		else if (tsk->exit_code & 0x7f)
			why = CLD_KILLED;
		else {
			why = CLD_EXITED;
			status = tsk->exit_code >> 8;
		}
		break;
	}

	info.si_code = why;
	info.si_status = status;

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD && tsk->state != TASK_STOPPED &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (sig > 0 && sig <= _NSIG)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
/*
 * We need the tasklist lock because it's the only
 * thing that protects our "parent" pointer.
 *
 * exit.c calls "do_notify_parent()" directly, because
 * it already has the tasklist lock.
 */
void
notify_parent(struct task_struct *tsk, int sig)
{
	if (sig != -1) {
		read_lock(&tasklist_lock);
		do_notify_parent(tsk, sig);
		read_unlock(&tasklist_lock);
	}
}
static void
do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime;
	info.si_stime = tsk->stime;

	info.si_status = tsk->exit_code & 0x7f;
	info.si_code = CLD_STOPPED;

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
int print_fatal_signals = 0;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	int i;
	unsigned char insn;

	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, current->pid, signr);

#ifdef __i386__
	printk("code at %08lx: ", regs->eip);
	for (i = 0; i < 16; i++) {
		__get_user(insn, (unsigned char *)(regs->eip + i));
		printk("%02x ", insn);
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
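
/*
 * Example: booting with "print-fatal-signals=1" on the kernel command
 * line makes get_option() set print_fatal_signals, so every fatal signal
 * (and every dequeued SIGSEGV, see get_signal_to_deliver() below) logs
 * the comm/pid and the code bytes at the faulting EIP.
 */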
#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, current->parent);
		read_unlock(&tasklist_lock);
	}
	else if (stop_count == 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader,
					 current->group_leader->real_parent);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 */
static void
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	/* spin_lock_irq(&sighand->siglock) is now done in caller */

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (unlikely(sig->group_exit)) {
			/*
			 * There is a group exit in progress now.
			 * We'll just ignore the stop and process the
			 * associated fatal signal.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (unlikely(sig_avoid_stop_race())) {
			/*
			 * Either a SIGCONT or a SIGKILL signal was
			 * posted in the siglock-not-held window.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (t->state < TASK_STOPPED) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
}
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump.
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->group_exit)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((signr == SIGSEGV) && print_fatal_signals) {
			spin_unlock_irq(&current->sighand->siglock);
			print_fatal_signal(regs, signr);
			spin_lock_irq(&current->sighand->siglock);
		}
		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/*
			 * If there is a group stop in progress,
			 * we must participate in the bookkeeping.
			 */
			if (current->signal->group_stop_count > 0)
				--current->signal->group_stop_count;

			/* Let the debugger run.  */
			current->exit_code = signr;
			current->last_siginfo = info;
			set_current_state(TASK_STOPPED);
			spin_unlock_irq(&current->sighand->siglock);
			notify_parent(current, SIGCHLD);
			schedule();

			current->last_siginfo = NULL;

			/* We're back.  Did the debugger cancel the sig?  */
			spin_lock_irq(&current->sighand->siglock);
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) /* Run the handler.  */
			break; /* will return non-zero "signr" value */

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr == SIGSTOP) {
				do_signal_stop(signr); /* releases siglock */
				goto relock;
			}
			spin_unlock_irq(&current->sighand->siglock);

			/* signals can be posted during this window */

			if (is_orphaned_pgrp(process_group(current)))
				goto relock;

			spin_lock_irq(&current->sighand->siglock);
			if (unlikely(sig_avoid_stop_race())) {
				/*
				 * Either a SIGCONT or a SIGKILL signal was
				 * posted in the siglock-not-held window.
				 */
				continue;
			}

			do_signal_stop(signr); /* releases siglock */
			goto relock;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (print_fatal_signals)
			print_fatal_signal(regs, signr);
		if (sig_kernel_coredump(signr) &&
		    do_coredump((long)signr, signr, regs)) {
			/*
			 * That killed all other threads in the group and
			 * synchronized with their demise, so there can't
			 * be any more left to kill now.  The group_exit
			 * flags are set by do_coredump.  Note that
			 * thread_group_empty won't always be true yet,
			 * because those threads were blocked in __exit_mm
			 * and we just let them go to finish dying.
			 */
			const int code = signr | 0x80;
			BUG_ON(!current->signal->group_exit);
			BUG_ON(current->signal->group_exit_code != code);
			do_exit(code);
			/* NOTREACHED */
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}

#endif
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(send_group_sig_info);
EXPORT_SYMBOL(sigqueue_alloc);
EXPORT_SYMBOL(sigqueue_free);
EXPORT_SYMBOL(send_sigqueue);
EXPORT_SYMBOL(send_group_sigqueue);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
		break;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}
asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));
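
		/*
		 * The "+ (ts.tv_sec || ts.tv_nsec)" term rounds any
		 * non-zero timeout up by one jiffy, so a short but
		 * non-zero wait cannot be truncated to an immediate
		 * return by the jiffies conversion.
		 */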
		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process.  This method
 *  solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && (p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (verify_area(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly
		 *  old code may have been written using ss_flags==0
		 *  to mean ss_flags==SS_ONSTACK (as this was the only
		 *  way that worked) - this fix preserves that older
		 *  mechanism
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
		error = 0;
	}

out:
	return error;
}
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
		error = 0;
	}
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}