/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 *	Changes to use preallocated sigqueue structures
 *	to allow signals to be sent reliably.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;

atomic_t nr_queued_signals;
int max_queued_signals = 1024;
/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))
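
/*
 * Example: with 1-based signal numbers mapped to 0-based bits, M(SIGKILL)
 * expands to (1UL << (SIGKILL-1)), i.e. bit 8 for SIGKILL == 9.  So
 *
 *	T(SIGKILL, SIG_KERNEL_ONLY_MASK)  -> nonzero
 *	T(SIGTERM, SIG_KERNEL_ONLY_MASK)  -> 0
 *
 * T(sig, mask) is just a one-bit membership test against a
 * compile-time constant mask.
 */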
#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL)   |  M(SIGSTOP)                                   )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )

#define SIG_KERNEL_COREDUMP_MASK (\
        M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
        M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
        M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )

#define SIG_KERNEL_IGNORE_MASK (\
        M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )
#define sig_kernel_only(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
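
/*
 * Note the (sig) < SIGRTMIN guard in each predicate above: realtime
 * signals never have kernel-internal semantics.  E.g.
 *
 *	sig_kernel_stop(SIGTSTP)     -> nonzero
 *	sig_kernel_stop(SIGRTMIN+1)  -> 0
 *
 * so an unhandled rt signal always falls into the "terminate" default.
 */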
#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)

#define sig_avoid_stop_race() \
	(sigtestsetmask(&current->pending.signal, M(SIGCONT) | M(SIGKILL)) || \
	 sigtestsetmask(&current->signal->shared_pending.signal, \
						  M(SIGCONT) | M(SIGKILL)))
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return   handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
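
/*
 * Example: with _NSIG_WORDS == 2 (64 signals, 32-bit longs),
 * PENDING(&t->pending, &t->blocked) reduces to
 *
 *	(pending.signal.sig[1] & ~blocked.sig[1]) |
 *	(pending.signal.sig[0] & ~blocked.sig[0])
 *
 * which is nonzero exactly when some pending signal is not blocked.
 */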
fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
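
/*
 * Example: if the low word of the pending set is 0x104 (SIGQUIT and
 * SIGKILL pending) and nothing is masked, then x == 0x104 and
 * ffz(~x) == 2, so next_signal() returns 2 + 1 == 3 (SIGQUIT):
 * the lowest-numbered deliverable signal wins.
 */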
static struct sigqueue *__sigqueue_alloc(void)
{
	struct sigqueue *q = 0;

	if (atomic_read(&nr_queued_signals) < max_queued_signals)
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
	if (q) {
		atomic_inc(&nr_queued_signals);
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
	}
	return(q);
}

static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	kmem_cache_free(sigqueue_cachep, q);
	atomic_dec(&nr_queued_signals);
}
static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand = tsk->sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	spin_lock(&sighand->siglock);
	if (atomic_dec_and_test(&sig->count)) {
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below. */
	}
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.  We delayed
		 * calling exit_itimers until after flush_sigqueue, just in
		 * case our thread-local pending queue contained a queued
		 * timer signal that would have been cleared in
		 * exit_itimers.  When that called sigqueue_free, it would
		 * attempt to re-take the tasklist_lock and deadlock.  This
		 * can never happen if we ensure that all queues the
		 * timer's signal might be queued on have been flushed
		 * first.  The shared_pending queue, and our own pending
		 * queue are the only queues the timer could be on, since
		 * there are no other threads left in the group and timer
		 * signals are constrained to threads inside the group.
		 */
		exit_itimers(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}

void exit_signal(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

EXPORT_SYMBOL_GPL(flush_signal_handlers);
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
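
/*
 * A typical caller is a driver that must not have its process killed
 * while user space holds a hardware lock (the DRM lock ioctl, for
 * instance): it installs a notifier around the critical window and
 * calls unblock_all_signals() once the lock is dropped.
 */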
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = 0;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}
	recalc_sigpending();

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if ( signr &&
	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	     info->si_sys_private){
		do_schedule_next_timer(info);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * If resume is set, we want to wake it up in the TASK_STOPPED case.
	 * We don't check for TASK_STOPPED because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By waking in the stopped state any time resume is set, we ensure
	 * the process will wake up and handle its stop or death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (sig < 0 || sig > _NSIG)
		return error;
	error = -EPERM;
	if ((!info || ((unsigned long)info != 1 &&
			(unsigned long)info != 2 && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;
	return security_task_kill(t, info, sig);
}
/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     struct task_struct *parent);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);
	}
}
static int send_signal(int sig, struct siginfo *info, struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if ((unsigned long)info == 2)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	if (atomic_read(&nr_queued_signals) < max_queued_signals)
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);

	if (q) {
		atomic_inc(&nr_queued_signals);
		q->flags = 0;
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else {
		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
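
/*
 * LEGACY_QUEUE() implements the classic System V semantics for
 * non-realtime signals: at most one instance may be pending.  E.g. if
 * SIGINT is already pending, a second SIGINT is simply dropped, while
 * rt signals (>= SIGRTMIN) queue a separate sigqueue entry each time.
 */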
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
#ifdef CONFIG_SMP
	if (!spin_is_locked(&t->sighand->siglock))
		BUG();
#endif

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
		sigdelset(&t->blocked, sig);
		recalc_sigpending_tsk(t);
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, (void *)2, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
#define wants_signal(sig, p, mask) 			\
	(!sigismember(&(p)->blocked, sig)		\
	 && !((p)->state & mask)			\
	 && !((p)->flags & PF_EXITING)			\
	 && (task_curr(p) || !signal_pending(p)))
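
/*
 * E.g. a thread that is TASK_STOPPED (and the signal is not SIGKILL,
 * so TASK_STOPPED is part of the mask), or that is exiting, or that
 * already has a signal pending while not running, does not "want" the
 * signal: waking it would buy nothing, since it will pass through
 * dequeue_signal() anyway before returning to user mode.
 */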
static void
__group_complete_signal(int sig, struct task_struct *p, unsigned int mask)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p, mask))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t, mask)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !p->signal->group_exit &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->group_exit = 1;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned int mask;
	int ret = 0;

#ifdef CONFIG_SMP
	if (!spin_is_locked(&p->sighand->siglock))
		BUG();
#endif
	handle_stop_signal(sig, p);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Don't bother zombies and stopped tasks (but
	 * SIGKILL will punch through stopped state)
	 */
	mask = TASK_DEAD | TASK_ZOMBIE;
	if (sig != SIGKILL)
		mask |= TASK_STOPPED;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p, mask);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->state & (TASK_ZOMBIE|TASK_DEAD))
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar.  So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		sigaddset(&t->pending.signal, SIGKILL);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		signal_wake_up(t, 1);
	}
}
/*
 * Must be called with the tasklist_lock held for reading!
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	if (!vx_check(vx_task_xid(p), VX_ADMIN|VX_WATCH|VX_IDENT))
		return -ESRCH;

	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && p->sighand) {
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}

	return ret;
}
/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p;
	struct list_head *l;
	struct pid *pid;
	int found = 0, retval = -ESRCH;
	int err;

	if (pgrp <= 0)
		return -EINVAL;

	for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) {
		found++;
		err = group_send_sig_info(sig, info, p);
		if (err != -EPERM)
			retval = err;
	}
	return found ? retval : -ESRCH;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}
/*
 * kill_sl_info() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */

int
kill_sl_info(int sig, struct siginfo *info, pid_t sid)
{
	int err, retval = -EINVAL;
	struct pid *pid;
	struct list_head *l;
	struct task_struct *p;

	if (sid <= 0)
		goto out;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	for_each_task_pid(sid, PIDTYPE_SID, p, l, pid) {
		if (!p->signal->leader)
			continue;
		err = group_send_sig_info(sig, info, p);
		if (retval)
			retval = err;
	}
	read_unlock(&tasklist_lock);
out:
	return retval;
}
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_sl(pid_t sess, int sig, int priv)
{
	return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc()))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}
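
/*
 * Sketch of the POSIX timer life cycle around these helpers (the
 * timer_create/timer_delete callers live in kernel/posix-timers.c):
 *
 *	q = sigqueue_alloc();		timer_create(): may fail -> EAGAIN
 *	send_sigqueue(sig, q, task);	each expiry: reuses the same entry,
 *					bumping si_overrun if still queued
 *	sigqueue_free(q);		timer_delete(): unhooks a still-
 *					pending entry, then frees it
 */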
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		read_lock(&tasklist_lock);
		spin_lock_irqsave(q->lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(q->lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	unsigned int mask;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Don't bother zombies and stopped tasks (but
	 * SIGKILL will punch through stopped state)
	 */
	mask = TASK_DEAD | TASK_ZOMBIE;
	if (sig != SIGKILL)
		mask |= TASK_STOPPED;

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p, mask);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}
/*
 * Joy. Or not. Pthread wants us to wake up every thread
 * in our parent group.
 */
static void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	struct task_struct *tsk = parent;

	/*
	 * Fortunately this is not necessary for thread groups:
	 */
	if (p->tgid == tsk->tgid) {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		return;
	}

	do {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		tsk = next_thread(tsk);
		if (tsk->signal != parent->signal)
			BUG();
	} while (tsk != parent);
}
/*
 * Let a parent know about a status change of a child.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	int why, status;
	struct sighand_struct *psig;

	if (sig == -1)
		BUG();

	BUG_ON(tsk->group_leader != tsk && tsk->group_leader->state != TASK_ZOMBIE && !tsk->ptrace);
	BUG_ON(tsk->group_leader == tsk && !thread_group_empty(tsk) && !tsk->ptrace);

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime;
	info.si_stime = tsk->stime;

	status = tsk->exit_code & 0x7f;
	why = SI_KERNEL;	/* shouldn't happen */
	switch (tsk->state) {
	case TASK_STOPPED:
		/* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
		if (tsk->ptrace & PT_PTRACED)
			why = CLD_TRAPPED;
		else
			why = CLD_STOPPED;
		break;

	default:
		if (tsk->exit_code & 0x80)
			why = CLD_DUMPED;
		else if (tsk->exit_code & 0x7f)
			why = CLD_KILLED;
		else {
			why = CLD_EXITED;
			status = tsk->exit_code >> 8;
		}
		break;
	}
	info.si_code = why;
	info.si_status = status;

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD && tsk->state != TASK_STOPPED &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (sig > 0 && sig <= _NSIG)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
/*
 * We need the tasklist lock because it's the only
 * thing that protects our "parent" pointer.
 *
 * exit.c calls "do_notify_parent()" directly, because
 * it already has the tasklist lock.
 */
void
notify_parent(struct task_struct *tsk, int sig)
{
	if (sig != -1) {
		read_lock(&tasklist_lock);
		do_notify_parent(tsk, sig);
		read_unlock(&tasklist_lock);
	}
}
static void
do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime;
	info.si_stime = tsk->stime;

	info.si_status = tsk->exit_code & 0x7f;
	info.si_code = CLD_STOPPED;

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
int print_fatal_signals = 0;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	int i;
	unsigned char insn;

	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, current->pid, signr);

#ifdef __i386__
	printk("code at %08lx: ", regs->eip);
	for (i = 0; i < 16; i++) {
		__get_user(insn, (unsigned char *)(regs->eip + i));
		printk("%02x ", insn);
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
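
/*
 * Usage: boot with "print-fatal-signals=1" to log every fatal signal
 * (and dump the faulting code bytes on i386), e.g.:
 *
 *	sh/542: potentially unexpected fatal signal 11.
 *
 * Useful when hunting stray SIGSEGVs during early userspace bringup.
 */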
#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, current->parent);
		read_unlock(&tasklist_lock);
	}
	else if (stop_count == 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader,
					 current->group_leader->real_parent);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 */
static void
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	/* spin_lock_irq(&sighand->siglock) is now done in caller */

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (unlikely(sig->group_exit)) {
			/*
			 * There is a group exit in progress now.
			 * We'll just ignore the stop and process the
			 * associated fatal signal.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (unlikely(sig_avoid_stop_race())) {
			/*
			 * Either a SIGCONT or a SIGKILL signal was
			 * posted in the siglock-not-held window.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (t->state < TASK_STOPPED) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
}
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->group_exit)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((signr == SIGSEGV) && print_fatal_signals) {
			spin_unlock_irq(&current->sighand->siglock);
			print_fatal_signal(regs, signr);
			spin_lock_irq(&current->sighand->siglock);
		}
		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/*
			 * If there is a group stop in progress,
			 * we must participate in the bookkeeping.
			 */
			if (current->signal->group_stop_count > 0)
				--current->signal->group_stop_count;

			/* Let the debugger run.  */
			current->exit_code = signr;
			current->last_siginfo = info;
			set_current_state(TASK_STOPPED);
			spin_unlock_irq(&current->sighand->siglock);
			notify_parent(current, SIGCHLD);
			schedule();

			current->last_siginfo = NULL;

			/* We're back.  Did the debugger cancel the sig?  */
			spin_lock_irq(&current->sighand->siglock);
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) /* Run the handler.  */
			break; /* will return non-zero "signr" value */

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr == SIGSTOP) {
				do_signal_stop(signr); /* releases siglock */
				goto relock;
			}
			spin_unlock_irq(&current->sighand->siglock);

			/* signals can be posted during this window */

			if (is_orphaned_pgrp(process_group(current)))
				goto relock;

			spin_lock_irq(&current->sighand->siglock);
			if (unlikely(sig_avoid_stop_race())) {
				/*
				 * Either a SIGCONT or a SIGKILL signal was
				 * posted in the siglock-not-held window.
				 */
				continue;
			}

			do_signal_stop(signr); /* releases siglock */
			goto relock;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (print_fatal_signals)
			print_fatal_signal(regs, signr);
		if (sig_kernel_coredump(signr) &&
		    do_coredump((long)signr, signr, regs)) {
			/*
			 * That killed all other threads in the group and
			 * synchronized with their demise, so there can't
			 * be any more left to kill now.  The group_exit
			 * flags are set by do_coredump.  Note that
			 * thread_group_empty won't always be true yet,
			 * because those threads were blocked in __exit_mm
			 * and we just let them go to finish dying.
			 */
			const int code = signr | 0x80;
			BUG_ON(!current->signal->group_exit);
			BUG_ON(current->signal->group_exit_code != code);
			do_exit(code);
			/* NOTREACHED */
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
#endif

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(send_group_sig_info);
EXPORT_SYMBOL(sigqueue_alloc);
EXPORT_SYMBOL(sigqueue_free);
EXPORT_SYMBOL(send_sigqueue);
EXPORT_SYMBOL(send_group_sigqueue);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}
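
/*
 * Example (kernel-internal callers only): a kernel thread that wants
 * to handle just SIGTERM can do
 *
 *	sigset_t all;
 *	siginitsetinv(&all, sigmask(SIGTERM));
 *	sigprocmask(SIG_SETMASK, &all, NULL);
 *
 * Note that, unlike sys_rt_sigprocmask() below, nothing here strips
 * SIGKILL/SIGSTOP from the new mask.
 */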
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ: /* But this is.  */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock the signals
			 * we're interested in while we sleep, so that
			 * we'll be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process.  This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && (p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (verify_area(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifndef __sparc__
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __sparc__ */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
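
/*
 * I.e. signal(2) here is just sigaction(2) with SA_ONESHOT|SA_NOMASK:
 * the handler is reset to SIG_DFL on delivery and the signal is not
 * blocked while its handler runs -- the historic "unreliable" SysV
 * behaviour that modern code avoids by calling sigaction() directly.
 */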
#endif /* __ARCH_WANT_SYS_SIGNAL */
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}