/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;
/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))
#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL)  |  M(SIGSTOP)                                    )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP)  |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)    )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT)  |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)  | \
	M(SIGFPE)   |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)   | \
	M(SIGXCPU)  |  M(SIGXFSZ)   |  M_SIGEMT                      )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT)  |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)     )
#define sig_kernel_only(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
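/*
 * A quick worked example of the classification above (illustrative
 * comment only, nothing in this file spells it out this way):
 *
 *	sig_kernel_stop(SIGTSTP)  -> nonzero: SIGTSTP < SIGRTMIN and its
 *	                             bit is set in SIG_KERNEL_STOP_MASK.
 *	sig_kernel_stop(SIGRTMIN) -> 0: real-time signals never match, so
 *	                             they always take the "terminate"
 *	                             default from the table above.
 */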
#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)

#define sig_avoid_stop_race() \
	(sigtestsetmask(&current->pending.signal, M(SIGCONT) | M(SIGKILL)) || \
	 sigtestsetmask(&current->signal->shared_pending.signal, \
			M(SIGCONT) | M(SIGKILL)))
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return	handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}
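/*
 * Illustration (comment only, not kernel code): if SIGTERM is pending
 * but sits in ->blocked, PENDING() masks it out and TIF_SIGPENDING stays
 * clear; a later sigprocmask(SIG_UNBLOCK, ...) ends up back in
 * recalc_sigpending(), which then sets the flag, and the task dequeues
 * the signal on its next return to user mode.
 */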
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}
	return sig;
}
static struct sigqueue *__sigqueue_alloc(void)
{
	struct sigqueue *q = NULL;

	if (atomic_read(&current->user->sigpending) <
			current->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
	if (q) {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->lock = NULL;
		q->user = get_uid(current->user);
		atomic_inc(&q->user->sigpending);
	}
	return q;
}
static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand = tsk->sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	spin_lock(&sighand->siglock);
	if (atomic_dec_and_test(&sig->count)) {
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below. */
	}
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.  We delayed
		 * calling exit_itimers until after flush_sigqueue, just in
		 * case our thread-local pending queue contained a queued
		 * timer signal that would have been cleared in
		 * exit_itimers.  When that called sigqueue_free, it would
		 * attempt to re-take the tasklist_lock and deadlock.  This
		 * can never happen if we ensure that all queues the
		 * timer's signal might be queued on have been flushed
		 * first.  The shared_pending queue, and our own pending
		 * queue are the only queues the timer could be on, since
		 * there are no other threads left in the group and timer
		 * signals are constrained to threads inside the group.
		 */
		exit_itimers(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}

void exit_signal(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
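/*
 * Sketch of intended driver usage (hypothetical driver code; my_notifier,
 * my_dev and in_critical are made-up names for illustration):
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return !dev->in_critical;	// 0 keeps the signal blocked
 *	}
 *
 *	sigset_t mask;
 *	sigfillset(&mask);
 *	block_all_signals(my_notifier, dev, &mask);
 *	... critical window ...
 *	unblock_all_signals();
 */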
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}
	recalc_sigpending();

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private){
		do_schedule_next_timer(info);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * If resume is set, we want to wake it up in the TASK_STOPPED case.
	 * We don't check for TASK_STOPPED because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By calling wake_up_process any time resume is set, we ensure
	 * the process will wake up and handle its stop or death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (sig < 0 || sig > _NSIG)
		return error;
	error = -EPERM;
	if ((!info || ((unsigned long)info != 1 &&
			(unsigned long)info != 2 && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;
	return security_task_kill(t, info, sig);
}
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     struct task_struct *parent);
/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if ((unsigned long)info == 2)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	if (atomic_read(&t->user->sigpending) <
			t->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);

	if (q) {
		q->flags = 0;
		q->user = get_uid(t->user);
		atomic_inc(&q->user->sigpending);
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else {
		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER)
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			return -EAGAIN;
		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
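/*
 * Example of the legacy semantics this macro preserves: a second SIGINT
 * sent while one is already pending is merged with the first (classic
 * non-queued behaviour), whereas LEGACY_QUEUE(..., SIGRTMIN) is always
 * false, so every real-time signal gets its own queue entry and its own
 * siginfo.
 */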
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
	if (!spin_is_locked(&t->sighand->siglock))
		BUG();

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
		sigdelset(&t->blocked, sig);
		recalc_sigpending_tsk(t);
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, (void *)2, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
#define wants_signal(sig, p, mask)			\
	(!sigismember(&(p)->blocked, sig)		\
	 && !((p)->state & mask)			\
	 && !((p)->flags & PF_EXITING)			\
	 && (task_curr(p) || !signal_pending(p)))
static void
__group_complete_signal(int sig, struct task_struct *p, unsigned int mask)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p, mask))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t, mask)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !p->signal->group_exit &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->group_exit = 1;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned int mask;
	int ret = 0;

	if (!spin_is_locked(&p->sighand->siglock))
		BUG();
	handle_stop_signal(sig, p);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Don't bother zombies and stopped tasks (but
	 * SIGKILL will punch through stopped state)
	 */
	mask = TASK_DEAD | TASK_ZOMBIE;
	if (sig != SIGKILL)
		mask |= TASK_STOPPED;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p, mask);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->state & (TASK_ZOMBIE|TASK_DEAD))
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		sigaddset(&t->pending.signal, SIGKILL);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		signal_wake_up(t, 1);
	}
}
/*
 * Must be called with the tasklist_lock held for reading!
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && p->sighand) {
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}

	return ret;
}
/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p;
	struct list_head *l;
	struct pid *pid;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	}
	return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}
/*
 * kill_sl_info() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */

int
kill_sl_info(int sig, struct siginfo *info, pid_t sid)
{
	int err, retval = -EINVAL;
	struct pid *pid;
	struct list_head *l;
	struct task_struct *p;

	if (sid <= 0)
		goto out;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	for_each_task_pid(sid, PIDTYPE_SID, p, l, pid) {
		if (!p->signal->leader)
			continue;
		err = group_send_sig_info(sig, info, p);
		if (retval)
			retval = err;
	}
	read_unlock(&tasklist_lock);
out:
	return retval;
}
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
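/*
 * Summary of the pid interpretation above, matching kill(2):
 *
 *	pid > 0    the process with that pid
 *	pid == 0   every process in the caller's process group
 *	pid == -1  every process the caller may signal, except init and
 *	           the caller's own thread group
 *	pid < -1   every process in the process group -pid
 */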
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (sig < 0 || sig > _NSIG)
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}
/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_sl(pid_t sess, int sig, int priv)
{
	return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc()))
		q->flags |= SIGQUEUE_PREALLOC;
	return q;
}
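/*
 * Sketch of the intended call sequence (illustrative comment; the real
 * caller is the POSIX timer code, not this file):
 *
 *	q = sigqueue_alloc();		// at timer_create() time; may fail,
 *	if (q == NULL)			// so the failure can be reported
 *		return -EAGAIN;		// to the application up front
 *	...
 *	send_sigqueue(sig, q, task);	// at each expiry: cannot run out
 *					// of memory, the entry is ours
 *	...
 *	sigqueue_free(q);		// at timer_delete() time
 */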
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		read_lock(&tasklist_lock);
		spin_lock_irqsave(q->lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(q->lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	unsigned int mask;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Don't bother zombies and stopped tasks (but
	 * SIGKILL will punch through stopped state)
	 */
	mask = TASK_DEAD | TASK_ZOMBIE;
	if (sig != SIGKILL)
		mask |= TASK_STOPPED;

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p, mask);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
/*
 * Joy. Or not. Pthread wants us to wake up every thread
 * in our parent group.
 */
static void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	struct task_struct *tsk = parent;

	/*
	 * Fortunately this is not necessary for thread groups:
	 */
	if (p->tgid == tsk->tgid) {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		return;
	}

	do {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		tsk = next_thread(tsk);
		if (tsk->signal != parent->signal)
			BUG();
	} while (tsk != parent);
}
/*
 * Let a parent know about a status change of a child.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	int why, status;
	struct sighand_struct *psig;

	if (sig == -1)
		BUG();

	BUG_ON(tsk->group_leader != tsk && tsk->group_leader->state != TASK_ZOMBIE && !tsk->ptrace);
	BUG_ON(tsk->group_leader == tsk && !thread_group_empty(tsk) && !tsk->ptrace);

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime;
	info.si_stime = tsk->stime;

	status = tsk->exit_code & 0x7f;
	why = SI_KERNEL;	/* shouldn't happen */
	switch (tsk->state) {
	case TASK_STOPPED:
		/* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
		if (tsk->ptrace & PT_PTRACED)
			why = CLD_TRAPPED;
		else
			why = CLD_STOPPED;
		break;

	default:
		if (tsk->exit_code & 0x80)
			why = CLD_DUMPED;
		else if (tsk->exit_code & 0x7f)
			why = CLD_KILLED;
		else {
			why = CLD_EXITED;
			status = tsk->exit_code >> 8;
		}
		break;
	}
	info.si_code = why;
	info.si_status = status;

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD && tsk->state != TASK_STOPPED &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (sig > 0 && sig <= _NSIG)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
/*
 * We need the tasklist lock because it's the only
 * thing that protects our "parent" pointer.
 *
 * exit.c calls "do_notify_parent()" directly, because
 * it already has the tasklist lock.
 */
void
notify_parent(struct task_struct *tsk, int sig)
{
	if (sig != -1) {
		read_lock(&tasklist_lock);
		do_notify_parent(tsk, sig);
		read_unlock(&tasklist_lock);
	}
}
static void
do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime;
	info.si_stime = tsk->stime;

	info.si_status = tsk->exit_code & 0x7f;
	info.si_code = CLD_STOPPED;

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
int print_fatal_signals = 0;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	int i;
	unsigned char insn;

	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, current->pid, signr);

#ifdef __i386__
	printk("code at %08lx: ", regs->eip);
	for (i = 0; i < 16; i++) {
		__get_user(insn, (unsigned char *)(regs->eip + i));
		printk("%02x ", insn);
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, current->parent);
		read_unlock(&tasklist_lock);
	}
	else if (stop_count == 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader,
					 current->group_leader->real_parent);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 */
static void
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	/* spin_lock_irq(&sighand->siglock) is now done in caller */

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (unlikely(sig->group_exit)) {
			/*
			 * There is a group exit in progress now.
			 * We'll just ignore the stop and process the
			 * associated fatal signal.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (unlikely(sig_avoid_stop_race())) {
			/*
			 * Either a SIGCONT or a SIGKILL signal was
			 * posted in the siglock-not-held window.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (t->state < TASK_STOPPED) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
}
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->group_exit)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((signr == SIGSEGV) && print_fatal_signals) {
			spin_unlock_irq(&current->sighand->siglock);
			print_fatal_signal(regs, signr);
			spin_lock_irq(&current->sighand->siglock);
		}
		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/*
			 * If there is a group stop in progress,
			 * we must participate in the bookkeeping.
			 */
			if (current->signal->group_stop_count > 0)
				--current->signal->group_stop_count;

			/* Let the debugger run.  */
			current->exit_code = signr;
			current->last_siginfo = info;
			set_current_state(TASK_STOPPED);
			spin_unlock_irq(&current->sighand->siglock);
			notify_parent(current, SIGCHLD);
			schedule();

			current->last_siginfo = NULL;

			/* We're back.  Did the debugger cancel the sig?  */
			spin_lock_irq(&current->sighand->siglock);
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) /* Run the handler.  */
			break; /* will return non-zero "signr" value */

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr == SIGSTOP) {
				do_signal_stop(signr); /* releases siglock */
				goto relock;
			}
			spin_unlock_irq(&current->sighand->siglock);

			/* signals can be posted during this window */

			if (is_orphaned_pgrp(process_group(current)))
				goto relock;

			spin_lock_irq(&current->sighand->siglock);
			if (unlikely(sig_avoid_stop_race())) {
				/*
				 * Either a SIGCONT or a SIGKILL signal was
				 * posted in the siglock-not-held window.
				 */
				continue;
			}

			do_signal_stop(signr); /* releases siglock */
			goto relock;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (print_fatal_signals)
			print_fatal_signal(regs, signr);
		if (sig_kernel_coredump(signr) &&
		    do_coredump((long)signr, signr, regs)) {
			/*
			 * That killed all other threads in the group and
			 * synchronized with their demise, so there can't
			 * be any more left to kill now.  The group_exit
			 * flags are set by do_coredump.  Note that
			 * thread_group_empty won't always be true yet,
			 * because those threads were blocked in __exit_mm
			 * and we just let them go to finish dying.
			 */
			const int code = signr | 0x80;
			BUG_ON(!current->signal->group_exit);
			BUG_ON(current->signal->group_exit_code != code);
			do_exit(code);
			/* NOTREACHED */
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}

#endif
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(send_group_sig_info);
EXPORT_SYMBOL(sigqueue_alloc);
EXPORT_SYMBOL(sigqueue_free);
EXPORT_SYMBOL(send_sigqueue);
EXPORT_SYMBOL(send_group_sigqueue);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 *
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
	} else {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);
	}

	if (oset) {
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we sleep, so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process.  This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && (p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
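/*
 * Userspace view (illustrative comment): at the time of writing glibc had
 * no wrapper for this call, so thread libraries invoke it directly, e.g.
 *
 *	syscall(__NR_tgkill, getpid(), target_tid, SIGUSR1);
 *
 * The tgid check means a tid recycled into another process yields -ESRCH
 * instead of signalling the wrong task.
 */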
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
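/*
 * Userspace view (illustrative comment): this is the backend of
 * sigqueue(3):
 *
 *	union sigval val = { .sival_int = 42 };
 *	sigqueue(pid, SIGRTMIN, val);	// libc builds the siginfo with
 *					// si_code = SI_QUEUE (< 0)
 *
 * The si_code >= 0 check above is what prevents a caller from forging
 * kernel-generated codes such as SI_KERNEL.
 */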
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (verify_area(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
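/*
 * The SA_ONESHOT | SA_NOMASK combination above reproduces historic SysV
 * signal() semantics: the handler is reset to SIG_DFL on delivery and
 * the signal is not blocked while its handler runs.  A rough userspace
 * equivalent via sigaction (illustrative comment only):
 *
 *	struct sigaction sa = { .sa_handler = handler,
 *				.sa_flags = SA_RESETHAND | SA_NODEFER };
 *	sigaction(sig, &sa, NULL);
 */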
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}