/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 * Changes to use preallocated sigqueue structures
 * to allow signals to be sent reliably.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;
/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore     - Nothing Happens
 *   terminate  - kill the process, i.e. all threads in the group,
 *                similar to exit_group.  The group leader (only) reports
 *                WIFSIGNALED status to its parent.
 *   coredump   - write a core dump file describing all threads using
 *                the same mm and then kill all those threads
 *   stop       - stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *      +--------------------+------------------+
 *      |  POSIX signal      |  default action  |
 *      +--------------------+------------------+
 *      |  SIGHUP            |  terminate       |
 *      |  SIGINT            |  terminate       |
 *      |  SIGQUIT           |  coredump        |
 *      |  SIGILL            |  coredump        |
 *      |  SIGTRAP           |  coredump        |
 *      |  SIGABRT/SIGIOT    |  coredump        |
 *      |  SIGBUS            |  coredump        |
 *      |  SIGFPE            |  coredump        |
 *      |  SIGKILL           |  terminate(+)    |
 *      |  SIGUSR1           |  terminate       |
 *      |  SIGSEGV           |  coredump        |
 *      |  SIGUSR2           |  terminate       |
 *      |  SIGPIPE           |  terminate       |
 *      |  SIGALRM           |  terminate       |
 *      |  SIGTERM           |  terminate       |
 *      |  SIGCHLD           |  ignore          |
 *      |  SIGCONT           |  ignore(*)       |
 *      |  SIGSTOP           |  stop(*)(+)      |
 *      |  SIGTSTP           |  stop(*)         |
 *      |  SIGTTIN           |  stop(*)         |
 *      |  SIGTTOU           |  stop(*)         |
 *      |  SIGURG            |  ignore          |
 *      |  SIGXCPU           |  coredump        |
 *      |  SIGXFSZ           |  coredump        |
 *      |  SIGVTALRM         |  terminate       |
 *      |  SIGPROF           |  terminate       |
 *      |  SIGPOLL/SIGIO     |  terminate       |
 *      |  SIGSYS/SIGUNUSED  |  coredump        |
 *      |  SIGSTKFLT         |  terminate       |
 *      |  SIGWINCH          |  ignore          |
 *      |  SIGPWR            |  terminate       |
 *      |  SIGRTMIN-SIGRTMAX |  terminate       |
 *      +--------------------+------------------+
 *      |  non-POSIX signal  |  default action  |
 *      +--------------------+------------------+
 *      |  SIGEMT            |  coredump        |
 *      +--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
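/*
 * Illustrative sketch (not part of the original file): the "(*)"
 * semantics above as observable from userspace.  The helper name is
 * hypothetical.
 */
#if 0
#include <signal.h>
#include <unistd.h>

/* Assume the child currently has SIGTSTP blocked. */
static void stop_cont_demo(pid_t child)
{
	kill(child, SIGTSTP);	/* stays pending: blocked, so no stop yet */
	kill(child, SIGCONT);	/* discards the pending SIGTSTP, regardless
				 * of blocking/catching/ignoring SIGCONT */
}
#endif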
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL) | M(SIGSTOP) )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT) | M(SIGILL)  | M(SIGTRAP) | M(SIGABRT) | \
	M(SIGFPE)  | M(SIGSEGV) | M(SIGBUS)  | M(SIGSYS)  | \
	M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )

#define sig_kernel_only(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))

#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)

#define sig_avoid_stop_race() \
	(sigtestsetmask(&current->pending.signal, M(SIGCONT) | M(SIGKILL)) || \
	 sigtestsetmask(&current->signal->shared_pending.signal, \
			M(SIGCONT) | M(SIGKILL)))
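/*
 * Illustrative expansion (not part of the original file), assuming
 * SIGTSTP == 20 and SIGRTMIN > 20:
 *
 *	sig_kernel_stop(SIGTSTP)
 *	  => (20 < SIGRTMIN) && T(20, SIG_KERNEL_STOP_MASK)
 *	  => 1 && ((1UL << 19) & (M(SIGSTOP)|M(SIGTSTP)|M(SIGTTIN)|M(SIGTTOU)))
 *	  => nonzero
 *
 * Real-time signals always classify as "not kernel-special" because of
 * the (sig) < SIGRTMIN guard.
 */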
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return	handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
static struct sigqueue *__sigqueue_alloc(void)
{
	struct sigqueue *q = NULL;

	if (atomic_read(&current->user->sigpending) <
	    current->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
	if (q) {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->lock = NULL;
		q->user = get_uid(current->user);
		atomic_inc(&q->user->sigpending);
	}
	return q;
}

static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand = tsk->sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	spin_lock(&sighand->siglock);
	if (atomic_dec_and_test(&sig->count)) {
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below. */
	}
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.  We delayed
		 * calling exit_itimers until after flush_sigqueue, just in
		 * case our thread-local pending queue contained a queued
		 * timer signal that would have been cleared in
		 * exit_itimers.  When that called sigqueue_free, it would
		 * attempt to re-take the tasklist_lock and deadlock.  This
		 * can never happen if we ensure that all queues the
		 * timer's signal might be queued on have been flushed
		 * first.  The shared_pending queue, and our own pending
		 * queue are the only queues the timer could be on, since
		 * there are no other threads left in the group and timer
		 * signals are constrained to threads inside the group.
		 */
		exit_itimers(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}

void exit_signal(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

EXPORT_SYMBOL_GPL(flush_signal_handlers);
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
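/*
 * Illustrative sketch (not part of the original file): how a driver
 * might use the notifier interface above.  The my_notifier/my_dev
 * names are hypothetical.
 */
#if 0
static int my_notifier(void *priv)
{
	struct my_dev *dev = priv;

	/* Returning 0 keeps the signal blocked while the hardware
	 * lock is held; nonzero lets the signal through after all. */
	return dev->hw_lock_held ? 0 : 1;
}

static void my_take_hw_lock(struct my_dev *dev)
{
	sigset_t mask;

	sigfillset(&mask);
	block_all_signals(my_notifier, dev, &mask);
	dev->hw_lock_held = 1;
	/* ... critical section against the hardware ... */
	dev->hw_lock_held = 0;
	unblock_all_signals();
}
#endif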
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if ( signr &&
	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	     info->si_sys_private){
		do_schedule_next_timer(info);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * If resume is set, we want to wake it up in the TASK_STOPPED case.
	 * We don't check for TASK_STOPPED because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By calling wake_up_process any time resume is set, we ensure
	 * the process will wake up and handle its stop or death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (sig < 0 || sig > _NSIG)
		return error;
	error = -EPERM;
	if ((!info || ((unsigned long)info != 1 &&
			(unsigned long)info != 2 && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;
	return security_task_kill(t, info, sig);
}

static void do_notify_parent_cldstop(struct task_struct *tsk,
				     struct task_struct *parent);
/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if ((unsigned long)info == 2)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	if (atomic_read(&t->user->sigpending) <
			t->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);

	if (q) {
		q->flags = 0;
		q->user = get_uid(t->user);
		atomic_inc(&q->user->sigpending);
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else {
		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER)
			/*
			 * Queue overflow, abort.  We may abort if the signal was rt
			 * and sent by user using something other than kill().
			 */
			return -EAGAIN;
		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))


static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
	if (!spin_is_locked(&t->sighand->siglock))
		BUG();

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
		sigdelset(&t->blocked, sig);
		recalc_sigpending_tsk(t);
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, (void *)2, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
#define wants_signal(sig, p, mask) 			\
	(!sigismember(&(p)->blocked, sig)		\
	 && !((p)->state & mask)			\
	 && !((p)->flags & PF_EXITING)			\
	 && (task_curr(p) || !signal_pending(p)))
static void
__group_complete_signal(int sig, struct task_struct *p, unsigned int mask)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p, mask))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t, mask)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !p->signal->group_exit &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->group_exit = 1;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned int mask;
	int ret = 0;

	if (!spin_is_locked(&p->sighand->siglock))
		BUG();
	handle_stop_signal(sig, p);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Don't bother zombies and stopped tasks (but
	 * SIGKILL will punch through stopped state)
	 */
	mask = TASK_DEAD | TASK_ZOMBIE;
	if (sig != SIGKILL)
		mask |= TASK_STOPPED;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p, mask);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->state & (TASK_ZOMBIE|TASK_DEAD))
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		sigaddset(&t->pending.signal, SIGKILL);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		signal_wake_up(t, 1);
	}
}
/*
 * Must be called with the tasklist_lock held for reading!
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && p->sighand) {
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}

	return ret;
}
/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p;
	struct list_head *l;
	struct pid *pid;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	}
	return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}
/*
 * kill_sl_info() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */

int
kill_sl_info(int sig, struct siginfo *info, pid_t sid)
{
	int err, retval = -EINVAL;
	struct pid *pid;
	struct list_head *l;
	struct task_struct *p;

	if (sid <= 0)
		goto out;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	for_each_task_pid(sid, PIDTYPE_SID, p, l, pid) {
		if (!p->signal->leader)
			continue;
		err = group_send_sig_info(sig, info, p);
		if (retval)
			retval = err;
	}
	read_unlock(&tasklist_lock);
out:
	return retval;
}
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
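/*
 * Illustrative sketch (not part of the original file): the pid
 * encodings that kill_something_info() implements, as seen from
 * userspace via kill(2).  The helper name is hypothetical.
 */
#if 0
#include <signal.h>
#include <unistd.h>

static void kill_examples(pid_t pid, pid_t pgrp)
{
	kill(pid, SIGTERM);	/* pid > 0: one specific process */
	kill(0, SIGTERM);	/* pid == 0: the caller's process group */
	kill(-pgrp, SIGTERM);	/* pid < -1: the process group |pid| */
	kill(-1, SIGTERM);	/* pid == -1: every process we may signal,
				 * except init (pid 1) and ourselves */
	kill(pid, 0);		/* sig == 0: existence/permission probe,
				 * nothing is delivered */
}
#endif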
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_sl(pid_t sess, int sig, int priv)
{
	return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}
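/*
 * Illustrative sketch (not part of the original file): how other
 * kernel code typically uses the wrappers above.  `priv' selects the
 * info cookie: 0 means "as if from userspace" (SI_USER), nonzero
 * means "from the kernel" (SI_KERNEL).  The function name is
 * hypothetical.
 */
#if 0
static void signal_usage_sketch(struct task_struct *task,
				pid_t pid, pid_t pgrp)
{
	send_sig(SIGHUP, task, 1);   /* kernel-originated, specific thread */
	kill_proc(pid, SIGTERM, 1);  /* by pid, delivered process-wide */
	kill_pg(pgrp, SIGHUP, 1);    /* whole process group, as the tty
				      * layer does when the carrier drops */
}
#endif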
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc()))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		read_lock(&tasklist_lock);
		spin_lock_irqsave(q->lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(q->lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}

int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}

int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	unsigned int mask;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/*
	 * Don't bother zombies and stopped tasks (but
	 * SIGKILL will punch through stopped state)
	 */
	mask = TASK_DEAD | TASK_ZOMBIE;
	if (sig != SIGKILL)
		mask |= TASK_STOPPED;

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p, mask);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}
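/*
 * Illustrative flow (not part of the original file): how the POSIX
 * timer code is expected to use the preallocated-sigqueue API above.
 *
 *	timer_create():  q = sigqueue_alloc();  report EAGAIN if NULL
 *	timer expiry:    send_sigqueue(sig, q, task) or
 *	                 send_group_sigqueue(sig, q, group_leader)
 *	                 (a second expiry before delivery just bumps
 *	                  q->info.si_overrun instead of re-queueing)
 *	timer_delete():  sigqueue_free(q) -- this also unlinks q if it
 *	                 is still sitting on a pending queue
 */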
/*
 * Joy. Or not. Pthread wants us to wake up every thread
 * in our parent group.
 */
static void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	struct task_struct *tsk = parent;

	/*
	 * Fortunately this is not necessary for thread groups:
	 */
	if (p->tgid == tsk->tgid) {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		return;
	}

	do {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		tsk = next_thread(tsk);
		if (tsk->signal != parent->signal)
			BUG();
	} while (tsk != parent);
}
/*
 * Let a parent know about a status change of a child.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	int why, status;
	struct sighand_struct *psig;

	if (sig == -1)
		BUG();

	BUG_ON(tsk->group_leader != tsk && tsk->group_leader->state != TASK_ZOMBIE && !tsk->ptrace);
	BUG_ON(tsk->group_leader == tsk && !thread_group_empty(tsk) && !tsk->ptrace);

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime;
	info.si_stime = tsk->stime;

	status = tsk->exit_code & 0x7f;
	why = SI_KERNEL;	/* shouldn't happen */
	switch (tsk->state) {
	case TASK_STOPPED:
		/* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
		if (tsk->ptrace & PT_PTRACED)
			why = CLD_TRAPPED;
		else
			why = CLD_STOPPED;
		break;

	default:
		if (tsk->exit_code & 0x80)
			why = CLD_DUMPED;
		else if (tsk->exit_code & 0x7f)
			why = CLD_KILLED;
		else {
			why = CLD_EXITED;
			status = tsk->exit_code >> 8;
		}
		break;
	}

	info.si_code = why;
	info.si_status = status;

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD && tsk->state != TASK_STOPPED &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (sig > 0 && sig <= _NSIG)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}


/*
 * We need the tasklist lock because it's the only
 * thing that protects our "parent" pointer.
 *
 * exit.c calls "do_notify_parent()" directly, because
 * it already has the tasklist lock.
 */
void
notify_parent(struct task_struct *tsk, int sig)
{
	if (sig != -1) {
		read_lock(&tasklist_lock);
		do_notify_parent(tsk, sig);
		read_unlock(&tasklist_lock);
	}
}
static void
do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime;
	info.si_stime = tsk->stime;

	info.si_status = tsk->exit_code & 0x7f;
	info.si_code = CLD_STOPPED;

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
int print_fatal_signals = 0;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	int i;
	unsigned char insn;

	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, current->pid, signr);

#ifdef __i386__
	printk("code at %08lx: ", regs->eip);
	for (i = 0; i < 16; i++) {
		__get_user(insn, (unsigned char *)(regs->eip + i));
		printk("%02x ", insn);
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
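/*
 * Illustrative note (not part of the original file): the diagnostic
 * above is enabled from the kernel command line, e.g.
 *
 *	linux ... print-fatal-signals=1
 */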
#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, current->parent);
		read_unlock(&tasklist_lock);
	}
	else if (stop_count == 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader,
					 current->group_leader->real_parent);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 */
static void
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	/* spin_lock_irq(&sighand->siglock) is now done in caller */

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (unlikely(sig->group_exit)) {
			/*
			 * There is a group exit in progress now.
			 * We'll just ignore the stop and process the
			 * associated fatal signal.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (unlikely(sig_avoid_stop_race())) {
			/*
			 * Either a SIGCONT or a SIGKILL signal was
			 * posted in the siglock-not-held window.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (t->state < TASK_STOPPED) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
}
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->group_exit)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((signr == SIGSEGV) && print_fatal_signals) {
			spin_unlock_irq(&current->sighand->siglock);
			print_fatal_signal(regs, signr);
			spin_lock_irq(&current->sighand->siglock);
		}
		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/*
			 * If there is a group stop in progress,
			 * we must participate in the bookkeeping.
			 */
			if (current->signal->group_stop_count > 0)
				--current->signal->group_stop_count;

			/* Let the debugger run.  */
			current->exit_code = signr;
			current->last_siginfo = info;
			set_current_state(TASK_STOPPED);
			spin_unlock_irq(&current->sighand->siglock);
			notify_parent(current, SIGCHLD);
			schedule();

			current->last_siginfo = NULL;

			/* We're back.  Did the debugger cancel the sig?  */
			spin_lock_irq(&current->sighand->siglock);
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) /* Run the handler.  */
			break; /* will return non-zero "signr" value */

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr == SIGSTOP) {
				do_signal_stop(signr); /* releases siglock */
				goto relock;
			}
			spin_unlock_irq(&current->sighand->siglock);

			/* signals can be posted during this window */

			if (is_orphaned_pgrp(process_group(current)))
				goto relock;

			spin_lock_irq(&current->sighand->siglock);
			if (unlikely(sig_avoid_stop_race())) {
				/*
				 * Either a SIGCONT or a SIGKILL signal was
				 * posted in the siglock-not-held window.
				 */
				continue;
			}

			do_signal_stop(signr); /* releases siglock */
			goto relock;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (print_fatal_signals)
			print_fatal_signal(regs, signr);
		if (sig_kernel_coredump(signr) &&
		    do_coredump((long)signr, signr, regs)) {
			/*
			 * That killed all other threads in the group and
			 * synchronized with their demise, so there can't
			 * be any more left to kill now.  The group_exit
			 * flags are set by do_coredump.  Note that
			 * thread_group_empty won't always be true yet,
			 * because those threads were blocked in __exit_mm
			 * and we just let them go to finish dying.
			 */
			const int code = signr | 0x80;
			BUG_ON(!current->signal->group_exit);
			BUG_ON(current->signal->group_exit_code != code);
			do_exit(code);
			/* NOTREACHED */
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(send_group_sig_info);
EXPORT_SYMBOL(sigqueue_alloc);
EXPORT_SYMBOL(sigqueue_free);
EXPORT_SYMBOL(send_sigqueue);
EXPORT_SYMBOL(send_group_sigqueue);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we sleep, so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
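/*
 * Illustrative sketch (not part of the original file): the userspace
 * view of the syscall above, via the C library's sigtimedwait()
 * wrapper.  The helper name is hypothetical.
 */
#if 0
#include <signal.h>
#include <stdio.h>

static int wait_for_usr1(void)
{
	sigset_t set;
	siginfo_t si;
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	/* The set should already be blocked, or this races with delivery. */
	sigprocmask(SIG_BLOCK, &set, NULL);

	if (sigtimedwait(&set, &si, &ts) < 0)
		return -1;	/* errno: EAGAIN on timeout, EINTR if
				 * interrupted by another signal */
	printf("got SIGUSR1 from pid %d\n", si.si_pid);
	return 0;
}
#endif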
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process.  This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && (p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
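/*
 * Illustrative sketch (not part of the original file): invoking tgkill
 * directly via syscall(), on the assumption that the C library of this
 * era ships no wrapper for it.  The helper name is hypothetical.
 */
#if 0
#include <sys/syscall.h>
#include <unistd.h>
#include <signal.h>

static int tgkill_example(pid_t tgid, pid_t tid)
{
	/* Signals exactly one thread, and fails with ESRCH if the tid
	 * has been reused by a different thread group. */
	return syscall(SYS_tgkill, tgid, tid, SIGUSR1);
}
#endif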
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (verify_area(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note: this code used to test ss_flags incorrectly; old
		 * code may have been written using ss_flags==0 to mean
		 * ss_flags==SS_ONSTACK (as this was the only way that
		 * worked), so this fix preserves that older mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
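/*
 * Illustrative sketch (not part of the original file): userspace setup
 * of an alternate signal stack consumed by the code above.  The helper
 * name is hypothetical.
 */
#if 0
#include <signal.h>
#include <stdlib.h>

static int setup_altstack(void)
{
	stack_t ss;

	ss.ss_sp = malloc(SIGSTKSZ);
	if (!ss.ss_sp)
		return -1;
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;	/* 0 is accepted like SS_ONSTACK, per the
				 * compatibility note above */
	if (sigaltstack(&ss, NULL) < 0)
		return -1;

	/* Handlers installed with SA_ONSTACK now run on this stack,
	 * which is how a SIGSEGV caused by stack overflow can still
	 * be handled. */
	return 0;
}
#endif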
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
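/*
 * Illustrative note (not part of the original file): SA_ONESHOT |
 * SA_NOMASK above gives signal() historical SysV semantics -- the
 * handler is reset to SIG_DFL on delivery and the signal is not
 * blocked while the handler runs -- which is why portable code
 * prefers sigaction() over signal().
 */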
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}