/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>

/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;

atomic_t nr_queued_signals;
int max_queued_signals = 1024;

/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals, when not blocked and set to SIG_DFL, behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */

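/*
 * Illustrative userspace sketch of the SIGCONT semantics described
 * above (not part of this file): a queued stop signal is discarded as
 * soon as SIGCONT is generated, even while the stop signal is blocked.
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGTSTP);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// block SIGTSTP
 *	kill(getpid(), SIGTSTP);		// pending, not delivered
 *	kill(getpid(), SIGCONT);		// clears the queued SIGTSTP
 *	sigpending(&set);			// SIGTSTP no longer pending
 */
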
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL)   |  M(SIGSTOP)                                   )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
	M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
	M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )

#define sig_kernel_only(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))

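/*
 * A sketch of how these predicates evaluate (illustrative only):
 *
 *	sig_kernel_only(SIGKILL)	-> nonzero
 *	sig_kernel_stop(SIGTSTP)	-> nonzero
 *	sig_kernel_coredump(SIGSEGV)	-> nonzero
 *	sig_kernel_ignore(SIGCHLD)	-> nonzero
 *	sig_kernel_stop(SIGRTMIN)	-> 0 (real-time signals never match;
 *					      the (sig) < SIGRTMIN test fails)
 */
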
#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)

#define sig_avoid_stop_race() \
	(sigtestsetmask(&current->pending.signal, M(SIGCONT) | M(SIGKILL)) || \
	 sigtestsetmask(&current->signal->shared_pending.signal, \
						  M(SIGCONT) | M(SIGKILL)))

static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return   handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

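/*
 * Conceptually this is a word-by-word evaluation of pending & ~blocked.
 * Sketch: with pending = { SIGINT, SIGTERM } and blocked = { SIGINT },
 * only the SIGTERM bit survives, so has_pending_signals() returns
 * nonzero and recalc_sigpending_tsk() below sets TIF_SIGPENDING.
 */
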
fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}

/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}

struct sigqueue *__sigqueue_alloc(void)
{
	struct sigqueue *q = NULL;

	if (atomic_read(&nr_queued_signals) < max_queued_signals)
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
	if (q) {
		atomic_inc(&nr_queued_signals);
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->lock = NULL;
	}
	return q;
}

static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	kmem_cache_free(sigqueue_cachep, q);
	atomic_dec(&nr_queued_signals);
}

static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand = tsk->sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	spin_lock(&sighand->siglock);
	if (atomic_dec_and_test(&sig->count)) {
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below. */
	}
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.  We delayed
		 * calling exit_itimers until after flush_sigqueue, just in
		 * case our thread-local pending queue contained a queued
		 * timer signal that would have been cleared in
		 * exit_itimers.  When that called sigqueue_free, it would
		 * attempt to re-take the tasklist_lock and deadlock.  This
		 * can never happen if we ensure that all queues the
		 * timer's signal might be queued on have been flushed
		 * first.  The shared_pending queue, and our own pending
		 * queue are the only queues the timer could be on, since
		 * there are no other threads left in the group and timer
		 * signals are constrained to threads inside the group.
		 */
		exit_itimers(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}

void exit_signal(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

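/*
 * Sketch of how a driver might use this pair (the callback and device
 * structure here are hypothetical; the DRM lock code is a real-world
 * caller of this interface):
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return dev->allow_signals;	// 0 means: keep blocking
 *	}
 *
 *	sigset_t mask;
 *	siginitsetinv(&mask, 0);		// block everything
 *	block_all_signals(my_notifier, dev, &mask);
 *	...critical section...
 *	unblock_all_signals();
 */
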
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private){
		do_schedule_next_timer(info);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * If resume is set, we want to wake it up in the TASK_STOPPED case.
	 * We don't check for TASK_STOPPED because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By calling wake_up_process any time resume is set, we ensure
	 * the process will wake up and handle its stop or death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (sig < 0 || sig > _NSIG)
		return error;
	error = -EPERM;
	if ((!info || ((unsigned long)info != 1 &&
			(unsigned long)info != 2 && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;
	return security_task_kill(t, info, sig);
}

static void do_notify_parent_cldstop(struct task_struct *tsk,
				     struct task_struct *parent);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);
	}
}

static int send_signal(int sig, struct siginfo *info, struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if ((unsigned long)info == 2)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	if (atomic_read(&nr_queued_signals) < max_queued_signals)
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);

	if (q) {
		atomic_inc(&nr_queued_signals);
		q->flags = 0;
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else {
		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
	if (!spin_is_locked(&t->sighand->siglock))
		BUG();

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
		sigdelset(&t->blocked, sig);
		recalc_sigpending_tsk(t);
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, (void *)2, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
#define wants_signal(sig, p, mask)			\
	(!sigismember(&(p)->blocked, sig)		\
	 && !((p)->state & mask)			\
	 && !((p)->flags & PF_EXITING)			\
	 && (task_curr(p) || !signal_pending(p)))

static void
__group_complete_signal(int sig, struct task_struct *p, unsigned int mask)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p, mask))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t, mask)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !p->signal->group_exit &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->group_exit = 1;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned int mask;
	int ret = 0;

	if (!spin_is_locked(&p->sighand->siglock))
		BUG();
	handle_stop_signal(sig, p);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Don't bother zombies and stopped tasks (but
	 * SIGKILL will punch through stopped state)
	 */
	mask = TASK_DEAD | TASK_ZOMBIE;
	if (sig != SIGKILL)
		mask |= TASK_STOPPED;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p, mask);
	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->state & (TASK_ZOMBIE|TASK_DEAD))
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		sigaddset(&t->pending.signal, SIGKILL);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		signal_wake_up(t, 1);
	}
}

/*
 * Must be called with the tasklist_lock held for reading!
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	if (!vx_check(vx_task_xid(p), VX_ADMIN|VX_WATCH|VX_IDENT))
		return -ESRCH;

	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && p->sighand) {
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}

	return ret;
}

/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p;
	struct list_head *l;
	struct pid *pid;
	int err, retval = -ESRCH, found = 0;

	if (pgrp <= 0)
		return -EINVAL;

	for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) {
		found = 1;
		err = group_send_sig_info(sig, info, p);
		if (err != -EPERM)
			retval = err;
	}
	return found ? retval : -ESRCH;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

/*
 * kill_sl_info() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */

int
kill_sl_info(int sig, struct siginfo *info, pid_t sid)
{
	int err, retval = -EINVAL;
	struct pid *pid;
	struct list_head *l;
	struct task_struct *p;

	if (sid <= 0)
		goto out;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	for_each_task_pid(sid, PIDTYPE_SID, p, l, pid) {
		if (!p->signal->leader)
			continue;
		err = group_send_sig_info(sig, info, p);
		if (retval)
			retval = err;
	}
	read_unlock(&tasklist_lock);
out:
	return retval;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}

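/*
 * Userspace view of the pid encoding handled above (sketch, not part
 * of this file):
 *
 *	kill(1234, SIGTERM);	// pid > 0: one process (thread group)
 *	kill(0, SIGTERM);	// pid == 0: caller's process group
 *	kill(-5678, SIGTERM);	// pid < -1: process group 5678
 *	kill(-1, SIGTERM);	// pid == -1: everything but init and us
 */
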
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_sl(pid_t sess, int sig, int priv)
{
	return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

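/*
 * Userspace sketch of the POSIX timer path that relies on this
 * preallocation (illustrative only): the sigqueue is allocated inside
 * timer_create(), so a later timer expiration can never fail to queue
 * its signal.
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGRTMIN,
 *	};
 *	timer_t timerid;
 *	if (timer_create(CLOCK_REALTIME, &sev, &timerid) < 0)
 *		perror("timer_create");	// EAGAIN: no sigqueue available
 */
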
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc()))
		q->flags |= SIGQUEUE_PREALLOC;
	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		read_lock(&tasklist_lock);
		spin_lock_irqsave(q->lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(q->lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}

int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	unsigned int mask;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Don't bother zombies and stopped tasks (but
	 * SIGKILL will punch through stopped state)
	 */
	mask = TASK_DEAD | TASK_ZOMBIE;
	if (sig != SIGKILL)
		mask |= TASK_STOPPED;

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p, mask);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

/*
 * Joy. Or not. Pthread wants us to wake up every thread
 * in our parent group.
 */
static void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	struct task_struct *tsk = parent;

	/*
	 * Fortunately this is not necessary for thread groups:
	 */
	if (p->tgid == tsk->tgid) {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		return;
	}

	do {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		tsk = next_thread(tsk);
		if (tsk->signal != parent->signal)
			BUG();
	} while (tsk != parent);
}

/*
 * Let a parent know about a status change of a child.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	int why, status;
	struct sighand_struct *psig;

	if (sig == -1)
		BUG();

	BUG_ON(tsk->group_leader != tsk && tsk->group_leader->state != TASK_ZOMBIE && !tsk->ptrace);
	BUG_ON(tsk->group_leader == tsk && !thread_group_empty(tsk) && !tsk->ptrace);

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime;
	info.si_stime = tsk->stime;

	status = tsk->exit_code & 0x7f;
	why = SI_KERNEL;	/* shouldn't happen */
	switch (tsk->state) {
	case TASK_STOPPED:
		/* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
		if (tsk->ptrace & PT_PTRACED)
			why = CLD_TRAPPED;
		else
			why = CLD_STOPPED;
		break;

	default:
		if (tsk->exit_code & 0x80)
			why = CLD_DUMPED;
		else if (tsk->exit_code & 0x7f)
			why = CLD_KILLED;
		else {
			why = CLD_EXITED;
			status = tsk->exit_code >> 8;
		}
		break;
	}
	info.si_code = why;
	info.si_status = status;

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD && tsk->state != TASK_STOPPED &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (sig > 0 && sig <= _NSIG)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}

/*
 * We need the tasklist lock because it's the only
 * thing that protects our "parent" pointer.
 *
 * exit.c calls "do_notify_parent()" directly, because
 * it already has the tasklist lock.
 */
void
notify_parent(struct task_struct *tsk, int sig)
{
	if (sig != -1) {
		read_lock(&tasklist_lock);
		do_notify_parent(tsk, sig);
		read_unlock(&tasklist_lock);
	}
}

static void
do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime;
	info.si_stime = tsk->stime;

	info.si_status = tsk->exit_code & 0x7f;
	info.si_code = CLD_STOPPED;

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, current->parent);
		read_unlock(&tasklist_lock);
	}
	else if (stop_count == 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader,
					 current->group_leader->real_parent);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 */
static void
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	/* spin_lock_irq(&sighand->siglock) is now done in caller */

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (unlikely(sig->group_exit)) {
			/*
			 * There is a group exit in progress now.
			 * We'll just ignore the stop and process the
			 * associated fatal signal.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (unlikely(sig_avoid_stop_race())) {
			/*
			 * Either a SIGCONT or a SIGKILL signal was
			 * posted in the siglock-not-held window.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (t->state < TASK_STOPPED) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
}

/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->group_exit)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/*
			 * If there is a group stop in progress,
			 * we must participate in the bookkeeping.
			 */
			if (current->signal->group_stop_count > 0)
				--current->signal->group_stop_count;

			/* Let the debugger run.  */
			current->exit_code = signr;
			current->last_siginfo = info;
			set_current_state(TASK_STOPPED);
			spin_unlock_irq(&current->sighand->siglock);
			notify_parent(current, SIGCHLD);
			schedule();

			current->last_siginfo = NULL;

			/* We're back.  Did the debugger cancel the sig?  */
			spin_lock_irq(&current->sighand->siglock);
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) /* Run the handler.  */
			break; /* will return non-zero "signr" value */

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr == SIGSTOP) {
				do_signal_stop(signr); /* releases siglock */
				goto relock;
			}
			spin_unlock_irq(&current->sighand->siglock);

			/* signals can be posted during this window */

			if (is_orphaned_pgrp(process_group(current)))
				goto relock;

			spin_lock_irq(&current->sighand->siglock);
			if (unlikely(sig_avoid_stop_race())) {
				/*
				 * Either a SIGCONT or a SIGKILL signal was
				 * posted in the siglock-not-held window.
				 */
				continue;
			}

			do_signal_stop(signr); /* releases siglock */
			goto relock;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr) &&
		    do_coredump((long)signr, signr, regs)) {
			/*
			 * That killed all other threads in the group and
			 * synchronized with their demise, so there can't
			 * be any more left to kill now.  The group_exit
			 * flags are set by do_coredump.  Note that
			 * thread_group_empty won't always be true yet,
			 * because those threads were blocked in __exit_mm
			 * and we just let them go to finish dying.
			 */
			const int code = signr | 0x80;
			BUG_ON(!current->signal->group_exit);
			BUG_ON(current->signal->group_exit_code != code);
			do_exit(code);
			/* NOTREACHED */
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}

#endif

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(send_group_sig_info);
EXPORT_SYMBOL(sigqueue_alloc);
EXPORT_SYMBOL(sigqueue_free);
EXPORT_SYMBOL(send_sigqueue);
EXPORT_SYMBOL(send_group_sigqueue);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}

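/*
 * Sketch of the kernel-thread usage mentioned above (the caller is
 * hypothetical): a kernel daemon that wants to see only SIGKILL can do
 *
 *	sigset_t blocked;
 *	siginitsetinv(&blocked, sigmask(SIGKILL));
 *	sigprocmask(SIG_BLOCK, &blocked, NULL);
 *
 * and, unlike the user-mode syscall, this interface would just as
 * happily block SIGKILL too if it were left in the set.
 */
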
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif

asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in so that we'll be awakened when
			 * they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}

asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but it no longer belongs to the target process.  This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && (p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}

/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}

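/*
 * Userspace sketch (not part of this file): sigqueue(3) is the usual
 * entry to this syscall and always supplies a negative si_code
 * (SI_QUEUE), which is why non-negative codes are rejected above.
 *
 *	union sigval val = { .sival_int = 42 };
 *	sigqueue(pid, SIGRTMIN, val);	// arrives with si_code == SI_QUEUE
 */
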
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

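/*
 * Userspace sketch of the POSIX rule implemented above (illustrative
 * only): setting SIG_IGN discards a pending signal even while it is
 * blocked.
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	kill(getpid(), SIGUSR1);		// now pending and blocked
 *	signal(SIGUSR1, SIG_IGN);		// discards it
 *	sigpending(&set);			// SIGUSR1 no longer pending
 */
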
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	oss.ss_sp = (void *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp);

	if (uss) {
		void *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (verify_area(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}

#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

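/*
 * Userspace view (sketch): -ERESTARTNOHAND means pause() is restarted
 * unless a handler actually runs, so it only ever returns -1 with
 * errno == EINTR after a caught signal.
 *
 *	if (pause() == -1 && errno == EINTR)
 *		handle_interruption();	// hypothetical helper
 */
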
void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}