/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>

extern void k_getrusage(struct task_struct *, int, struct rusage *);

/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;

/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */

#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL) | M(SIGSTOP) )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT) | M(SIGILL)  | M(SIGTRAP) | M(SIGABRT) | \
	M(SIGFPE)  | M(SIGSEGV) | M(SIGBUS)  | M(SIGSYS)  | \
	M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )

#define sig_kernel_only(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))

#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)

#define sig_avoid_stop_race() \
	(sigtestsetmask(&current->pending.signal, M(SIGCONT) | M(SIGKILL)) || \
	 sigtestsetmask(&current->signal->shared_pending.signal, \
			M(SIGCONT) | M(SIGKILL)))

static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

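/*
 * Set or clear TIF_SIGPENDING for @t to match reality: the flag
 * should be set iff a group stop is in progress or an unblocked
 * signal sits on the private or shared pending queue.
 */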
fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}

/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}

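/*
 * Allocate a new sigqueue record and charge it to the current user's
 * pending-signal count, unless that count has already reached
 * RLIMIT_SIGPENDING.  Returns NULL on failure.
 */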
static struct sigqueue *__sigqueue_alloc(void)
{
	struct sigqueue *q = NULL;

	if (atomic_read(&current->user->sigpending) <
			current->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
	if (q) {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->lock = NULL;
		q->user = get_uid(current->user);
		atomic_inc(&q->user->sigpending);
	}
	return q;
}

static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand = tsk->sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	spin_lock(&sighand->siglock);
	if (atomic_dec_and_test(&sig->count)) {
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime += tsk->utime;
		sig->stime += tsk->stime;
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below.  */
	}
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.  We delayed
		 * calling exit_itimers until after flush_sigqueue, just in
		 * case our thread-local pending queue contained a queued
		 * timer signal that would have been cleared in
		 * exit_itimers.  When that called sigqueue_free, it would
		 * attempt to re-take the tasklist_lock and deadlock.  This
		 * can never happen if we ensure that all queues the
		 * timer's signal might be queued on have been flushed
		 * first.  The shared_pending queue, and our own pending
		 * queue are the only queues the timer could be on, since
		 * there are no other threads left in the group and timer
		 * signals are constrained to threads inside the group.
		 */
		exit_itimers(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}

void exit_signal(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

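/*
 * Take the siginfo for @sig off @list.  If several entries are queued
 * for the same signal, only the first is removed and the signal stays
 * marked pending in the set.  Returns 0 if @sig was not pending at all.
 */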
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}
	recalc_sigpending();

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private){
		do_schedule_next_timer(info);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * If resume is set, we want to wake it up in the TASK_STOPPED case.
	 * We don't check for TASK_STOPPED because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By calling wake_up_process any time resume is set, we ensure
	 * the process will wake up and handle its stop or death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int user;
	int error = -EINVAL;

	if (sig < 0 || sig > _NSIG)
		return error;

	user = (!info || ((unsigned long)info != 1 &&
		(unsigned long)info != 2 && SI_FROMUSER(info)));

	error = -EPERM;
	if (user && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	error = -ESRCH;
	if (user && !vx_check(vx_task_xid(t), VX_ADMIN|VX_IDENT))
		return error;

	return security_task_kill(t, info, sig);
}

static void do_notify_parent_cldstop(struct task_struct *tsk,
				     struct task_struct *parent,
				     int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->stop_state = 1;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_STOPPED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
					CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->stop_state > 0) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->stop_state = -1;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_CONTINUED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
					CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		}
	}
}

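/*
 * Low-level queueing: append the siginfo for @sig to @signals and mark
 * the signal pending.  Respects RLIMIT_SIGPENDING; a legacy
 * (non-realtime) signal is still marked pending even when no sigqueue
 * record could be allocated.
 */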
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if ((unsigned long)info == 2)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	if (atomic_read(&t->user->sigpending) <
			t->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);

	if (q) {
		q->flags = 0;
		q->user = get_uid(t->user);
		atomic_inc(&q->user->sigpending);
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else {
		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER)
			/*
			 * Queue overflow, abort.  We may abort if the signal was rt
			 * and sent by user using something other than kill().
			 */
			return -EAGAIN;
		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

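/*
 * A legacy (non-realtime) signal is never queued twice: if it is
 * already a member of the pending set, a second send is a no-op.
 */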
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
#ifdef CONFIG_SMP
	if (!spin_is_locked(&t->sighand->siglock))
		BUG();
#endif

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
		sigdelset(&t->blocked, sig);
		recalc_sigpending_tsk(t);
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, (void *)2, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
#define wants_signal(sig, p, mask)			\
	(!sigismember(&(p)->blocked, sig)		\
	 && !((p)->state & mask)			\
	 && !((p)->flags & PF_EXITING)			\
	 && (task_curr(p) || !signal_pending(p)))

static void
__group_complete_signal(int sig, struct task_struct *p)
{
	unsigned int mask;
	struct task_struct *t;

	/*
	 * Don't bother zombies and stopped tasks (but
	 * SIGKILL will punch through stopped state)
	 */
	mask = TASK_DEAD | TASK_ZOMBIE | TASK_TRACED;
	if (sig != SIGKILL)
		mask |= TASK_STOPPED;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p, mask))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t, mask)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !p->signal->group_exit &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->group_exit = 1;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

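/*
 * Queue a process-wide signal on the shared pending queue and pick a
 * thread to handle it.  The caller must hold the siglock.
 */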
static int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

#ifdef CONFIG_SMP
	if (!spin_is_locked(&p->sighand->siglock))
		BUG();
#endif
	handle_stop_signal(sig, p);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->state & (TASK_ZOMBIE|TASK_DEAD))
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		sigaddset(&t->pending.signal, SIGKILL);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		signal_wake_up(t, 1);
	}
}

/*
 * Must be called with the tasklist_lock held for reading!
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && p->sighand) {
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}

	return ret;
}

/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

/*
 * kill_sl_info() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */

int
kill_sl_info(int sig, struct siginfo *info, pid_t sid)
{
	int err, retval = -EINVAL;
	struct task_struct *p;

	if (sid <= 0)
		goto out;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	do_each_task_pid(sid, PIDTYPE_SID, p) {
		if (!p->signal->leader)
			continue;
		err = group_send_sig_info(sig, info, p);
		if (retval)
			retval = err;
	} while_each_task_pid(sid, PIDTYPE_SID, p);
	read_unlock(&tasklist_lock);
out:
	return retval;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (sig < 0 || sig > _NSIG)
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_sl(pid_t sess, int sig, int priv)
{
	return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc()))
		q->flags |= SIGQUEUE_PREALLOC;
	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		read_lock(&tasklist_lock);
		spin_lock_irqsave(q->lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(q->lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}

int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

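/*
 * As send_sigqueue, but for a process-wide signal: the preallocated
 * sigqueue entry goes on the shared pending queue and
 * __group_complete_signal chooses the thread to wake.
 */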
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

/*
 * Joy. Or not. Pthread wants us to wake up every thread
 * in our parent group.
 */
static void __wake_up_parent(struct task_struct *p,
			     struct task_struct *parent)
{
	struct task_struct *tsk = parent;

	/*
	 * Fortunately this is not necessary for thread groups:
	 */
	if (p->tgid == tsk->tgid) {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		return;
	}

	do {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		tsk = next_thread(tsk);
		if (tsk->signal != parent->signal)
			BUG();
	} while (tsk != parent);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	if (sig == -1)
		BUG();

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime + tsk->signal->utime;
	info.si_stime = tsk->stime + tsk->signal->stime;

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (sig > 0 && sig <= _NSIG)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}

static void
do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
			 int why)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime;
	info.si_stime = tsk->stime;

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 */
static void ptrace_stop(int exit_code, siginfo_t *info)
{
	BUG_ON(!(current->ptrace & PT_PTRACED));

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	do_notify_parent_cldstop(current, current->parent, CLD_TRAPPED);
	read_unlock(&tasklist_lock);
	schedule();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

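/*
 * The helpers below make up the generic signal-delivery loop, used
 * unless the architecture provides its own get_signal_to_deliver
 * implementation.
 */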
#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, current->parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
	else if (stop_count == 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader,
					 current->group_leader->real_parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 */
static void
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	/* spin_lock_irq(&sighand->siglock) is now done in caller */

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->stop_state = 1;
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = current->signal->group_exit_code = signr;
		set_current_state(TASK_STOPPED);
		sig->stop_state = 1;
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (unlikely(sig->group_exit)) {
			/*
			 * There is a group exit in progress now.
			 * We'll just ignore the stop and process the
			 * associated fatal signal.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (unlikely(sig_avoid_stop_race())) {
			/*
			 * Either a SIGCONT or a SIGKILL signal was
			 * posted in the siglock-not-held window.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (t->state < TASK_STOPPED) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->stop_state = 1;

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
}

/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->group_exit)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->stop_state = 1;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

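/*
 * Main signal-delivery loop, called from arch code on the way back to
 * user mode.  Dequeues one signal with the siglock held, gives ptrace
 * a chance to intercept or rewrite it, and either returns a signal
 * whose handler should run, performs the default stop/ignore action,
 * or kills the whole group (with an optional core dump).
 */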
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr == SIGSTOP) {
				do_signal_stop(signr); /* releases siglock */
				goto relock;
			}
			spin_unlock_irq(&current->sighand->siglock);

			/* signals can be posted during this window */

			if (is_orphaned_pgrp(process_group(current)))
				goto relock;

			spin_lock_irq(&current->sighand->siglock);
			if (unlikely(sig_avoid_stop_race())) {
				/*
				 * Either a SIGCONT or a SIGKILL signal was
				 * posted in the siglock-not-held window.
				 */
				continue;
			}

			do_signal_stop(signr); /* releases siglock */
			goto relock;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr) &&
		    do_coredump((long)signr, signr, regs)) {
			/*
			 * That killed all other threads in the group and
			 * synchronized with their demise, so there can't
			 * be any more left to kill now.  The group_exit
			 * flags are set by do_coredump.  Note that
			 * thread_group_empty won't always be true yet,
			 * because those threads were blocked in __exit_mm
			 * and we just let them go to finish dying.
			 */
			const int code = signr | 0x80;
			BUG_ON(!current->signal->group_exit);
			BUG_ON(current->signal->group_exit_code != code);
			do_exit(code);
			/* NOTREACHED */
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}

#endif

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(send_group_sig_info);
EXPORT_SYMBOL(sigqueue_alloc);
EXPORT_SYMBOL(sigqueue_free);
EXPORT_SYMBOL(send_sigqueue);
EXPORT_SYMBOL(send_group_sigqueue);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
		break;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}

asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

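/*
 * Report the union of the private and shared pending sets, restricted
 * to the signals this thread currently has blocked.
 */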
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif

asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested while we are sleeping in so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}

asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but it's not belonging to the target process anymore. This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && (p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}

/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}

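/*
 * Queue a signal with caller-supplied siginfo, as sigqueue(3) needs.
 * User space may not forge kernel-generated si_code values, so
 * info.si_code must be negative.
 */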
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}

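/*
 * Install a new sigaction and/or fetch the old one.  Implements the
 * POSIX rule that setting a pending signal's disposition to "ignore"
 * discards pending instances in every thread of the group.
 */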
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

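/*
 * Common code for sigaltstack: report the old alternate signal stack
 * and/or install a new one.  Changing the stack is refused while we
 * are currently running on it.
 */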
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (verify_area(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly
		 *	  old code may have been written using ss_flags==0
		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
		 *	  way that worked) - this fix preserves that older
		 *	  mechanism
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}

#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments others
   support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

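/*
 * Called once at boot to create the SLAB cache that backs sigqueue
 * allocation.
 */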
void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}