/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02  Jim Houston - Concurrent Computer Corp.
 *	       Changes to use preallocated sigqueue structures
 *	       to allow signals to be sent reliably.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;
/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 *
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))
#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL) | M(SIGSTOP))

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU))

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT) | M(SIGILL)  | M(SIGTRAP) | M(SIGABRT) | \
	M(SIGFPE)  | M(SIGSEGV) | M(SIGBUS)  | M(SIGSYS)  | \
	M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT)

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG))
#define sig_kernel_only(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
#define sig_avoid_stop_race() \
	(sigtestsetmask(&current->pending.signal, M(SIGCONT) | M(SIGKILL)) || \
	 sigtestsetmask(&current->signal->shared_pending.signal, \
			M(SIGCONT) | M(SIGKILL)))
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return	handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
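/*
 * A worked example of the computation above, assuming one-word sigsets:
 * if SIGINT (bit 1) and SIGTERM (bit 14) are pending but SIGTERM is
 * blocked, ready = signal & ~blocked leaves only the SIGINT bit set,
 * so has_pending_signals() is true and TIF_SIGPENDING should be set:
 *
 *	sigset_t pending, blocked;
 *	siginitset(&pending, sigmask(SIGINT) | sigmask(SIGTERM));
 *	siginitset(&blocked, sigmask(SIGTERM));
 *	BUG_ON(!has_pending_signals(&pending, &blocked));
 */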
fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
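/*
 * Example of the bit arithmetic above: if x = s[0] &~ m[0] comes out
 * as 0x84 (bits 2 and 7 set, i.e. SIGQUIT and SIGFPE pending and not
 * blocked), then ~x has its lowest zero bit at position 2, so
 * ffz(~x) == 2 and next_signal() reports sig = 2 + 1 = SIGQUIT.
 * Lower-numbered signals are thus always serviced first within a word.
 */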
static struct sigqueue *__sigqueue_alloc(void)
{
	struct sigqueue *q = NULL;

	if (atomic_read(&current->user->sigpending) <
			current->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
	if (q) {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
#warning MEF PLANETLAB: q->user = get_uid(current->user); is something new in Fedora Core.
		q->user = get_uid(current->user);
		atomic_inc(&q->user->sigpending);
	}
	return(q);
}
static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */
void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand = tsk->sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	spin_lock(&sighand->siglock);
	if (atomic_dec_and_test(&sig->count)) {
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below. */
	}
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.  We delayed
		 * calling exit_itimers until after flush_sigqueue, just in
		 * case our thread-local pending queue contained a queued
		 * timer signal that would have been cleared in
		 * exit_itimers.  When that called sigqueue_free, it would
		 * attempt to re-take the tasklist_lock and deadlock.  This
		 * can never happen if we ensure that all queues the
		 * timer's signal might be queued on have been flushed
		 * first.  The shared_pending queue, and our own pending
		 * queue are the only queues the timer could be on, since
		 * there are no other threads left in the group and timer
		 * signals are constrained to threads inside the group.
		 */
		exit_itimers(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}
void exit_signal(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

EXPORT_SYMBOL_GPL(flush_signal_handlers);
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
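/*
 * A hypothetical driver-side sketch of this interface (the notifier
 * name and its private data are made up for illustration): while the
 * block is in effect, __dequeue_signal() consults the notifier before
 * letting any signal in notifier_mask through.
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		// nonzero: deliver the signal after all; 0: keep it blocked
 *		return !dev->busy;
 *	}
 *
 *	sigset_t mask;
 *	siginitset(&mask, sigmask(SIGINT) | sigmask(SIGTERM));
 *	block_all_signals(my_notifier, dev, &mask);
 *	... critical section ...
 *	unblock_all_signals();
 */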
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private){
		do_schedule_next_timer(info);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * If resume is set, we want to wake it up in the TASK_STOPPED case.
	 * We don't check for TASK_STOPPED because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By calling wake_up_process any time resume is set, we ensure
	 * the process will wake up and handle its stop or death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (sig < 0 || sig > _NSIG)
		return error;
	error = -EPERM;
	if ((!info || ((unsigned long)info != 1 &&
			(unsigned long)info != 2 && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;
	return security_task_kill(t, info, sig);
}
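/*
 * The (a ^ b) chains above are just inequality tests: a ^ b is 0 iff
 * a == b, so the signal is allowed when at least one of the sender's
 * euid/uid matches one of the target's uid/suid.  For example, a
 * sender with euid 1000 signalling a target with uid 1000 gives
 *
 *	(current->euid ^ t->uid) == (1000 ^ 1000) == 0
 *
 * which short-circuits the && chain before the capable(CAP_KILL) test.
 */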
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     struct task_struct *parent);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if ((unsigned long)info == 2)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	if (atomic_read(&t->user->sigpending) <
			t->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);

	if (q) {
		q->flags = 0;
#warning MEF PLANETLAB: q->user = get_uid(t->user); is something new in Fedora Core.
		q->user = get_uid(t->user);
		atomic_inc(&q->user->sigpending);
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else {
		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
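/*
 * Example of the LEGACY_QUEUE rule: if SIGUSR1 (< SIGRTMIN) is already
 * pending for a task, a second SIGUSR1 is simply dropped -- classic
 * signals coalesce, while real-time signals never hit this test.  In a
 * userspace sketch like the following (with the signals blocked at the
 * receiver), the target would later dequeue SIGUSR1 once but SIGRTMIN
 * three times:
 *
 *	kill(pid, SIGUSR1); kill(pid, SIGUSR1); kill(pid, SIGUSR1);
 *	sigqueue(pid, SIGRTMIN, v);
 *	sigqueue(pid, SIGRTMIN, v);
 *	sigqueue(pid, SIGRTMIN, v);
 */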
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
#ifdef CONFIG_SMP
	if (!spin_is_locked(&t->sighand->siglock))
		BUG();
#endif

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
		sigdelset(&t->blocked, sig);
		recalc_sigpending_tsk(t);
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, (void *)2, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
#define wants_signal(sig, p, mask) 			\
	(!sigismember(&(p)->blocked, sig)		\
	 && !((p)->state & mask)			\
	 && !((p)->flags & PF_EXITING)			\
	 && (task_curr(p) || !signal_pending(p)))
static void
__group_complete_signal(int sig, struct task_struct *p, unsigned int mask)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p, mask))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t, mask)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !p->signal->group_exit &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->group_exit = 1;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned int mask;
	int ret = 0;

#ifdef CONFIG_SMP
	if (!spin_is_locked(&p->sighand->siglock))
		BUG();
#endif
	handle_stop_signal(sig, p);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Don't bother zombies and stopped tasks (but
	 * SIGKILL will punch through stopped state)
	 */
	mask = TASK_DEAD | TASK_ZOMBIE;
	if (sig != SIGKILL)
		mask |= TASK_STOPPED;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p, mask);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->state & (TASK_ZOMBIE|TASK_DEAD))
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		sigaddset(&t->pending.signal, SIGKILL);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		signal_wake_up(t, 1);
	}
}
/*
 * Must be called with the tasklist_lock held for reading!
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	if (!vx_check(vx_task_xid(p), VX_ADMIN|VX_WATCH|VX_IDENT))
		return -ESRCH;

	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && p->sighand) {
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}

	return ret;
}
/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p;
	struct list_head *l;
	struct pid *pid;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	}
	return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}
/*
 * kill_sl_info() sends a signal to the session leader: this is used
 * to send SIGHUP to the controlling process of a terminal when
 * the connection is lost.
 */

int
kill_sl_info(int sig, struct siginfo *info, pid_t sid)
{
	int err, retval = -EINVAL;
	struct pid *pid;
	struct list_head *l;
	struct task_struct *p;

	if (sid <= 0)
		goto out;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	for_each_task_pid(sid, PIDTYPE_SID, p, l, pid) {
		if (!p->signal->leader)
			continue;
		err = group_send_sig_info(sig, info, p);
		if (retval)
			retval = err;
	}
	read_unlock(&tasklist_lock);
out:
	return retval;
}
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
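/*
 * The pid conventions above mirror kill(2) as seen from userspace; an
 * illustrative sketch:
 *
 *	kill(1234, SIGTERM);	// one process (thread group) 1234
 *	kill(0, SIGTERM);	// every process in the caller's group
 *	kill(-5678, SIGTERM);	// every process in process group 5678
 *	kill(-1, SIGTERM);	// everything except init and the caller
 */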
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (sig < 0 || sig > _NSIG)
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_sl(pid_t sess, int sig, int priv)
{
	return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}
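/*
 * In these wrappers "priv" selects the fake siginfo pointer: nonzero
 * means (void *)1, i.e. a signal "from the kernel" that bypasses the
 * uid checks in check_kill_permission(); zero means a NULL info, which
 * is checked like a user-sent kill().  Typical in-kernel usage:
 *
 *	kill_proc(pid, SIGKILL, 1);	// privileged, always allowed
 *	kill_pg(pgrp, SIGHUP, 0);	// checked like a user signal
 */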
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc()))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}
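/*
 * A hedged userspace sketch of the scenario described above: the
 * sigqueue slot is preallocated at timer_create() time, so a later
 * timer expiry can always queue its signal reliably.
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	timer_t tid;
 *	struct sigevent sev;
 *
 *	sev.sigev_notify = SIGEV_SIGNAL;
 *	sev.sigev_signo  = SIGRTMIN;
 *	// May fail with EAGAIN if the kernel-side preallocation fails.
 *	if (timer_create(CLOCK_REALTIME, &sev, &tid) < 0)
 *		perror("timer_create");
 */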
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		read_lock(&tasklist_lock);
		spin_lock_irqsave(q->lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(q->lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	unsigned int mask;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/*
	 * Don't bother zombies and stopped tasks (but
	 * SIGKILL will punch through stopped state)
	 */
	mask = TASK_DEAD | TASK_ZOMBIE;
	if (sig != SIGKILL)
		mask |= TASK_STOPPED;

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p, mask);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}
/*
 * Joy. Or not. Pthread wants us to wake up every thread
 * in our parent group.
 */
static void __wake_up_parent(struct task_struct *p,
			     struct task_struct *parent)
{
	struct task_struct *tsk = parent;

	/*
	 * Fortunately this is not necessary for thread groups:
	 */
	if (p->tgid == tsk->tgid) {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		return;
	}

	do {
		wake_up_interruptible_sync(&tsk->wait_chldexit);
		tsk = next_thread(tsk);
		if (tsk->signal != parent->signal)
			BUG();
	} while (tsk != parent);
}
/*
 * Let a parent know about a status change of a child.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	int why, status;
	struct sighand_struct *psig;

	if (sig == -1)
		BUG();

	BUG_ON(tsk->group_leader != tsk && tsk->group_leader->state != TASK_ZOMBIE && !tsk->ptrace);
	BUG_ON(tsk->group_leader == tsk && !thread_group_empty(tsk) && !tsk->ptrace);

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime;
	info.si_stime = tsk->stime;

	status = tsk->exit_code & 0x7f;
	why = SI_KERNEL;	/* shouldn't happen */
	switch (tsk->state) {
	case TASK_STOPPED:
		/* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
		if (tsk->ptrace & PT_PTRACED)
			why = CLD_TRAPPED;
		else
			why = CLD_STOPPED;
		break;

	default:
		if (tsk->exit_code & 0x80)
			why = CLD_DUMPED;
		else if (tsk->exit_code & 0x7f)
			why = CLD_KILLED;
		else {
			why = CLD_EXITED;
			status = tsk->exit_code >> 8;
		}
		break;
	}
	info.si_code = why;
	info.si_status = status;

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD && tsk->state != TASK_STOPPED &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (sig > 0 && sig <= _NSIG)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
/*
 * We need the tasklist lock because it's the only
 * thing that protects our "parent" pointer.
 *
 * exit.c calls "do_notify_parent()" directly, because
 * it already has the tasklist lock.
 */
void
notify_parent(struct task_struct *tsk, int sig)
{
	if (sig != -1) {
		read_lock(&tasklist_lock);
		do_notify_parent(tsk, sig);
		read_unlock(&tasklist_lock);
	}
}
static void
do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = tsk->utime;
	info.si_stime = tsk->stime;

	info.si_status = tsk->exit_code & 0x7f;
	info.si_code = CLD_STOPPED;

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
int print_fatal_signals = 0;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	int i;
	unsigned char insn;

	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, current->pid, signr);

#ifdef __i386__
	printk("code at %08lx: ", regs->eip);
	for (i = 0; i < 16; i++) {
		__get_user(insn, (unsigned char *)(regs->eip + i));
		printk("%02x ", insn);
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, current->parent);
		read_unlock(&tasklist_lock);
	}
	else if (stop_count == 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader,
					 current->group_leader->real_parent);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 */
static void
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	/* spin_lock_irq(&sighand->siglock) is now done in caller */

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (unlikely(sig->group_exit)) {
			/*
			 * There is a group exit in progress now.
			 * We'll just ignore the stop and process the
			 * associated fatal signal.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (unlikely(sig_avoid_stop_race())) {
			/*
			 * Either a SIGCONT or a SIGKILL signal was
			 * posted in the siglock-not-held window.
			 */
			spin_unlock_irq(&sighand->siglock);
			read_unlock(&tasklist_lock);
			return;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (t->state < TASK_STOPPED) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
}
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->group_exit)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((signr == SIGSEGV) && print_fatal_signals) {
			spin_unlock_irq(&current->sighand->siglock);
			print_fatal_signal(regs, signr);
			spin_lock_irq(&current->sighand->siglock);
		}
		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/*
			 * If there is a group stop in progress,
			 * we must participate in the bookkeeping.
			 */
			if (current->signal->group_stop_count > 0)
				--current->signal->group_stop_count;

			/* Let the debugger run.  */
			current->exit_code = signr;
			current->last_siginfo = info;
			set_current_state(TASK_STOPPED);
			spin_unlock_irq(&current->sighand->siglock);
			notify_parent(current, SIGCHLD);
			schedule();

			current->last_siginfo = NULL;

			/* We're back.  Did the debugger cancel the sig?  */
			spin_lock_irq(&current->sighand->siglock);
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) /* Run the handler.  */
			break; /* will return non-zero "signr" value */

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr == SIGSTOP) {
				do_signal_stop(signr); /* releases siglock */
				goto relock;
			}
			spin_unlock_irq(&current->sighand->siglock);

			/* signals can be posted during this window */

			if (is_orphaned_pgrp(process_group(current)))
				goto relock;

			spin_lock_irq(&current->sighand->siglock);
			if (unlikely(sig_avoid_stop_race())) {
				/*
				 * Either a SIGCONT or a SIGKILL signal was
				 * posted in the siglock-not-held window.
				 */
				continue;
			}

			do_signal_stop(signr); /* releases siglock */
			goto relock;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (print_fatal_signals)
			print_fatal_signal(regs, signr);
		if (sig_kernel_coredump(signr) &&
		    do_coredump((long)signr, signr, regs)) {
			/*
			 * That killed all other threads in the group and
			 * synchronized with their demise, so there can't
			 * be any more left to kill now.  The group_exit
			 * flags are set by do_coredump.  Note that
			 * thread_group_empty won't always be true yet,
			 * because those threads were blocked in __exit_mm
			 * and we just let them go to finish dying.
			 */
			const int code = signr | 0x80;
			BUG_ON(!current->signal->group_exit);
			BUG_ON(current->signal->group_exit_code != code);
			do_exit(code);
			/* NOTREACHED */
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(force_sig_info);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_pg_info);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
EXPORT_SYMBOL(notify_parent);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(send_group_sig_info);
EXPORT_SYMBOL(sigqueue_alloc);
EXPORT_SYMBOL(sigqueue_free);
EXPORT_SYMBOL(send_sigqueue);
EXPORT_SYMBOL(send_group_sigqueue);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}
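/*
 * A minimal sketch of how a kernel thread might use this interface to
 * let only SIGKILL through, with every other signal blocked:
 *
 *	sigset_t all_but_kill;
 *
 *	siginitsetinv(&all_but_kill, sigmask(SIGKILL));
 *	sigprocmask(SIG_SETMASK, &all_but_kill, NULL);
 */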
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
	} else {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);
	}

	if (oset) {
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping, so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
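/*
 * A hedged userspace sketch of the synchronous-wait pattern this
 * syscall implements (glibc exposes it as sigtimedwait(2)):
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { 5, 0 };	// 5 second timeout
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);  // keep it pending, not delivered
 *	int sig = sigtimedwait(&set, &si, &ts);
 *	if (sig == SIGUSR1)
 *		printf("SIGUSR1 from pid %d\n", (int)si.si_pid);
 *	// sig < 0 with errno EAGAIN on timeout, EINTR if interrupted
 */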
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but does not belong to the target process anymore.  This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && (p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
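/*
 * Userspace sketch (tgkill has no dedicated glibc wrapper in this era,
 * so it is usually invoked via syscall(2); pthread implementations use
 * it to target one thread of a process):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	// Send SIGUSR1 to thread `tid` of process `tgid`; fails with
 *	// ESRCH if the tid has been recycled into another process.
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */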
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
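/*
 * This is the syscall behind userspace sigqueue(3).  The si_code >= 0
 * check is why glibc fills in SI_QUEUE (a negative code): a caller
 * cannot forge kernel-generated codes.  A hedged userspace sketch:
 *
 *	#include <signal.h>
 *
 *	union sigval v = { .sival_int = 42 };
 *	// glibc fills in si_code = SI_QUEUE, si_pid and si_uid
 *	sigqueue(pid, SIGRTMIN, v);
 */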
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (verify_area(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly
		 *  	  old code may have been written using ss_flags==0
		 *	  to mean ss_flags==SS_ONSTACK (as this was the only
		 *	  way that worked) - this fix preserves that older
		 *	  mechanism
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
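/*
 * Userspace sketch of the corresponding sigaltstack(2) call, typically
 * paired with SA_ONSTACK so a SIGSEGV handler can run even after the
 * normal stack has overflowed:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss;
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;	// must be >= MINSIGSTKSZ
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 */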
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
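/*
 * SA_ONESHOT | SA_NOMASK gives this syscall the old System V signal()
 * semantics: the handler is reset to SIG_DFL when it fires and the
 * signal is not blocked while the handler runs.  Hence the classic
 * userspace pattern of re-installing the handler first thing:
 *
 *	void handler(int sig)
 *	{
 *		signal(sig, handler);	// re-arm: one-shot semantics
 *		...
 *	}
 */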
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}