/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * 1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02  Jim Houston - Concurrent Computer Corp.
 *	Changes to use preallocated sigqueue structures
 *	to allow signals to be sent reliably.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>

extern void k_getrusage(struct task_struct *, int, struct rusage *);

/*
 * SLAB caches for signal bits.
 */
static kmem_cache_t *sigqueue_cachep;

/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *	ignore		- Nothing Happens
 *	terminate	- kill the process, i.e. all threads in the group,
 *			  similar to exit_group.  The group leader (only)
 *			  reports WIFSIGNALED status to its parent.
 *	coredump	- write a core dump file describing all threads using
 *			  the same mm and then kill all those threads
 *	stop		- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	| POSIX signal       | default action   |
 *	+--------------------+------------------+
 *	| SIGHUP             | terminate        |
 *	| SIGINT             | terminate        |
 *	| SIGQUIT            | coredump         |
 *	| SIGILL             | coredump         |
 *	| SIGTRAP            | coredump         |
 *	| SIGABRT/SIGIOT     | coredump         |
 *	| SIGBUS             | coredump         |
 *	| SIGFPE             | coredump         |
 *	| SIGKILL            | terminate(+)     |
 *	| SIGUSR1            | terminate        |
 *	| SIGSEGV            | coredump         |
 *	| SIGUSR2            | terminate        |
 *	| SIGPIPE            | terminate        |
 *	| SIGALRM            | terminate        |
 *	| SIGTERM            | terminate        |
 *	| SIGCHLD            | ignore           |
 *	| SIGCONT            | ignore(*)        |
 *	| SIGSTOP            | stop(*)(+)       |
 *	| SIGTSTP            | stop(*)          |
 *	| SIGTTIN            | stop(*)          |
 *	| SIGTTOU            | stop(*)          |
 *	| SIGURG             | ignore           |
 *	| SIGXCPU            | coredump         |
 *	| SIGXFSZ            | coredump         |
 *	| SIGVTALRM          | terminate        |
 *	| SIGPROF            | terminate        |
 *	| SIGPOLL/SIGIO      | terminate        |
 *	| SIGSYS/SIGUNUSED   | coredump         |
 *	| SIGSTKFLT          | terminate        |
 *	| SIGWINCH           | ignore           |
 *	| SIGPWR             | terminate        |
 *	| SIGRTMIN-SIGRTMAX  | terminate        |
 *	+--------------------+------------------+
 *	| non-POSIX signal   | default action   |
 *	+--------------------+------------------+
 *	| SIGEMT             | coredump         |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
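
/*
 * Illustrative userspace sketch (added for exposition, not part of the
 * kernel source): it demonstrates the "(*)" rule above -- a generated
 * SIGCONT discards a pending stop signal even while that stop signal is
 * blocked.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGTSTP);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* hold SIGTSTP pending */

	kill(getpid(), SIGTSTP);		/* queued, not delivered */
	sigpending(&set);
	printf("SIGTSTP pending: %d\n", sigismember(&set, SIGTSTP)); /* 1 */

	kill(getpid(), SIGCONT);		/* clears queued stop signals */
	sigpending(&set);
	printf("SIGTSTP pending: %d\n", sigismember(&set, SIGTSTP)); /* 0 */
	return 0;
}
#endif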
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL) | M(SIGSTOP) )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT) | M(SIGILL)  | M(SIGTRAP) | M(SIGABRT) | \
	M(SIGFPE)  | M(SIGSEGV) | M(SIGBUS)  | M(SIGSYS)  | \
	M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )

#define sig_kernel_only(sig) \
		(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
		(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
		(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
		(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))

#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
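
/*
 * Worked example (illustrative; assumes the x86 numbering where SIGTSTP
 * is signal 20 and SIGRTMIN is well above it):  sig_kernel_stop(SIGTSTP)
 * expands to
 *
 *	((20 < SIGRTMIN) && (M(20) & SIG_KERNEL_STOP_MASK))
 *
 * M(20) is 1UL << 19, and SIG_KERNEL_STOP_MASK has that bit set via
 * M(SIGTSTP), so the test is nonzero.  For any signal >= SIGRTMIN the
 * first clause fails, so real-time signals never match these masks.
 */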
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return   handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}

/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}

static struct sigqueue *__sigqueue_alloc(struct task_struct *t, int flags)
{
	struct sigqueue *q = NULL;

	if (atomic_read(&t->user->sigpending) <
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (q) {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->lock = NULL;
		q->user = get_uid(t->user);
		atomic_inc(&q->user->sigpending);
	}
	return(q);
}

static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand = tsk->sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	spin_lock(&sighand->siglock);
	if (atomic_dec_and_test(&sig->count)) {
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below.  */
	}
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.  We delayed
		 * calling exit_itimers until after flush_sigqueue, just in
		 * case our thread-local pending queue contained a queued
		 * timer signal that would have been cleared in
		 * exit_itimers.  When that called sigqueue_free, it would
		 * attempt to re-take the tasklist_lock and deadlock.  This
		 * can never happen if we ensure that all queues the
		 * timer's signal might be queued on have been flushed
		 * first.  The shared_pending queue, and our own pending
		 * queue are the only queues the timer could be on, since
		 * there are no other threads left in the group and timer
		 * signals are constrained to threads inside the group.
		 */
		exit_itimers(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}

void exit_signal(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}
	recalc_sigpending();

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private){
		do_schedule_next_timer(info);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int user;
	int error = -EINVAL;

	if (sig < 0 || sig > _NSIG)
		return error;

	user = (!info || ((unsigned long)info != 1 &&
		(unsigned long)info != 2 && SI_FROMUSER(info)));

	error = -EPERM;
	if (user && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	error = -ESRCH;
	if (user && !vx_check(vx_task_xid(t), VX_ADMIN|VX_IDENT))
		return error;

	return security_task_kill(t, info, sig);
}

static void do_notify_parent_cldstop(struct task_struct *tsk,
				     struct task_struct *parent,
				     int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_STOPPED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
					CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_CONTINUED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
					CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if ((unsigned long)info == 2)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC);
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else {
		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
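
/*
 * Illustrative userspace sketch (added for exposition, not part of the
 * kernel source): legacy signals collapse while blocked (the LEGACY_QUEUE
 * test above), whereas real-time signals queue each instance.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t rt_count, usr1_count;

static void on_rt(int sig)   { rt_count++; }
static void on_usr1(int sig) { usr1_count++; }

int main(void)
{
	struct sigaction sa;
	sigset_t set;
	int i;

	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sa.sa_handler = on_usr1;
	sigaction(SIGUSR1, &sa, NULL);
	sa.sa_handler = on_rt;
	sigaction(SIGRTMIN, &sa, NULL);

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	for (i = 0; i < 3; i++) {
		kill(getpid(), SIGUSR1);	/* collapses to one pending */
		kill(getpid(), SIGRTMIN);	/* queues each time */
	}

	sigprocmask(SIG_UNBLOCK, &set, NULL);	/* deliver everything */
	printf("SIGUSR1 handled %d time(s), SIGRTMIN %d time(s)\n",
	       (int)usr1_count, (int)rt_count);	/* typically 1 and 3 */
	return 0;
}
#endif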
int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
	assert_spin_locked(&t->sighand->siglock);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
		sigdelset(&t->blocked, sig);
		recalc_sigpending_tsk(t);
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, (void *)2, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
#define wants_signal(sig, p, mask) 			\
	(!sigismember(&(p)->blocked, sig)		\
	 && !((p)->state & mask)			\
	 && !((p)->flags & PF_EXITING)			\
	 && (task_curr(p) || !signal_pending(p)))

static void
__group_complete_signal(int sig, struct task_struct *p)
{
	unsigned int mask;
	struct task_struct *t;

	/*
	 * Don't bother traced and stopped tasks (but
	 * SIGKILL will punch through that).
	 */
	mask = TASK_STOPPED | TASK_TRACED;
	if (sig == SIGKILL)
		mask = 0;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p, mask))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t, mask)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		sigaddset(&t->pending.signal, SIGKILL);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		signal_wake_up(t, 1);
	}
}

/*
 * Must be called with the tasklist_lock held for reading!
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && p->sighand) {
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}

	return ret;
}

/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (sig < 0 || sig > _NSIG)
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}
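
/*
 * Illustrative sketch of the intended lifecycle (a hypothetical in-kernel
 * caller written in the style of the POSIX timer code; "my_sigq" and the
 * my_timer_* functions are assumptions made up for the example).
 */
#if 0
static struct sigqueue *my_sigq;	/* hypothetical */

static int my_timer_create(void)
{
	my_sigq = sigqueue_alloc();
	if (!my_sigq)
		return -EAGAIN;		/* report failure up front */
	my_sigq->info.si_signo = SIGALRM;
	my_sigq->info.si_code = SI_TIMER;
	return 0;
}

static void my_timer_expired(struct task_struct *owner)
{
	/* Cannot fail with -EAGAIN: the queue entry already exists. */
	send_sigqueue(SIGALRM, my_sigq, owner);
}

static void my_timer_delete(void)
{
	sigqueue_free(my_sigq);
}
#endif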
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		read_lock(&tasklist_lock);
		spin_lock_irqsave(q->lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(q->lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}

int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}

int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}

/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (sig > 0 && sig <= _NSIG)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
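
/*
 * Illustrative userspace sketch (added for exposition, not part of the
 * kernel source): with SIGCHLD set to SIG_IGN, children are reaped
 * automatically and wait() fails with ECHILD, matching the POSIX.1
 * semantics described above.
 */
#if 0
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* auto-reap children */
	if (fork() == 0)
		_exit(0);		/* child: no zombie remains */
	sleep(1);
	if (wait(NULL) < 0 && errno == ECHILD)
		puts("child was reaped automatically");
	return 0;
}
#endif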
static void
do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
			 int why)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (likely(current->ptrace & PT_PTRACED) &&
	    likely(current->parent != current->real_parent ||
		   !(current->ptrace & PT_ATTACHED)) &&
	    (likely(current->parent->signal != current->signal) ||
	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
		do_notify_parent_cldstop(current, current->parent,
					 CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, current->parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
	else if (stop_count == 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader,
					 current->group_leader->real_parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = current->signal->group_exit_code = signr;
		set_current_state(TASK_STOPPED);
		sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
			/*
			 * Another stop or continue happened while we
			 * didn't have the lock.  We can just swallow this
			 * signal now.  If we raced with a SIGCONT, that
			 * should have just cleared it now.  If we raced
			 * with another processor delivering a stop signal,
			 * then the SIGCONT that wakes us up should clear it.
			 */
			read_unlock(&tasklist_lock);
			spin_unlock_irq(&sighand->siglock);
			return 0;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (t->state < TASK_STOPPED) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
	return 1;
}

/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		/* virtual init is protected against user signals */
		if ((info->si_code == SI_USER) &&
			vx_current_initpid(current->pid))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_orphaned_pgrp(process_group(current)))
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}

#endif

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and stuff.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}
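
/*
 * Illustrative sketch (a hypothetical kernel-thread caller invented for
 * the example, not part of this file): using the in-kernel sigprocmask()
 * above.  Per the NOTE, this path will happily block even SIGKILL.
 */
#if 0
static void my_kthread_block_signals(void)
{
	sigset_t all;

	sigfillset(&all);			/* may include SIGKILL here */
	sigprocmask(SIG_BLOCK, &all, NULL);	/* takes siglock internally */
}
#endif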
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif

asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping, so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}

asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but it's not belonging to the target process anymore. This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && (p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
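
/*
 * Illustrative userspace sketch (added for exposition, not part of the
 * kernel source): directing a signal at one specific thread through
 * tgkill(2) via syscall(2); "my_tgkill" and "some_tid" are hypothetical
 * names made up for the example.
 */
#if 0
#define _GNU_SOURCE
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int my_tgkill(pid_t tgid, pid_t tid, int sig)
{
	return syscall(SYS_tgkill, tgid, tid, sig);
}

/* e.g. my_tgkill(getpid(), some_tid, 0) probes whether the thread with
 * ID some_tid still exists in this thread group (the null signal). */
#endif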
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
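
/*
 * Illustrative userspace sketch (added for exposition, not part of the
 * kernel source): queueing a signal with a payload via sigqueue(3), which
 * reaches the kernel through rt_sigqueueinfo.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *si, void *ctx)
{
	/* si->si_value carries the payload queued below */
	printf("got %d, payload %d\n", sig, si->si_value.sival_int);
}

int main(void)
{
	struct sigaction sa;
	union sigval v = { .sival_int = 42 };	/* arbitrary payload */

	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGRTMIN, &sa, NULL);
	return sigqueue(getpid(), SIGRTMIN, v);	/* si_code == SI_QUEUE */
}
#endif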
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (verify_area(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
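
/*
 * Illustrative userspace sketch (added for exposition, not part of the
 * kernel source): installing an alternate stack and a handler that runs
 * on it -- the usual client of do_sigaltstack() above.  Note ss_flags==0,
 * which the compatibility note above treats as "enable".
 */
#if 0
#include <signal.h>
#include <stdlib.h>

static void handler(int sig) { /* runs on the alternate stack */ }

int main(void)
{
	stack_t ss = {
		.ss_sp = malloc(SIGSTKSZ),
		.ss_size = SIGSTKSZ,
		.ss_flags = 0,
	};
	struct sigaction sa;

	sigaltstack(&ss, NULL);
	sa.sa_handler = handler;
	sa.sa_flags = SA_ONSTACK;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
	return 0;
}
#endif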
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}