/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/posix-timers.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>

/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;

/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
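
/*
 * Illustrative sketch (not part of the original file; compiled out):
 * a user-space view of the stop/continue interplay described above.
 * The function name is hypothetical.
 */
#if 0
#include <signal.h>
#include <unistd.h>

static void example_stop_then_continue(pid_t pid)
{
	/* Queue a stop signal; the target may not have stopped yet. */
	kill(pid, SIGTSTP);
	/*
	 * SIGCONT clears the pending SIGTSTP and resumes the group,
	 * even if the target blocks or ignores SIGCONT itself.
	 */
	kill(pid, SIGCONT);
}
#endif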

#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL)  |  M(SIGSTOP)                                  )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP)  |  M(SIGTSTP)  |  M(SIGTTIN)  |  M(SIGTTOU)    )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT)  |  M(SIGILL)   |  M(SIGTRAP)  |  M(SIGABRT)  | \
	M(SIGFPE)   |  M(SIGSEGV)  |  M(SIGBUS)   |  M(SIGSYS)   | \
	M(SIGXCPU)  |  M(SIGXFSZ)  |  M_SIGEMT                     )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT)  |  M(SIGCHLD)  |  M(SIGWINCH) |  M(SIGURG)     )

#define sig_kernel_only(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))

#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
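
/*
 * Illustrative sketch (not part of the original file; compiled out):
 * how the classification macros above are meant to be read.  The
 * helper name is hypothetical.
 */
#if 0
static int example_default_action(struct task_struct *t, int sig)
{
	if (sig_kernel_ignore(sig))
		return 0;	/* default action is to do nothing */
	if (sig_kernel_stop(sig))
		return 1;	/* default action is a group stop */
	if (sig_kernel_coredump(sig))
		return 2;	/* default action is terminate + core dump */
	/*
	 * Everything else (including all RT signals) terminates by
	 * default; sig_fatal() additionally checks that the handler
	 * is still SIG_DFL for this task.
	 */
	return sig_fatal(t, sig);
}
#endif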

static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return   handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}

/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}

static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;

	atomic_inc(&t->user->sigpending);
	if (override_rlimit ||
	    atomic_read(&t->user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&t->user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(t->user);
	}
	return q;
}

static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand = tsk->sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	spin_lock(&sighand->siglock);
	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count)) {
		posix_cpu_timers_exit_group(tsk);
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->sched_time += tsk->sched_time;
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below.  */
	}
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.  We delayed
		 * calling exit_itimers until after flush_sigqueue, just in
		 * case our thread-local pending queue contained a queued
		 * timer signal that would have been cleared in
		 * exit_itimers.  When that called sigqueue_free, it would
		 * attempt to re-take the tasklist_lock and deadlock.  This
		 * can never happen if we ensure that all queues the
		 * timer's signal might be queued on have been flushed
		 * first.  The shared_pending queue, and our own pending
		 * queue are the only queues the timer could be on, since
		 * there are no other threads left in the group and timer
		 * signals are constrained to threads inside the group.
		 */
		exit_itimers(sig);
		exit_thread_group_keys(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}

void exit_signal(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

EXPORT_SYMBOL_GPL(flush_signal_handlers);

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
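
/*
 * Illustrative usage sketch (hypothetical driver code, not part of
 * the original file; compiled out).  my_notifier/my_dev/want_signals
 * are made-up names.
 */
#if 0
static int my_notifier(void *priv)
{
	struct my_dev *dev = priv;
	/* Non-zero lets the signal through after all; 0 blocks it. */
	return dev->want_signals;
}

static void my_grab_console(struct my_dev *dev)
{
	sigset_t mask;

	sigfillset(&mask);	/* ask to intercept every signal */
	block_all_signals(my_notifier, dev, &mask);
	/* ... critical section ... */
	unblock_all_signals();
}
#endif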

static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	/* SIGKILL must have priority, otherwise it is quite easy
	 * to create an unkillable process, sending sig < SIGKILL
	 * to self */
	if (unlikely(sigismember(&pending->signal, SIGKILL))) {
		if (!sigismember(mask, SIGKILL))
			sig = SIGKILL;
	}

	if (likely(!sig))
		sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private){
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
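
/*
 * Illustrative sketch (not part of the original file; compiled out):
 * the locking contract above, as a hypothetical caller would obey it.
 */
#if 0
static int example_take_one_signal(siginfo_t *info)
{
	int signr;

	spin_lock_irq(&current->sighand->siglock);
	signr = dequeue_signal(current, &current->blocked, info);
	spin_unlock_irq(&current->sighand->siglock);
	return signr;	/* 0 if nothing was pending and deliverable */
}
#endif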

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int user;
	int error = -EINVAL;

	if (!valid_signal(sig))
		return error;

	user = (!info || ((unsigned long)info != 1 &&
		(unsigned long)info != 2 && SI_FROMUSER(info)));

	error = -EPERM;
	if (user && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	error = -ESRCH;
	if (user && !vx_check(vx_task_xid(t), VX_ADMIN|VX_IDENT))
		return error;

	error = security_task_kill(t, info, sig);
	if (!error)
		audit_signal_info(sig, t); /* Let audit system see the signal */
	return error;
}

static void do_notify_parent_cldstop(struct task_struct *tsk,
				     struct task_struct *parent,
				     int why);

/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_STOPPED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
					CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_CONTINUED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
					CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if ((unsigned long)info == 2)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     ((unsigned long) info < 2 ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else {
		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		    && info->si_code != SI_USER)
			/*
			 * Queue overflow, abort.  We may abort if the signal was rt
			 * and sent by user using something other than kill().
			 */
			return -EAGAIN;
		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}

#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
	assert_spin_locked(&t->sighand->siglock);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
		sigdelset(&t->blocked, sig);
		recalc_sigpending_tsk(t);
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
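
/*
 * Illustrative sketch (not part of the original file; compiled out):
 * the typical caller of force_sig_info() is an architecture fault
 * handler forcing SIGSEGV on the current task.  example_fault is a
 * made-up name.
 */
#if 0
static void example_fault(void __user *addr)
{
	siginfo_t info;

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_MAPERR;	/* address not mapped */
	info.si_addr  = addr;
	force_sig_info(SIGSEGV, &info, current);
}
#endif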

void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, (void *)2, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
#define wants_signal(sig, p, mask)			\
	(!sigismember(&(p)->blocked, sig)		\
	 && !((p)->state & mask)			\
	 && !((p)->flags & PF_EXITING)			\
	 && (task_curr(p) || !signal_pending(p)))

static void
__group_complete_signal(int sig, struct task_struct *p)
{
	unsigned int mask;
	struct task_struct *t;

	/*
	 * Don't bother traced and stopped tasks (but
	 * SIGKILL will punch through that).
	 */
	mask = TASK_STOPPED | TASK_TRACED;
	if (sig == SIGKILL)
		mask = 0;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p, mask))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t, mask)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}

/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		sigaddset(&t->pending.signal, SIGKILL);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		signal_wake_up(t, 1);
	}
}

/*
 * Must be called with the tasklist_lock held for reading!
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && p->sighand) {
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}

	return ret;
}

/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);

	return error;
}

int print_fatal_signals = 0;

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, current->pid, signr);

#ifdef __i386__
	printk("code at %08lx: ", regs->eip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			__get_user(insn, (unsigned char *)(regs->eip + i));
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	show_regs(regs);
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}
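
/*
 * Illustrative sketch (not part of the original file; compiled out):
 * a kernel-internal caller nudging one specific thread.  A priv
 * argument of 1 means "from the kernel", so no sender info is faked.
 * example_kick_thread is a made-up name.
 */
#if 0
static void example_kick_thread(struct task_struct *p)
{
	send_sig(SIGHUP, p, 1);
}
#endif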

/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);

	return ret;
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return q;
}
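
/*
 * Illustrative sketch (not part of the original file; compiled out):
 * the preallocation pattern described above, roughly as the POSIX
 * timer code uses it.  The names here are hypothetical.
 */
#if 0
static struct sigqueue *example_timer_create(void)
{
	/* Allocate at creation time so expiry can never hit EAGAIN. */
	struct sigqueue *q = sigqueue_alloc();

	if (!q)
		return NULL;	/* caller reports -EAGAIN to user space */
	return q;
}

static void example_timer_expiry(struct sigqueue *q, struct task_struct *p)
{
	/* Re-uses the preallocated entry; repeats bump si_overrun. */
	send_group_sigqueue(SIGALRM, q, p);
}
#endif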

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		read_lock(&tasklist_lock);
		spin_lock_irqsave(q->lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(q->lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}

int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}

/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}

static void
do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
			 int why)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (likely(current->ptrace & PT_PTRACED) &&
	    likely(current->parent != current->real_parent ||
		   !(current->ptrace & PT_ATTACHED)) &&
	    (likely(current->parent->signal != current->signal) ||
	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
		do_notify_parent_cldstop(current, current->parent,
					 CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, current->parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
	else if (stop_count == 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader,
					 current->group_leader->real_parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}

/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = current->signal->group_exit_code = signr;
		set_current_state(TASK_STOPPED);
		sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
			/*
			 * Another stop or continue happened while we
			 * didn't have the lock.  We can just swallow this
			 * signal now.  If we raced with a SIGCONT, that
			 * should have just cleared it now.  If we raced
			 * with another processor delivering a stop signal,
			 * then the SIGCONT that wakes us up should clear it.
			 */
			read_unlock(&tasklist_lock);
			return 0;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (t->state < TASK_STOPPED) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
	return 1;
}

/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}

int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((signr == SIGSEGV) && print_fatal_signals) {
			spin_unlock_irq(&current->sighand->siglock);
			print_fatal_signal(regs, signr);
			spin_lock_irq(&current->sighand->siglock);
		}
		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		/* virtual init is protected against user signals */
		if ((info->si_code == SI_USER) &&
			vx_current_initpid(current->pid))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_orphaned_pgrp(process_group(current)))
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (print_fatal_signals)
			print_fatal_signal(regs, signr);
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}
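
/*
 * Illustrative sketch (not part of the original file; compiled out):
 * a kernel thread using the in-kernel sigprocmask() described above
 * to shield itself from everything except SIGKILL.  The helper name
 * is made up.
 */
#if 0
static void example_shield_kthread(void)
{
	sigset_t all_but_kill;

	siginitsetinv(&all_but_kill, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &all_but_kill, NULL);
}
#endif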

asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			if (current->flags & PF_FREEZE)
				refrigerator(PF_FREEZE);
			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
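/*
 * Illustrative userspace sketch (my addition, assuming glibc wrappers):
 * the caller keeps the signal blocked and lets sigtimedwait() dequeue
 * it synchronously; on timeout the kernel path above yields -EAGAIN.
 */
#if 0	/* example only -- build standalone, not in the kernel */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* must be blocked first */

	sig = sigtimedwait(&set, &info, &ts);
	if (sig < 0)
		perror("sigtimedwait");	/* errno is EAGAIN on timeout */
	else
		printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
	return 0;
}
#endif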
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target thread group.  This closes
 *  the race where a thread exits and its PID gets reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && (p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
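/*
 * Illustrative userspace sketch (my addition): glibc of this era has no
 * tgkill() wrapper, so the raw syscall is used; signal 0 exercises the
 * "null signal" probe described above.
 */
#if 0	/* example only -- build standalone, not in the kernel */
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid  = syscall(SYS_gettid);	/* == tgid when single-threaded */

	/* sig 0: permission/existence check only, nothing is delivered */
	if (syscall(SYS_tgkill, (int)tgid, (int)tid, 0) == 0)
		printf("thread %d still belongs to group %d\n",
		       (int)tid, (int)tgid);
	return 0;
}
#endif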
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
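/*
 * Illustrative userspace sketch (my addition, assuming glibc wrappers):
 * sigqueue() reaches this syscall with si_code = SI_QUEUE (negative),
 * which is exactly what the si_code >= 0 check above demands.
 */
#if 0	/* example only -- build standalone, not in the kernel */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *si, void *uctx)
{
	printf("sig %d, value %d\n", sig, si->si_value.sival_int);
}

int main(void)
{
	struct sigaction sa;
	union sigval val = { .sival_int = 42 };

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGRTMIN, &sa, NULL);

	sigqueue(getpid(), SIGRTMIN, val);	/* queued with payload */
	return 0;
}
#endif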
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked."
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
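/*
 * Illustrative userspace sketch (my addition): the POSIX rule quoted
 * above is observable -- a blocked, pending signal vanishes the moment
 * its action is set to SIG_IGN.
 */
#if 0	/* example only -- build standalone, not in the kernel */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1);			/* pending and blocked */
	signal(SIGUSR1, SIG_IGN);	/* discards it despite the block */

	sigpending(&pending);
	printf("still pending: %d\n", sigismember(&pending, SIGUSR1)); /* 0 */
	return 0;
}
#endif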
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note: this code used to test ss_flags incorrectly;
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked), so this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
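/*
 * Illustrative userspace sketch (my addition, assuming glibc wrappers):
 * installing an alternate stack; ss_flags = 0 is accepted as "enable"
 * for the historical reason noted above.
 */
#if 0	/* example only -- build standalone, not in the kernel */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	stack_t ss, oss;

	ss.ss_sp = malloc(SIGSTKSZ);
	if (!ss.ss_sp)
		return 1;
	ss.ss_size = SIGSTKSZ;		/* must be >= MINSIGSTKSZ */
	ss.ss_flags = 0;		/* tolerated legacy spelling of "enable" */

	if (sigaltstack(&ss, &oss) == 0)
		printf("previous ss_flags: %d (SS_DISABLE is %d)\n",
		       oss.ss_flags, SS_DISABLE);
	return 0;
}
#endif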
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask.  */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
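/*
 * Illustrative userspace sketch (my addition): both variants of the
 * syscall above silently strip SIGKILL and SIGSTOP from the new mask,
 * so attempting to block them simply has no effect.
 */
#if 0	/* example only -- build standalone, not in the kernel */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, cur;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	sigaddset(&set, SIGKILL);		/* will be dropped silently */
	sigprocmask(SIG_BLOCK, &set, NULL);

	sigprocmask(SIG_BLOCK, NULL, &cur);	/* read the mask back */
	printf("SIGINT blocked: %d, SIGKILL blocked: %d\n",
	       sigismember(&cur, SIGINT), sigismember(&cur, SIGKILL));
	return 0;
}
#endif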
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);	/* don't pass stack garbage
						 * to do_sigaction() */

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
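/*
 * Illustrative userspace sketch (my addition): SA_ONESHOT/SA_NOMASK are
 * the kernel names for SA_RESETHAND/SA_NODEFER, so the one-shot signal()
 * semantics above can be reproduced through sigaction().  Note that
 * glibc's own signal() wrapper provides BSD (non-resetting) semantics
 * and does not normally reach this syscall.
 */
#if 0	/* example only -- build standalone, not in the kernel */
#include <signal.h>
#include <stdio.h>
#include <string.h>

static void once(int sig)
{
	/* the disposition was already reset to SIG_DFL on entry */
}

int main(void)
{
	struct sigaction sa, cur;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = once;
	sa.sa_flags = SA_RESETHAND | SA_NODEFER;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);			/* runs once(), then resets */
	sigaction(SIGUSR1, NULL, &cur);
	printf("reset to SIG_DFL: %d\n", cur.sa_handler == SIG_DFL); /* 1 */
	return 0;
}
#endif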
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
void __init signals_init(void)
{
	/* Create the SLAB cache used for preallocated sigqueue structures. */
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}