/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/posix-timers.h>
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/vs_cvirt.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;
/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 *
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
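/*
 * Illustrative userspace sketch (assumes a normal libc build; not part
 * of the kernel proper): the stop/continue bookkeeping described above
 * is observable with waitpid().  WUNTRACED reports the stop, and
 * WCONTINUED reports the SIGCONT, even though SIGCONT's default action
 * is otherwise "ignore".
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t child = fork();

	if (child == 0) {		/* child just idles */
		for (;;)
			pause();
	}
	kill(child, SIGSTOP);		/* stop(*)(+): cannot be caught */
	waitpid(child, &status, WUNTRACED);
	printf("stopped by %d\n", WSTOPSIG(status));

	kill(child, SIGCONT);		/* clears queued stop signals */
	waitpid(child, &status, WCONTINUED);
	printf("continued: %d\n", WIFCONTINUED(status));

	kill(child, SIGKILL);		/* terminate(+) */
	waitpid(child, &status, 0);
	return 0;
}
#endif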
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL  << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL)   |  M(SIGSTOP)                                   )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
	M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
	M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )

#define sig_kernel_only(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))

#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
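/*
 * Illustrative sketch: the classification macros above only apply
 * below SIGRTMIN; realtime signals always fall through to their
 * action.  A host-side analog (the M()/T() copies here are a
 * userspace rebuild purely for demonstration) shows the membership
 * tests:
 */
#if 0
#include <signal.h>
#include <stdio.h>

#define M(sig)       (1UL << ((sig)-1))
#define T(sig, mask) (M(sig) & (mask))
#define STOP_MASK    (M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU))
#define IGNORE_MASK  (M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG))

int main(void)
{
	printf("SIGTSTP stops by default:   %d\n", T(SIGTSTP, STOP_MASK) != 0);
	printf("SIGCHLD ignored by default: %d\n", T(SIGCHLD, IGNORE_MASK) != 0);
	printf("SIGTERM in either mask:     %d\n",
	       (T(SIGTERM, STOP_MASK) | T(SIGTERM, IGNORE_MASK)) != 0);
	return 0;
}
#endif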
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return   handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
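/*
 * Illustrative userspace sketch (assumes a normal libc build): the
 * unrolled switch above is just a per-word "pending & ~blocked".  The
 * same rule is visible from userspace: a signal raised while blocked
 * shows up in sigpending() but is only deliverable once unblocked.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1);			/* stays pending: blocked */
	sigpending(&pending);
	printf("SIGUSR1 pending: %d\n", sigismember(&pending, SIGUSR1));

	sigprocmask(SIG_UNBLOCK, &block, NULL);
	return 0;	/* never reached: default action (terminate) fires */
}
#endif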
fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
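/*
 * Illustrative sketch: ffz(~x) is "index of the lowest set bit in x",
 * so the lowest-numbered deliverable signal wins.  A host-side analog
 * (assuming GCC's __builtin_ctzl) shows the arithmetic:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long pending = (1UL << (10 - 1)) | (1UL << (2 - 1));
	unsigned long blocked = (1UL << (2 - 1));
	unsigned long x = pending & ~blocked;

	/* ffz(~x) == __builtin_ctzl(x) for nonzero x */
	printf("next signal: %d\n", __builtin_ctzl(x) + 1);	/* -> 10 */
	return 0;
}
#endif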
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;

	atomic_inc(&t->user->sigpending);
	if (override_rlimit ||
	    atomic_read(&t->user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&t->user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(t->user);
	}
	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		sighand_free(sighand);
}
void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	rcu_read_lock();
	if (tsk->sighand != NULL) {
		struct sighand_struct *sighand = rcu_dereference(tsk->sighand);
		spin_lock(&sighand->siglock);
		__exit_sighand(tsk);
		spin_unlock(&sighand->siglock);
	}
	rcu_read_unlock();
	write_unlock_irq(&tasklist_lock);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand;

	BUG_ON(!sig);
	if (!atomic_read(&sig->count))
		BUG();

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);
	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count)) {
		posix_cpu_timers_exit_group(tsk);
		tsk->signal = NULL;
		__exit_sighand(tsk);
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->sched_time += tsk->sched_time;
		tsk->signal = NULL;
		__exit_sighand(tsk);
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below. */
	}
	rcu_read_unlock();
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.
		 */
		exit_thread_group_keys(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}
void exit_signal(struct task_struct *tsk)
{
	atomic_dec(&tsk->signal->live);

	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
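/*
 * Illustrative sketch of the notifier contract (hypothetical driver
 * code; the callback name and private struct are made up for the
 * example).  Returning 0 from the callback keeps the signal blocked.
 */
#if 0
struct my_lock {
	int holds_hw_lock;
};

static int my_signal_notifier(void *priv)
{
	struct my_lock *lock = priv;

	/* Block (return 0) only while we hold the hardware lock. */
	return !lock->holds_hw_lock;
}

static void my_take_hw_lock(struct my_lock *lock, sigset_t *mask)
{
	lock->holds_hw_lock = 1;
	block_all_signals(my_signal_notifier, lock, mask);
	/* ... critical hardware sequence ... */
	lock->holds_hw_lock = 0;
	unblock_all_signals();
}
#endif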
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	sig = next_signal(pending, mask);
	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ( signr &&
	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	     info->si_sys_private){
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
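/*
 * Illustrative sketch: a kernel thread that opted into a signal with
 * allow_signal() typically drains it along these lines (hypothetical
 * thread function; error handling elided).  As the comment above
 * requires, the siglock is held around dequeue_signal().
 */
#if 0
static int my_kthread(void *unused)
{
	siginfo_t info;
	unsigned long flags;

	allow_signal(SIGTERM);
	while (!signal_pending(current))
		schedule_timeout_interruptible(HZ);

	spin_lock_irqsave(&current->sighand->siglock, flags);
	dequeue_signal(current, &current->blocked, &info);
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
	return 0;
}
#endif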
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped
	 * state.  By using wake_up_state, we ensure the process will wake
	 * up and handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int user;
	int error = -EINVAL;

	if (!valid_signal(sig))
		return error;

	user = ((info == SEND_SIG_NOINFO) ||
		(!is_si_special(info) && SI_FROMUSER(info)));

	error = -EPERM;
	if (user && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	error = -ESRCH;
	if (user && !vx_check(vx_task_xid(t), VX_ADMIN|VX_IDENT))
		return error;

	error = security_task_kill(t, info, sig);
	if (!error)
		audit_signal_info(sig, t); /* Let audit system see the signal */
	return error;
}

/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     int to_self,
				     int why);
/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
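/*
 * Illustrative userspace sketch (assumes a normal libc build): the
 * LEGACY_QUEUE rule means a classic signal pends at most once, while
 * realtime signals queue every send.
 */
#if 0
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t hits;

static void count(int sig) { hits++; }

int main(void)
{
	sigset_t block;

	signal(SIGUSR1, count);
	signal(SIGRTMIN, count);

	sigfillset(&block);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1); raise(SIGUSR1);		/* coalesce to one */
	raise(SIGRTMIN); raise(SIGRTMIN);	/* both get queued */

	sigemptyset(&block);
	sigprocmask(SIG_SETMASK, &block, NULL);	/* deliver everything */
	printf("handler ran %d times\n", (int)hits);	/* -> 3 */
	return 0;
}
#endif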
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
	assert_spin_locked(&t->sighand->siglock);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	}
	if (sigismember(&t->blocked, sig)) {
		sigdelset(&t->blocked, sig);
	}
	recalc_sigpending_tsk(t);
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (p->state & (TASK_STOPPED | TASK_TRACED))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);

	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}
/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	struct sighand_struct *sp;
	int ret;

retry:
	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && (sp = rcu_dereference(p->sighand))) {
		spin_lock_irqsave(&sp->siglock, flags);
		if (p->sighand != sp) {
			spin_unlock_irqrestore(&sp->siglock, flags);
			goto retry;
		}
		if ((atomic_read(&sp->count) == 0) ||
				(atomic_read(&p->usage) == 0)) {
			spin_unlock_irqrestore(&sp->siglock, flags);
			return -ESRCH;
		}
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&sp->siglock, flags);
	}

	return ret;
}
/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	int acquired_tasklist_lock = 0;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_kernel_stop(sig) || sig == SIGCONT)) {
		read_lock(&tasklist_lock);
		acquired_tasklist_lock = 1;
	}
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && vx_check(vx_task_xid(p), VX_IDENT))
		error = group_send_sig_info(sig, info, p);
	if (unlikely(acquired_tasklist_lock))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();
	return error;
}
/* like kill_proc_info(), but doesn't use uid/euid of "current" */
int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
		      uid_t uid, uid_t euid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (vx_check(vx_task_xid(p), VX_ADMIN|VX_IDENT) &&
				p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
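/*
 * Illustrative userspace sketch of the pid conventions implemented
 * above (assumes a normal libc build): pid > 0 targets one process,
 * pid == 0 the caller's process group, pid < -1 the group -pid, and
 * pid == -1 "everything we are allowed to kill".
 */
#if 0
#include <signal.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0)
		for (;;)
			pause();

	kill(child, SIGTERM);		/* pid > 0: just that process */
	kill(0, SIGWINCH);		/* pid == 0: our process group */
	kill(-getpgrp(), SIGWINCH);	/* pid < -1: explicit group */
	return 0;
}
#endif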
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, __si_special(priv), pgrp);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, __si_special(priv), pid);
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from timer_create.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
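/*
 * Illustrative userspace sketch (assumes a normal libc build linked
 * with librt): because the sigqueue is preallocated at timer_create()
 * time, resource exhaustion surfaces there as EAGAIN instead of a
 * silently lost expiration later.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGRTMIN,
	};
	timer_t timer;

	if (timer_create(CLOCK_MONOTONIC, &sev, &timer) < 0) {
		perror("timer_create");	/* e.g. EAGAIN: no sigqueue */
		return 1;
	}
	timer_delete(timer);
	return 0;
}
#endif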
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		spinlock_t *lock = &current->sighand->siglock;
		read_lock(&tasklist_lock);
		spin_lock_irqsave(lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;
	struct sighand_struct *sh;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	rcu_read_lock();

	if (unlikely(p->flags & PF_EXITING)) {
		ret = -1;
		goto out_err;
	}

retry:
	sh = rcu_dereference(p->sighand);

	spin_lock_irqsave(&sh->siglock, flags);
	if (p->sighand != sh) {
		/* We raced with exec() in a multithreaded process... */
		spin_unlock_irqrestore(&sh->siglock, flags);
		goto retry;
	}

	/*
	 * We do the check here again to handle the following scenario:
	 *
	 * CPU 0			CPU 1
	 * send_sigqueue
	 * check PF_EXITING
	 * interrupt			exit code running
	 *				__exit_signal
	 *				lock sighand->siglock
	 * unlock sighand->siglock
	 * lock sh->siglock
	 * add(tsk->pending)		flush_sigqueue(tsk->pending)
	 */
	if (unlikely(p->flags & PF_EXITING)) {
		ret = -1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&sh->siglock, flags);
out_err:
	rcu_read_unlock();

	return ret;
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
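/*
 * Illustrative userspace sketch of the SIG_IGN auto-reap semantics
 * described above (assumes a normal libc build): with SIGCHLD set to
 * SIG_IGN, the child is reaped automatically and wait() reports ECHILD.
 */
#if 0
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* children will not become zombies */
	if (fork() == 0)
		_exit(0);
	if (wait(NULL) < 0 && errno == ECHILD)
		printf("child was auto-reaped\n");
	return 0;
}
#endif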
static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (to_self)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);

	read_lock(&tasklist_lock);
	if (likely(current->ptrace & PT_PTRACED) &&
	    likely(current->parent != current->real_parent ||
		   !(current->ptrace & PT_ATTACHED)) &&
	    (likely(current->parent->signal != current->signal) ||
	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
		do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
static void
finish_stop(int stop_count)
{
	int to_self;

	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED))
		to_self = 1;
	else if (stop_count == 0)
		to_self = 0;
	else
		goto out;

	read_lock(&tasklist_lock);
	do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
	read_unlock(&tasklist_lock);

out:
	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = current->signal->group_exit_code = signr;
		set_current_state(TASK_STOPPED);
		sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
			/*
			 * Another stop or continue happened while we
			 * didn't have the lock.  We can just swallow this
			 * signal now.  If we raced with a SIGCONT, that
			 * should have just cleared it now.  If we raced
			 * with another processor delivering a stop signal,
			 * then the SIGCONT that wakes us up should clear it.
			 */
			read_unlock(&tasklist_lock);
			return 0;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (!t->exit_state &&
				    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
	return 1;
}
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		/* virtual init is protected against user signals */
		if ((info->si_code == SI_USER) &&
			vx_current_initpid(current->pid))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_orphaned_pgrp(process_group(current)))
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
		break;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}
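/*
 * Illustrative userspace sketch of the three "how" modes (assumes a
 * normal libc build).  Note that the userspace call, unlike the
 * kernel-internal helper above, silently refuses to block SIGKILL
 * and SIGSTOP.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pending;

	sigemptyset(&set);
	sigaddset(&set, SIGTERM);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* or into the mask */

	raise(SIGTERM);				/* held pending */
	sigpending(&pending);
	printf("SIGTERM pending: %d\n", sigismember(&pending, SIGTERM));

	signal(SIGTERM, SIG_IGN);		/* discards the pending one */
	sigprocmask(SIG_UNBLOCK, &set, NULL);	/* nand out of the mask */
	return 0;
}
#endif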
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}
asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock the signals
			 * we're interested in while we sleep, so that
			 * we'll be woken when they arrive. */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}

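/*
 * Illustrative userspace sketch (not part of this file): the usual way
 * to consume this syscall is to block the signals of interest first,
 * then collect them synchronously with sigtimedwait(3):
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	keep it queued, not delivered
 *	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
 *		handle it; info.si_pid identifies the sender
 *
 * A timeout of { 0, 0 } polls: the code above computes timeout == 0,
 * skips the sleep, and returns -EAGAIN if nothing was pending.
 */
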
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}

static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (p && (tgid <= 0 || p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);

	return error;
}

/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process. This solves the
 *  problem of threads exiting and their PIDs being reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

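/*
 * Illustrative userspace sketch (not part of this file): callers
 * traditionally reach tgkill through syscall(2), since libc wrappers
 * came later. Signal 0 performs the existence/permission probe
 * described in do_tkill() without delivering anything:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	if (syscall(SYS_tgkill, tgid, tid, 0) == 0)
 *		thread "tid" still exists in thread group "tgid"
 */
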
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info. */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, &info, pid);
}

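/*
 * Illustrative userspace sketch (not part of this file): the usual
 * entry point is sigqueue(3), which builds a siginfo with
 * si_code = SI_QUEUE (a negative value), satisfying the si_code < 0
 * check above:
 *
 *	union sigval v;
 *	v.sival_int = 42;
 *	sigqueue(pid, SIGUSR1, v);	queue SIGUSR1 with a payload
 *
 * A hand-forged siginfo with si_code >= 0 is rejected with -EPERM even
 * for root, exactly as the comment above explains.
 */
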
int
do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked."
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

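/*
 * Illustrative userspace sketch (not part of this file): the POSIX
 * discard rule implemented above is observable from userspace -- a
 * blocked, pending signal vanishes once its action becomes SIG_IGN:
 *
 *	sigset_t set, pend;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);			now pending and blocked
 *	signal(SIGUSR1, SIG_IGN);	rm_from_queue_full() discards it
 *	sigpending(&pend);		SIGUSR1 is no longer in "pend"
 */
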
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}

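/*
 * Illustrative userspace sketch (not part of this file; segv_handler is
 * a placeholder for the caller's handler): the alternate stack is only
 * consulted for handlers installed with SA_ONSTACK, which is how a
 * process can still catch a SIGSEGV caused by overflowing its own stack:
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,	must be >= MINSIGSTKSZ
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = { .sa_handler = segv_handler,
 *				.sa_flags = SA_ONSTACK };
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */
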
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask. */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

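/*
 * Illustrative userspace sketch (not part of this file): because
 * sys_signal() installs the handler with SA_ONESHOT | SA_NOMASK, the
 * action reverts to SIG_DFL after one delivery and the signal is not
 * blocked while the handler runs -- the historical System V behaviour.
 * A handler that wants to persist must re-install itself:
 *
 *	void handler(int sig)
 *	{
 *		signal(sig, handler);	re-arm, or the next one gets SIG_DFL
 *		...
 *	}
 *
 * Portable code should prefer sigaction() and avoid this race entirely.
 */
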
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */

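/*
 * Illustrative userspace sketch (not part of this file;
 * flag_set_by_handler is a placeholder): sigsuspend() exists to close
 * the unblock-then-wait race. The saved_sigmask / TIF_RESTORE_SIGMASK
 * pair above restores the old mask only after any handler has run:
 *
 *	sigset_t block, wait;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &wait);	"wait" = old mask
 *	sigdelset(&wait, SIGUSR1);
 *	while (!flag_set_by_handler)
 *		sigsuspend(&wait);	atomically unblock and sleep
 */
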
void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}