/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/posix-timers.h>
#include <linux/signal.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
/*
 * SLAB caches for signal bits.
 */

static kmem_cache_t *sigqueue_cachep;
/*
 * In POSIX a signal is sent either to a specific thread (Linux task)
 * or to the process as a whole (Linux thread group).  How the signal
 * is sent determines whether it's to one thread or the whole group,
 * which determines which signal mask(s) are involved in blocking it
 * from being delivered until later.  When the signal is delivered,
 * either it's caught or ignored by a user handler or it has a default
 * effect that applies to the whole thread group (POSIX process).
 *
 * The possible effects an unblocked signal set to SIG_DFL can have are:
 *   ignore	- Nothing Happens
 *   terminate	- kill the process, i.e. all threads in the group,
 *		  similar to exit_group.  The group leader (only) reports
 *		  WIFSIGNALED status to its parent.
 *   coredump	- write a core dump file describing all threads using
 *		  the same mm and then kill all those threads
 *   stop	- stop all the threads in the group, i.e. TASK_STOPPED state
 *
 * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
 * Other signals when not blocked and set to SIG_DFL behave as follows.
 * The job control signals also have other special effects.
 *
 *	+--------------------+------------------+
 *	|  POSIX signal      |  default action  |
 *	+--------------------+------------------+
 *	|  SIGHUP            |  terminate       |
 *	|  SIGINT            |  terminate       |
 *	|  SIGQUIT           |  coredump        |
 *	|  SIGILL            |  coredump        |
 *	|  SIGTRAP           |  coredump        |
 *	|  SIGABRT/SIGIOT    |  coredump        |
 *	|  SIGBUS            |  coredump        |
 *	|  SIGFPE            |  coredump        |
 *	|  SIGKILL           |  terminate(+)    |
 *	|  SIGUSR1           |  terminate       |
 *	|  SIGSEGV           |  coredump        |
 *	|  SIGUSR2           |  terminate       |
 *	|  SIGPIPE           |  terminate       |
 *	|  SIGALRM           |  terminate       |
 *	|  SIGTERM           |  terminate       |
 *	|  SIGCHLD           |  ignore          |
 *	|  SIGCONT           |  ignore(*)       |
 *	|  SIGSTOP           |  stop(*)(+)      |
 *	|  SIGTSTP           |  stop(*)         |
 *	|  SIGTTIN           |  stop(*)         |
 *	|  SIGTTOU           |  stop(*)         |
 *	|  SIGURG            |  ignore          |
 *	|  SIGXCPU           |  coredump        |
 *	|  SIGXFSZ           |  coredump        |
 *	|  SIGVTALRM         |  terminate       |
 *	|  SIGPROF           |  terminate       |
 *	|  SIGPOLL/SIGIO     |  terminate       |
 *	|  SIGSYS/SIGUNUSED  |  coredump        |
 *	|  SIGSTKFLT         |  terminate       |
 *	|  SIGWINCH          |  ignore          |
 *	|  SIGPWR            |  terminate       |
 *	|  SIGRTMIN-SIGRTMAX |  terminate       |
 *	+--------------------+------------------+
 *	|  non-POSIX signal  |  default action  |
 *	+--------------------+------------------+
 *	|  SIGEMT            |  coredump        |
 *	+--------------------+------------------+
 *
 * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
 * (*) Special job control effects:
 * When SIGCONT is sent, it resumes the process (all threads in the group)
 * from TASK_STOPPED state and also clears any pending/queued stop signals
 * (any of those marked with "stop(*)").  This happens regardless of blocking,
 * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
 * any pending/queued SIGCONT signals; this happens regardless of blocking,
 * catching, or ignoring the stop signal, though (except for SIGSTOP) the
 * default action of stopping the process may happen later or never.
 */
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))
#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL) | M(SIGSTOP) )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT) | M(SIGILL)  | M(SIGTRAP) | M(SIGABRT) | \
	M(SIGFPE)  | M(SIGSEGV) | M(SIGBUS)  | M(SIGSYS)  | \
	M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )
#define sig_kernel_only(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
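
/*
 * Illustrative expansion (assuming an arch where SIGRTMIN <= BITS_PER_LONG,
 * so M() is the 1UL variant): sig_kernel_stop(SIGTSTP) becomes
 *
 *	(SIGTSTP < SIGRTMIN) && ((1UL << (SIGTSTP-1)) & SIG_KERNEL_STOP_MASK)
 *
 * i.e. a compile-time-constant bit test costing a couple of instructions.
 * Real-time signals (>= SIGRTMIN) never match any of these masks and so
 * always fall through to the "terminate" default.
 */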
#define sig_user_defined(t, signr) \
	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&	\
	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))

#define sig_fatal(t, signr) \
	(!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
	 (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user * handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return	 handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
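
/*
 * Worked example: if pending->signal has SIGINT (2) and SIGTERM (15) set
 * and *mask blocks SIGINT, then x = *s &~ *m has only bit 14 set, ffz(~x)
 * returns 14, and next_signal() yields 14 + 1 = SIGTERM.  Lower-numbered
 * unblocked signals are always found first.
 */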
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;

	atomic_inc(&t->user->sigpending);
	if (override_rlimit ||
	    atomic_read(&t->user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&t->user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->lock = NULL;
		q->user = get_uid(t->user);
	}
	return(q);
}
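
/*
 * Accounting sketch: each queued siginfo is charged to the sending user's
 * ->sigpending count.  With RLIMIT_SIGPENDING at, say, 1024, the 1025th
 * simultaneously queued signal for that user gets q == NULL here (unless
 * override_rlimit is set), and send_signal() below falls back to the
 * "collapse to one pending bit, drop the siginfo" path.
 */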
static inline void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
static void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */

void
flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t,TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_sighand(struct task_struct *tsk)
{
	struct sighand_struct * sighand = tsk->sighand;

	/* Ok, we're done with the signal handlers */
	tsk->sighand = NULL;
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

void exit_sighand(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_sighand(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * This function expects the tasklist_lock write-locked.
 */
void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct * sig = tsk->signal;
	struct sighand_struct * sighand = tsk->sighand;

	if (!sig)
		BUG();
	if (!atomic_read(&sig->count))
		BUG();
	spin_lock(&sighand->siglock);
	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count)) {
		posix_cpu_timers_exit_group(tsk);
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		spin_unlock(&sighand->siglock);
		flush_sigqueue(&sig->shared_pending);
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		tsk->signal = NULL;
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->sched_time += tsk->sched_time;
		spin_unlock(&sighand->siglock);
		sig = NULL;	/* Marker for below.  */
	}
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		/*
		 * We are cleaning up the signal_struct here.  We delayed
		 * calling exit_itimers until after flush_sigqueue, just in
		 * case our thread-local pending queue contained a queued
		 * timer signal that would have been cleared in
		 * exit_itimers.  When that called sigqueue_free, it would
		 * attempt to re-take the tasklist_lock and deadlock.  This
		 * can never happen if we ensure that all queues the
		 * timer's signal might be queued on have been flushed
		 * first.  The shared_pending queue, and our own pending
		 * queue are the only queues the timer could be on, since
		 * there are no other threads left in the group and timer
		 * signals are constrained to threads inside the group.
		 */
		exit_itimers(sig);
		exit_thread_group_keys(sig);
		kmem_cache_free(signal_cachep, sig);
	}
}
void exit_signal(struct task_struct *tsk)
{
	write_lock_irq(&tasklist_lock);
	__exit_signal(tsk);
	write_unlock_irq(&tasklist_lock);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.  */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
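
/*
 * Usage sketch (hypothetical driver, illustrative names only): a driver
 * that must not have its process interrupted mid-handoff can do roughly
 *
 *	sigemptyset(&mask);
 *	sigaddset(&mask, SIGSTOP);
 *	block_all_signals(my_notifier, my_data, &mask);
 *	... critical section ...
 *	unblock_all_signals();
 *
 * where my_notifier() returns 0 to swallow the signal or non-zero to let
 * it through; the DRM lock code has historically been the main user.
 */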
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	current->notifier_mask = NULL;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {

		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = 0;

	/* SIGKILL must have priority, otherwise it is quite easy
	 * to create an unkillable process, sending sig < SIGKILL
	 * to self */
	if (unlikely(sigismember(&pending->signal, SIGKILL))) {
		if (!sigismember(mask, SIGKILL))
			sig = SIGKILL;
	}

	if (likely(!sig))
		sig = next_signal(pending, mask);
	if (likely(sig)) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}
	recalc_sigpending();

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr)
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
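
/*
 * Callers in this file: get_signal_to_deliver() and sys_rt_sigtimedwait()
 * both call this with the siglock held.  Per-thread (private) signals are
 * preferred over process-wide (shared) ones, which is why the thread's own
 * pending queue is consulted first above.
 */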
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped
	 * state.  By using wake_up_state, we ensure the process will wake
	 * up and handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int user;
	int error = -EINVAL;

	if (!valid_signal(sig))
		return error;

	user = (!info || ((unsigned long)info != 1 &&
		(unsigned long)info != 2 && SI_FROMUSER(info)));

	error = -EPERM;
	if (user && ((sig != SIGCONT) ||
		(current->signal->session != t->signal->session))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	error = -ESRCH;
	if (user && !vx_check(vx_task_xid(t), VX_ADMIN|VX_IDENT))
		return error;

	return security_task_kill(t, info, sig);
}
/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     struct task_struct *parent,
				     int why);
/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_STOPPED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
					CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			if (p->ptrace & PT_PTRACED)
				do_notify_parent_cldstop(p, p->parent,
							 CLD_CONTINUED);
			else
				do_notify_parent_cldstop(
					p->group_leader,
					p->group_leader->real_parent,
					CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue * q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if ((unsigned long)info == 2)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     ((unsigned long) info < 2 ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case 0:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case 1:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else {
		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
		   && info->si_code != SI_USER)
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			return -EAGAIN;
		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
			/*
			 * Set up a return to indicate that we dropped
			 * the signal.
			 */
			ret = info->si_sys_private;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
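
/*
 * Example of the legacy rule: two back-to-back kill(pid, SIGUSR1) calls
 * leave exactly one SIGUSR1 pending (the second is short-circuited by this
 * test), while two sigqueue() deliveries of SIGRTMIN queue two distinct
 * entries, since LEGACY_QUEUE() only applies below SIGRTMIN.
 */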
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	if (!irqs_disabled())
		BUG();
	assert_spin_locked(&t->sighand->siglock);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 */

int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
		sigdelset(&t->blocked, sig);
		recalc_sigpending_tsk(t);
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
void
force_sig_specific(int sig, struct task_struct *t)
{
	unsigned long int flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
	sigdelset(&t->blocked, sig);
	recalc_sigpending_tsk(t);
	specific_send_sig_info(sig, (void *)2, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
#define wants_signal(sig, p, mask)			\
	(!sigismember(&(p)->blocked, sig)		\
	 && !((p)->state & mask)			\
	 && !((p)->flags & PF_EXITING)			\
	 && (task_curr(p) || !signal_pending(p)))
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	unsigned int mask;
	struct task_struct *t;

	/*
	 * Don't bother traced and stopped tasks (but
	 * SIGKILL will punch through that).
	 */
	mask = TASK_STOPPED | TASK_TRACED;
	if (sig == SIGKILL)
		mask = 0;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p, mask))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;
		BUG_ON(t->tgid != p->tgid);

		while (!wants_signal(sig, t, mask)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
}
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
		/*
		 * Set up a return to indicate that we dropped the signal.
		 */
		ret = info->si_sys_private;

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/*
		 * We don't want to notify the parent, since we are
		 * killed as part of a thread group due to another
		 * thread doing an execve() or similar. So set the
		 * exit signal to -1 to allow immediate reaping of
		 * the process.  But don't detach the thread group
		 * leader.
		 */
		if (t != p->group_leader)
			t->exit_signal = -1;

		sigaddset(&t->pending.signal, SIGKILL);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		signal_wake_up(t, 1);
	}
}
/*
 * Must be called with the tasklist_lock held for reading!
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);
	if (!ret && sig && p->sighand) {
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}

	return ret;
}
/*
 * kill_pg_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	if (pgrp <= 0)
		return -EINVAL;

	success = 0;
	retval = -ESRCH;
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int
kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pg_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	struct task_struct *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p)
		error = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return error;
}
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	if (!pid) {
		return kill_pg_info(sig, info, process_group(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		return count ? retval : -ESRCH;
	} else if (pid < 0) {
		return kill_pg_info(sig, info, -pid);
	} else {
		return kill_proc_info(sig, info, pid);
	}
}
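
/*
 * Summary of the pid encoding handled above, mirroring kill(2):
 *	pid > 0		signal the process with that pid
 *	pid == 0	signal every process in the caller's process group
 *	pid == -1	signal every process the caller may signal, except
 *			init (pid 1) and the caller's own thread group
 *	pid < -1	signal every process in process group -pid
 */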
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, (void*)(long)(priv != 0), p);
}
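
/*
 * The priv flag selects the fake siginfo pointer: send_sig(sig, p, 0)
 * passes (void *)0, so the signal looks user-generated (SI_USER, with
 * current's pid/uid), while send_sig(sig, p, 1) passes (void *)1 and the
 * signal is marked SI_KERNEL.  See the switch in send_signal() above.
 */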
/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, (void*)1L, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int
kill_pg(pid_t pgrp, int sig, int priv)
{
	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
}

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return(q);
}
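
/*
 * Typical flow (posix-timers): timer_create() calls sigqueue_alloc() once,
 * so a later timer expiry can deliver via send_sigqueue() or
 * send_group_sigqueue() without allocating in atomic context; only
 * timer_create() itself can fail with EAGAIN.
 */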
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		read_lock(&tasklist_lock);
		spin_lock_irqsave(q->lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(q->lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
int
send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		if (q->info.si_code != SI_TIMER)
			BUG();
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	q->lock = &p->sighand->siglock;
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return(ret);
}
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
static void
do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
			 int why)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *sighand;

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (likely(current->ptrace & PT_PTRACED) &&
	    likely(current->parent != current->real_parent ||
		   !(current->ptrace & PT_ATTACHED)) &&
	    (likely(current->parent->signal != current->signal) ||
	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
		do_notify_parent_cldstop(current, current->parent,
					 CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, current->parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}
	else if (stop_count == 0) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current->group_leader,
					 current->group_leader->real_parent,
					 CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	schedule();
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int
do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	struct sighand_struct *sighand = current->sighand;
	int stop_count = -1;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		signr = sig->group_exit_code;
		stop_count = --sig->group_stop_count;
		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else if (thread_group_empty(current)) {
		/*
		 * Lock must be held through transition to stopped state.
		 */
		current->exit_code = current->signal->group_exit_code = signr;
		set_current_state(TASK_STOPPED);
		sig->flags = SIGNAL_STOP_STOPPED;
		spin_unlock_irq(&sighand->siglock);
	}
	else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now, but that requires
		 * dropping siglock to get both the tasklist lock
		 * and siglock again in the proper order.  Note that
		 * this allows an intervening SIGCONT to be posted.
		 * We need to check for that and bail out if necessary.
		 */
		struct task_struct *t;

		spin_unlock_irq(&sighand->siglock);

		/* signals can be posted during this window */

		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
			/*
			 * Another stop or continue happened while we
			 * didn't have the lock.  We can just swallow this
			 * signal now.  If we raced with a SIGCONT, that
			 * should have just cleared it now.  If we raced
			 * with another processor delivering a stop signal,
			 * then the SIGCONT that wakes us up should clear it.
			 */
			read_unlock(&tasklist_lock);
			spin_unlock_irq(&sighand->siglock);
			return 0;
		}

		if (sig->group_stop_count == 0) {
			sig->group_exit_code = signr;
			stop_count = 0;
			for (t = next_thread(current); t != current;
			     t = next_thread(t))
				/*
				 * Setting state to TASK_STOPPED for a group
				 * stop is always done with the siglock held,
				 * so this check has no races.
				 */
				if (t->state < TASK_STOPPED) {
					stop_count++;
					signal_wake_up(t, 0);
				}
			sig->group_stop_count = stop_count;
		}
		else {
			/* A race with another thread while unlocked.  */
			signr = sig->group_exit_code;
			stop_count = --sig->group_stop_count;
		}

		current->exit_code = signr;
		set_current_state(TASK_STOPPED);
		if (stop_count == 0)
			sig->flags = SIGNAL_STOP_STOPPED;

		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	finish_stop(stop_count);
	return 1;
}
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static inline int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump;
		 * we are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/* Init gets no signals it doesn't want.  */
		if (current->pid == 1)
			continue;

		/* virtual init is protected against user signals */
		if ((info->si_code == SI_USER) &&
			vx_current_initpid(current->pid))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_orphaned_pgrp(process_group(current)))
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_pg);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;
	sigset_t old_block;

	spin_lock_irq(&current->sighand->siglock);
	old_block = current->blocked;
	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
		break;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = old_block;
	return error;
}
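
/*
 * Kernel-internal usage sketch (hypothetical kthread, illustrative only):
 * a kernel thread that wants to honour SIGKILL but nothing else might do
 *
 *	sigset_t blocked;
 *	siginitsetinv(&blocked, sigmask(SIGKILL));
 *	sigprocmask(SIG_SETMASK, &blocked, NULL);
 *
 * Unlike the syscall path below, nothing here filters SIGKILL/SIGSTOP out
 * of *set, so SIG_SETMASK really can block them.
 */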
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;
out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested in while we are sleeping, so that
			 * we'll be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			current->state = TASK_INTERRUPTIBLE;
			timeout = schedule_timeout(timeout);

			if (current->flags & PF_FREEZE)
				refrigerator(PF_FREEZE);
			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target thread group.  This solves
 *  the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p && (p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
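
/*
 * Usage sketch (hypothetical, not part of this file): there is no libc
 * wrapper for tgkill in this era, so callers typically go through
 * syscall(2); target_tgid and target_tid are assumed to have been
 * obtained from the target via getpid() and gettid().
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int ret = syscall(SYS_tgkill, target_tgid, target_tid, SIGUSR1);
 */
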
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	struct siginfo info;
	int error;
	struct task_struct *p;

	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	error = -ESRCH;
	if (p) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
	return error;
}
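
/*
 * Sketch of the null-signal probe noted above (hypothetical): with
 * sig == 0 no signal is delivered, but the permission and existence
 * checks still run, so the result tells whether the thread is alive.
 * EPERM means it exists but we may not signal it.
 *
 *	#include <errno.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int thread_alive(pid_t tid)
 *	{
 *		return syscall(SYS_tkill, tid, 0) == 0 || errno == EPERM;
 *	}
 */
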
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info. */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, &info, pid);
}
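
/*
 * Usage sketch (hypothetical): glibc's sigqueue(3) is built on this
 * syscall.  It fills in si_code = SI_QUEUE, which is negative and so
 * passes the check above, and carries one word of payload in si_value.
 * target_pid is an assumed value.
 *
 *	#include <signal.h>
 *
 *	union sigval val;
 *	val.sival_int = 42;
 *	sigqueue(target_pid, SIGRTMIN, val);
 */
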
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked."
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL &&
		     sig_kernel_ignore(sig))) {
			/*
			 * This is a fairly rare case, so we only take the
			 * tasklist_lock once we're sure we'll need it.
			 * Now we must do this little unlock and relock
			 * dance to maintain the lock hierarchy.
			 */
			struct task_struct *t = current;
			spin_unlock_irq(&t->sighand->siglock);
			read_lock(&tasklist_lock);
			spin_lock_irq(&t->sighand->siglock);
			*k = *act;
			sigdelsetmask(&k->sa.sa_mask,
				      sigmask(SIGKILL) | sigmask(SIGSTOP));
			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
			do {
				rm_from_queue(sigmask(sig), &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
			spin_unlock_irq(&current->sighand->siglock);
			read_unlock(&tasklist_lock);
			return 0;
		}

		*k = *act;
		sigdelsetmask(&k->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
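
/*
 * Userspace view of the rm_from_queue() calls above (hypothetical
 * sketch): a pending SIGCHLD, even a blocked one, is discarded the
 * moment its action becomes SIG_IGN.
 *
 *	#include <signal.h>
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	(a child exits here; SIGCHLD is now pending and blocked)
 *	signal(SIGCHLD, SIG_IGN);
 *	(the pending SIGCHLD has been removed from the queue)
 */
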
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked), so this check preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
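
/*
 * Usage sketch (hypothetical): the sas_ss_sp/sas_ss_size values set
 * above come from sigaltstack(2); combined with SA_ONSTACK they let a
 * SIGSEGV handler run even after the normal stack has overflowed.
 * segv_handler is an assumed function.
 *
 *	#include <signal.h>
 *
 *	static char stk[SIGSTKSZ];
 *
 *	stack_t ss;
 *	struct sigaction sa;
 *
 *	ss.ss_sp = stk;
 *	ss.ss_size = sizeof(stk);
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 *
 *	sa.sa_handler = segv_handler;
 *	sa.sa_flags = SA_ONSTACK;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */
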
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif /* __ARCH_WANT_SYS_SIGPENDING */

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask. */

asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
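
/*
 * Usage sketch (hypothetical): the three `how' values map onto the
 * switch above; SIG_BLOCK ors the new set into the blocked mask,
 * SIG_UNBLOCK clears it, and SIG_SETMASK replaces word zero outright.
 *
 *	#include <signal.h>
 *
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	(SIGINT now blocked)
 *	sigprocmask(SIG_UNBLOCK, &set, NULL);	(and unblocked again)
 */
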
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
		return -EFAULT;

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
		return -EFAULT;
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
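
/*
 * Behavior sketch (hypothetical): SA_ONESHOT gives this syscall SysV
 * semantics, i.e. the action resets to SIG_DFL once the handler fires,
 * so code reaching it directly must reinstall the handler itself.
 * Modern glibc's signal(3) goes through rt_sigaction instead and does
 * not reset; SYS_signal exists only on architectures that define
 * __ARCH_WANT_SYS_SIGNAL.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static void on_usr1(int sig)
 *	{
 *		syscall(SYS_signal, sig, on_usr1);	(reinstall first)
 *	}
 */
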
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif /* __ARCH_WANT_SYS_PAUSE */

void __init signals_init(void)
{
	sigqueue_cachep =
		kmem_cache_create("sigqueue",
				  sizeof(struct sigqueue),
				  __alignof__(struct sigqueue),
				  SLAB_PANIC, NULL, NULL);
}