linux 2.6.16.38 w/ vs2.0.3-rc1
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *              Changes to use preallocated sigqueue structures
10  *              to allow signals to be sent reliably.
11  */
12
13 #include <linux/config.h>
14 #include <linux/slab.h>
15 #include <linux/module.h>
16 #include <linux/smp_lock.h>
17 #include <linux/init.h>
18 #include <linux/sched.h>
19 #include <linux/fs.h>
20 #include <linux/tty.h>
21 #include <linux/binfmts.h>
22 #include <linux/security.h>
23 #include <linux/syscalls.h>
24 #include <linux/ptrace.h>
25 #include <linux/posix-timers.h>
26 #include <linux/signal.h>
27 #include <linux/audit.h>
28 #include <linux/capability.h>
29 #include <linux/vs_cvirt.h>
30 #include <asm/param.h>
31 #include <asm/uaccess.h>
32 #include <asm/unistd.h>
33 #include <asm/siginfo.h>
34
35 /*
36  * SLAB caches for signal bits.
37  */
38
39 static kmem_cache_t *sigqueue_cachep;
40
41 /*
42  * In POSIX a signal is sent either to a specific thread (Linux task)
43  * or to the process as a whole (Linux thread group).  How the signal
44  * is sent determines whether it's to one thread or the whole group,
45  * which determines which signal mask(s) are involved in blocking it
46  * from being delivered until later.  When the signal is delivered,
47  * either it's caught or ignored by a user handler or it has a default
48  * effect that applies to the whole thread group (POSIX process).
49  *
50  * The possible effects an unblocked signal set to SIG_DFL can have are:
51  *   ignore     - Nothing Happens
52  *   terminate  - kill the process, i.e. all threads in the group,
53  *                similar to exit_group.  The group leader (only) reports
54  *                WIFSIGNALED status to its parent.
55  *   coredump   - write a core dump file describing all threads using
56  *                the same mm and then kill all those threads
57  *   stop       - stop all the threads in the group, i.e. TASK_STOPPED state
58  *
59  * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
60  * Other signals, when not blocked and set to SIG_DFL, behave as follows.
61  * The job control signals also have other special effects.
62  *
63  *      +--------------------+------------------+
64  *      |  POSIX signal      |  default action  |
65  *      +--------------------+------------------+
66  *      |  SIGHUP            |  terminate       |
67  *      |  SIGINT            |  terminate       |
68  *      |  SIGQUIT           |  coredump        |
69  *      |  SIGILL            |  coredump        |
70  *      |  SIGTRAP           |  coredump        |
71  *      |  SIGABRT/SIGIOT    |  coredump        |
72  *      |  SIGBUS            |  coredump        |
73  *      |  SIGFPE            |  coredump        |
74  *      |  SIGKILL           |  terminate(+)    |
75  *      |  SIGUSR1           |  terminate       |
76  *      |  SIGSEGV           |  coredump        |
77  *      |  SIGUSR2           |  terminate       |
78  *      |  SIGPIPE           |  terminate       |
79  *      |  SIGALRM           |  terminate       |
80  *      |  SIGTERM           |  terminate       |
81  *      |  SIGCHLD           |  ignore          |
82  *      |  SIGCONT           |  ignore(*)       |
83  *      |  SIGSTOP           |  stop(*)(+)      |
84  *      |  SIGTSTP           |  stop(*)         |
85  *      |  SIGTTIN           |  stop(*)         |
86  *      |  SIGTTOU           |  stop(*)         |
87  *      |  SIGURG            |  ignore          |
88  *      |  SIGXCPU           |  coredump        |
89  *      |  SIGXFSZ           |  coredump        |
90  *      |  SIGVTALRM         |  terminate       |
91  *      |  SIGPROF           |  terminate       |
92  *      |  SIGPOLL/SIGIO     |  terminate       |
93  *      |  SIGSYS/SIGUNUSED  |  coredump        |
94  *      |  SIGSTKFLT         |  terminate       |
95  *      |  SIGWINCH          |  ignore          |
96  *      |  SIGPWR            |  terminate       |
97  *      |  SIGRTMIN-SIGRTMAX |  terminate       |
98  *      +--------------------+------------------+
99  *      |  non-POSIX signal  |  default action  |
100  *      +--------------------+------------------+
101  *      |  SIGEMT            |  coredump        |
102  *      +--------------------+------------------+
103  *
104  * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
105  * (*) Special job control effects:
106  * When SIGCONT is sent, it resumes the process (all threads in the group)
107  * from TASK_STOPPED state and also clears any pending/queued stop signals
108  * (any of those marked with "stop(*)").  This happens regardless of blocking,
109  * catching, or ignoring SIGCONT.  When any stop signal is sent, it clears
110  * any pending/queued SIGCONT signals; this happens regardless of blocking,
111  * catching, or ignoring the stop signal, though (except for SIGSTOP) the
112  * default action of stopping the process may happen later or never.
113  */
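/*
 * Illustrative note (not part of the original source): with the table
 * above, a process that leaves SIGTERM at SIG_DFL is terminated by it,
 * and the parent's wait*() call then reports WIFSIGNALED(status) with
 * WTERMSIG(status) == SIGTERM, whereas an unhandled SIGCHLD is simply
 * discarded.
 */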
114
115 #ifdef SIGEMT
116 #define M_SIGEMT        M(SIGEMT)
117 #else
118 #define M_SIGEMT        0
119 #endif
120
121 #if SIGRTMIN > BITS_PER_LONG
122 #define M(sig) (1ULL << ((sig)-1))
123 #else
124 #define M(sig) (1UL << ((sig)-1))
125 #endif
126 #define T(sig, mask) (M(sig) & (mask))
127
128 #define SIG_KERNEL_ONLY_MASK (\
129         M(SIGKILL)   |  M(SIGSTOP)                                   )
130
131 #define SIG_KERNEL_STOP_MASK (\
132         M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )
133
134 #define SIG_KERNEL_COREDUMP_MASK (\
135         M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
136         M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
137         M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )
138
139 #define SIG_KERNEL_IGNORE_MASK (\
140         M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )
141
142 #define sig_kernel_only(sig) \
143                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
144 #define sig_kernel_coredump(sig) \
145                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
146 #define sig_kernel_ignore(sig) \
147                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
148 #define sig_kernel_stop(sig) \
149                 (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
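/*
 * Worked example (illustrative): on i386, SIGTSTP is 20, so M(SIGTSTP)
 * is bit 19 of the mask; sig_kernel_stop(SIGTSTP) evaluates to
 * (20 < SIGRTMIN) && (M(SIGTSTP) & SIG_KERNEL_STOP_MASK), which is
 * non-zero, while sig_kernel_stop(SIGTERM) is 0 because SIGTERM is not
 * part of SIG_KERNEL_STOP_MASK.
 */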
150
151 #define sig_user_defined(t, signr) \
152         (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&  \
153          ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
154
155 #define sig_fatal(t, signr) \
156         (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
157          (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
158
159 static int sig_ignored(struct task_struct *t, int sig)
160 {
161         void __user * handler;
162
163         /*
164          * Tracers always want to know about signals..
165          */
166         if (t->ptrace & PT_PTRACED)
167                 return 0;
168
169         /*
170          * Blocked signals are never ignored, since the
171          * signal handler may change by the time it is
172          * unblocked.
173          */
174         if (sigismember(&t->blocked, sig))
175                 return 0;
176
177         /* Is it explicitly or implicitly ignored? */
178         handler = t->sighand->action[sig-1].sa.sa_handler;
179         return   handler == SIG_IGN ||
180                 (handler == SIG_DFL && sig_kernel_ignore(sig));
181 }
182
183 /*
184  * Re-calculate pending state from the set of locally pending
185  * signals, globally pending signals, and blocked signals.
186  */
187 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
188 {
189         unsigned long ready;
190         long i;
191
192         switch (_NSIG_WORDS) {
193         default:
194                 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
195                         ready |= signal->sig[i] &~ blocked->sig[i];
196                 break;
197
198         case 4: ready  = signal->sig[3] &~ blocked->sig[3];
199                 ready |= signal->sig[2] &~ blocked->sig[2];
200                 ready |= signal->sig[1] &~ blocked->sig[1];
201                 ready |= signal->sig[0] &~ blocked->sig[0];
202                 break;
203
204         case 2: ready  = signal->sig[1] &~ blocked->sig[1];
205                 ready |= signal->sig[0] &~ blocked->sig[0];
206                 break;
207
208         case 1: ready  = signal->sig[0] &~ blocked->sig[0];
209         }
210         return ready != 0;
211 }
212
213 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
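/*
 * PENDING(&t->pending, &t->blocked) thus expands to
 * has_pending_signals(&t->pending.signal, &t->blocked).
 */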
214
215 fastcall void recalc_sigpending_tsk(struct task_struct *t)
216 {
217         if (t->signal->group_stop_count > 0 ||
218             (freezing(t)) ||
219             PENDING(&t->pending, &t->blocked) ||
220             PENDING(&t->signal->shared_pending, &t->blocked))
221                 set_tsk_thread_flag(t, TIF_SIGPENDING);
222         else
223                 clear_tsk_thread_flag(t, TIF_SIGPENDING);
224 }
225
226 void recalc_sigpending(void)
227 {
228         recalc_sigpending_tsk(current);
229 }
230
231 /* Given the mask, find the first available signal that should be serviced. */
232
233 static int
234 next_signal(struct sigpending *pending, sigset_t *mask)
235 {
236         unsigned long i, *s, *m, x;
237         int sig = 0;
238         
239         s = pending->signal.sig;
240         m = mask->sig;
241         switch (_NSIG_WORDS) {
242         default:
243                 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
244                         if ((x = *s &~ *m) != 0) {
245                                 sig = ffz(~x) + i*_NSIG_BPW + 1;
246                                 break;
247                         }
248                 break;
249
250         case 2: if ((x = s[0] &~ m[0]) != 0)
251                         sig = 1;
252                 else if ((x = s[1] &~ m[1]) != 0)
253                         sig = _NSIG_BPW + 1;
254                 else
255                         break;
256                 sig += ffz(~x);
257                 break;
258
259         case 1: if ((x = *s &~ *m) != 0)
260                         sig = ffz(~x) + 1;
261                 break;
262         }
263         
264         return sig;
265 }
266
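/*
 * Allocate a sigqueue entry for task @t, charging it against the owning
 * user's pending-signal count.  The allocation is refused (NULL) when it
 * would exceed RLIMIT_SIGPENDING and @override_rlimit is not set; it may
 * also fail if the slab allocation itself fails.
 */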
267 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
268                                          int override_rlimit)
269 {
270         struct sigqueue *q = NULL;
271
272         atomic_inc(&t->user->sigpending);
273         if (override_rlimit ||
274             atomic_read(&t->user->sigpending) <=
275                         t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
276                 q = kmem_cache_alloc(sigqueue_cachep, flags);
277         if (unlikely(q == NULL)) {
278                 atomic_dec(&t->user->sigpending);
279         } else {
280                 INIT_LIST_HEAD(&q->list);
281                 q->flags = 0;
282                 q->user = get_uid(t->user);
283         }
284         return(q);
285 }
286
287 static void __sigqueue_free(struct sigqueue *q)
288 {
289         if (q->flags & SIGQUEUE_PREALLOC)
290                 return;
291         atomic_dec(&q->user->sigpending);
292         free_uid(q->user);
293         kmem_cache_free(sigqueue_cachep, q);
294 }
295
296 static void flush_sigqueue(struct sigpending *queue)
297 {
298         struct sigqueue *q;
299
300         sigemptyset(&queue->signal);
301         while (!list_empty(&queue->list)) {
302                 q = list_entry(queue->list.next, struct sigqueue , list);
303                 list_del_init(&q->list);
304                 __sigqueue_free(q);
305         }
306 }
307
308 /*
309  * Flush all pending signals for a task.
310  */
311
312 void
313 flush_signals(struct task_struct *t)
314 {
315         unsigned long flags;
316
317         spin_lock_irqsave(&t->sighand->siglock, flags);
318         clear_tsk_thread_flag(t,TIF_SIGPENDING);
319         flush_sigqueue(&t->pending);
320         flush_sigqueue(&t->signal->shared_pending);
321         spin_unlock_irqrestore(&t->sighand->siglock, flags);
322 }
323
324 /*
325  * This function expects the tasklist_lock write-locked.
326  */
327 void __exit_sighand(struct task_struct *tsk)
328 {
329         struct sighand_struct * sighand = tsk->sighand;
330
331         /* Ok, we're done with the signal handlers */
332         tsk->sighand = NULL;
333         if (atomic_dec_and_test(&sighand->count))
334                 sighand_free(sighand);
335 }
336
337 void exit_sighand(struct task_struct *tsk)
338 {
339         write_lock_irq(&tasklist_lock);
340         rcu_read_lock();
341         if (tsk->sighand != NULL) {
342                 struct sighand_struct *sighand = rcu_dereference(tsk->sighand);
343                 spin_lock(&sighand->siglock);
344                 __exit_sighand(tsk);
345                 spin_unlock(&sighand->siglock);
346         }
347         rcu_read_unlock();
348         write_unlock_irq(&tasklist_lock);
349 }
350
351 /*
352  * This function expects the tasklist_lock write-locked.
353  */
354 void __exit_signal(struct task_struct *tsk)
355 {
356         struct signal_struct * sig = tsk->signal;
357         struct sighand_struct * sighand;
358
359         if (!sig)
360                 BUG();
361         if (!atomic_read(&sig->count))
362                 BUG();
363         rcu_read_lock();
364         sighand = rcu_dereference(tsk->sighand);
365         spin_lock(&sighand->siglock);
366         posix_cpu_timers_exit(tsk);
367         if (atomic_dec_and_test(&sig->count)) {
368                 posix_cpu_timers_exit_group(tsk);
369                 tsk->signal = NULL;
370                 __exit_sighand(tsk);
371                 spin_unlock(&sighand->siglock);
372                 flush_sigqueue(&sig->shared_pending);
373         } else {
374                 /*
375                  * If there is any task waiting for the group exit
376                  * then notify it:
377                  */
378                 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
379                         wake_up_process(sig->group_exit_task);
380                         sig->group_exit_task = NULL;
381                 }
382                 if (tsk == sig->curr_target)
383                         sig->curr_target = next_thread(tsk);
384                 tsk->signal = NULL;
385                 /*
386                  * Accumulate here the counters for all threads but the
387                  * group leader as they die, so they can be added into
388                  * the process-wide totals when those are taken.
389                  * The group leader stays around as a zombie as long
390                  * as there are other threads.  When it gets reaped,
391                  * the exit.c code will add its counts into these totals.
392                  * We won't ever get here for the group leader, since it
393                  * will have been the last reference on the signal_struct.
394                  */
395                 sig->utime = cputime_add(sig->utime, tsk->utime);
396                 sig->stime = cputime_add(sig->stime, tsk->stime);
397                 sig->min_flt += tsk->min_flt;
398                 sig->maj_flt += tsk->maj_flt;
399                 sig->nvcsw += tsk->nvcsw;
400                 sig->nivcsw += tsk->nivcsw;
401                 sig->sched_time += tsk->sched_time;
402                 __exit_sighand(tsk);
403                 spin_unlock(&sighand->siglock);
404                 sig = NULL;     /* Marker for below.  */
405         }
406         rcu_read_unlock();
407         clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
408         flush_sigqueue(&tsk->pending);
409         if (sig) {
410                 /*
411                  * We are cleaning up the signal_struct here.
412                  */
413                 exit_thread_group_keys(sig);
414                 kmem_cache_free(signal_cachep, sig);
415         }
416 }
417
418 void exit_signal(struct task_struct *tsk)
419 {
420         atomic_dec(&tsk->signal->live);
421
422         write_lock_irq(&tasklist_lock);
423         __exit_signal(tsk);
424         write_unlock_irq(&tasklist_lock);
425 }
426
427 /*
428  * Flush all handlers for a task.
429  */
430
431 void
432 flush_signal_handlers(struct task_struct *t, int force_default)
433 {
434         int i;
435         struct k_sigaction *ka = &t->sighand->action[0];
436         for (i = _NSIG ; i != 0 ; i--) {
437                 if (force_default || ka->sa.sa_handler != SIG_IGN)
438                         ka->sa.sa_handler = SIG_DFL;
439                 ka->sa.sa_flags = 0;
440                 sigemptyset(&ka->sa.sa_mask);
441                 ka++;
442         }
443 }
444
445
446 /* Notify the system that a driver wants to block all signals for this
447  * process, and wants to be notified if any signals at all were to be
448  * sent/acted upon.  If the notifier routine returns non-zero, then the
449  * signal will be acted upon after all.  If the notifier routine returns 0,
450  * then the signal will be blocked.  Only one block per process is
451  * allowed.  priv is a pointer to private data that the notifier routine
452  * can use to determine if the signal should be blocked or not.  */
453
454 void
455 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
456 {
457         unsigned long flags;
458
459         spin_lock_irqsave(&current->sighand->siglock, flags);
460         current->notifier_mask = mask;
461         current->notifier_data = priv;
462         current->notifier = notifier;
463         spin_unlock_irqrestore(&current->sighand->siglock, flags);
464 }
465
466 /* Notify the system that blocking has ended. */
467
468 void
469 unblock_all_signals(void)
470 {
471         unsigned long flags;
472
473         spin_lock_irqsave(&current->sighand->siglock, flags);
474         current->notifier = NULL;
475         current->notifier_data = NULL;
476         recalc_sigpending();
477         spin_unlock_irqrestore(&current->sighand->siglock, flags);
478 }
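/*
 * Illustrative sketch (not part of the original file): a driver might use
 * the notifier interface roughly as below.  struct my_dev, my_dev_notifier()
 * and my_dev_critical() are hypothetical names.
 */
#if 0
static int my_dev_notifier(void *priv)
{
	struct my_dev *dev = priv;

	/* Non-zero means "deliver the signal after all". */
	return dev->allow_signals;
}

static void my_dev_critical(struct my_dev *dev)
{
	sigset_t all;

	sigfillset(&all);
	block_all_signals(my_dev_notifier, dev, &all);
	/* ... work that must not be interrupted by a signal ... */
	unblock_all_signals();
}
#endif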
479
480 static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
481 {
482         struct sigqueue *q, *first = NULL;
483         int still_pending = 0;
484
485         if (unlikely(!sigismember(&list->signal, sig)))
486                 return 0;
487
488         /*
489          * Collect the siginfo appropriate to this signal.  Check if
490          * there is another siginfo for the same signal.
491          */
492         list_for_each_entry(q, &list->list, list) {
493                 if (q->info.si_signo == sig) {
494                         if (first) {
495                                 still_pending = 1;
496                                 break;
497                         }
498                         first = q;
499                 }
500         }
501         if (first) {
502                 list_del_init(&first->list);
503                 copy_siginfo(info, &first->info);
504                 __sigqueue_free(first);
505                 if (!still_pending)
506                         sigdelset(&list->signal, sig);
507         } else {
508
509                 /* Ok, it wasn't in the queue.  This must be
510                    a fast-pathed signal or we must have been
511                    out of queue space.  So zero out the info.
512                  */
513                 sigdelset(&list->signal, sig);
514                 info->si_signo = sig;
515                 info->si_errno = 0;
516                 info->si_code = 0;
517                 info->si_pid = 0;
518                 info->si_uid = 0;
519         }
520         return 1;
521 }
522
523 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
524                         siginfo_t *info)
525 {
526         int sig = 0;
527
528         sig = next_signal(pending, mask);
529         if (sig) {
530                 if (current->notifier) {
531                         if (sigismember(current->notifier_mask, sig)) {
532                                 if (!(current->notifier)(current->notifier_data)) {
533                                         clear_thread_flag(TIF_SIGPENDING);
534                                         return 0;
535                                 }
536                         }
537                 }
538
539                 if (!collect_signal(sig, pending, info))
540                         sig = 0;
541                                 
542         }
543         recalc_sigpending();
544
545         return sig;
546 }
547
548 /*
549  * Dequeue a signal and return the element to the caller, which is 
550  * expected to free it.
551  *
552  * All callers have to hold the siglock.
553  */
554 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
555 {
556         int signr = __dequeue_signal(&tsk->pending, mask, info);
557         if (!signr)
558                 signr = __dequeue_signal(&tsk->signal->shared_pending,
559                                          mask, info);
560         if (signr && unlikely(sig_kernel_stop(signr))) {
561                 /*
562                  * Set a marker that we have dequeued a stop signal.  Our
563                  * caller might release the siglock and then the pending
564                  * stop signal it is about to process is no longer in the
565                  * pending bitmasks, but must still be cleared by a SIGCONT
566                  * (and overruled by a SIGKILL).  So those cases clear this
567                  * shared flag after we've set it.  Note that this flag may
568                  * remain set after the signal we return is ignored or
569                  * handled.  That doesn't matter because its only purpose
570                  * is to alert stop-signal processing code when another
571                  * processor has come along and cleared the flag.
572                  */
573                 if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
574                         tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
575         }
576         if ( signr &&
577              ((info->si_code & __SI_MASK) == __SI_TIMER) &&
578              info->si_sys_private){
579                 /*
580                  * Release the siglock to ensure proper locking order
581                  * of timer locks outside of siglocks.  Note, we leave
582                  * irqs disabled here, since the posix-timers code is
583                  * about to disable them again anyway.
584                  */
585                 spin_unlock(&tsk->sighand->siglock);
586                 do_schedule_next_timer(info);
587                 spin_lock(&tsk->sighand->siglock);
588         }
589         return signr;
590 }
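/*
 * Illustrative usage (not in the original): callers hold the siglock and
 * typically dequeue against their own blocked set, along the lines of:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */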
591
592 /*
593  * Tell a process that it has a new active signal..
594  *
595  * NOTE! we rely on the previous spin_lock to
596  * lock interrupts for us! We can only be called with
597  * "siglock" held, and the local interrupt must
598  * have been disabled when that got acquired!
599  *
600  * No need to set need_resched since signal event passing
601  * goes through ->blocked
602  */
603 void signal_wake_up(struct task_struct *t, int resume)
604 {
605         unsigned int mask;
606
607         set_tsk_thread_flag(t, TIF_SIGPENDING);
608
609         /*
610          * For SIGKILL, we want to wake it up in the stopped/traced case.
611          * We don't check t->state here because there is a race with it
612  * executing on another processor and just now entering stopped state.
613          * By using wake_up_state, we ensure the process will wake up and
614          * handle its death signal.
615          */
616         mask = TASK_INTERRUPTIBLE;
617         if (resume)
618                 mask |= TASK_STOPPED | TASK_TRACED;
619         if (!wake_up_state(t, mask))
620                 kick_process(t);
621 }
622
623 /*
624  * Remove signals in mask from the pending set and queue.
625  * Returns 1 if any signals were found.
626  *
627  * All callers must be holding the siglock.
628  *
629  * This version takes a sigset mask and looks at all signals,
630  * not just those in the first mask word.
631  */
632 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
633 {
634         struct sigqueue *q, *n;
635         sigset_t m;
636
637         sigandsets(&m, mask, &s->signal);
638         if (sigisemptyset(&m))
639                 return 0;
640
641         signandsets(&s->signal, &s->signal, mask);
642         list_for_each_entry_safe(q, n, &s->list, list) {
643                 if (sigismember(mask, q->info.si_signo)) {
644                         list_del_init(&q->list);
645                         __sigqueue_free(q);
646                 }
647         }
648         return 1;
649 }
650 /*
651  * Remove signals in mask from the pending set and queue.
652  * Returns 1 if any signals were found.
653  *
654  * All callers must be holding the siglock.
655  */
656 static int rm_from_queue(unsigned long mask, struct sigpending *s)
657 {
658         struct sigqueue *q, *n;
659
660         if (!sigtestsetmask(&s->signal, mask))
661                 return 0;
662
663         sigdelsetmask(&s->signal, mask);
664         list_for_each_entry_safe(q, n, &s->list, list) {
665                 if (q->info.si_signo < SIGRTMIN &&
666                     (mask & sigmask(q->info.si_signo))) {
667                         list_del_init(&q->list);
668                         __sigqueue_free(q);
669                 }
670         }
671         return 1;
672 }
673
674 /*
675  * Bad permissions for sending the signal
676  */
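/*
 * In short: a user-originated signal is permitted when one of the sender's
 * euid/uid matches the target's suid/uid, when the sender has CAP_KILL, or
 * when the signal is SIGCONT and sender and target share a session.
 */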
677 static int check_kill_permission(int sig, struct siginfo *info,
678                                  struct task_struct *t)
679 {
680         int user;
681         int error = -EINVAL;
682
683         if (!valid_signal(sig))
684                 return error;
685
686         user = ((info == SEND_SIG_NOINFO) ||
687                 (!is_si_special(info) && SI_FROMUSER(info)));
688
689         error = -EPERM;
690         if (user && ((sig != SIGCONT) ||
691                 (current->signal->session != t->signal->session))
692             && (current->euid ^ t->suid) && (current->euid ^ t->uid)
693             && (current->uid ^ t->suid) && (current->uid ^ t->uid)
694             && !capable(CAP_KILL))
695                 return error;
696
697         error = -ESRCH;
698         if (user && !vx_check(vx_task_xid(t), VX_ADMIN|VX_IDENT))
699                 return error;
700
701         error = security_task_kill(t, info, sig);
702         if (!error)
703                 audit_signal_info(sig, t); /* Let audit system see the signal */
704         return error;
705 }
706
707 /* forward decl */
708 static void do_notify_parent_cldstop(struct task_struct *tsk,
709                                      int to_self,
710                                      int why);
711
712 /*
713  * Handle magic process-wide effects of stop/continue signals.
714  * Unlike the signal actions, these happen immediately at signal-generation
715  * time regardless of blocking, ignoring, or handling.  This does the
716  * actual continuing for SIGCONT, but not the actual stopping for stop
717  * signals.  The process stop is done as a signal action for SIG_DFL.
718  */
719 static void handle_stop_signal(int sig, struct task_struct *p)
720 {
721         struct task_struct *t;
722
723         if (p->signal->flags & SIGNAL_GROUP_EXIT)
724                 /*
725                  * The process is in the middle of dying already.
726                  */
727                 return;
728
729         if (sig_kernel_stop(sig)) {
730                 /*
731                  * This is a stop signal.  Remove SIGCONT from all queues.
732                  */
733                 rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
734                 t = p;
735                 do {
736                         rm_from_queue(sigmask(SIGCONT), &t->pending);
737                         t = next_thread(t);
738                 } while (t != p);
739         } else if (sig == SIGCONT) {
740                 /*
741                  * Remove all stop signals from all queues,
742                  * and wake all threads.
743                  */
744                 if (unlikely(p->signal->group_stop_count > 0)) {
745                         /*
746                          * There was a group stop in progress.  We'll
747                          * pretend it finished before we got here.  We are
748                          * obliged to report it to the parent: if the
749                          * SIGSTOP happened "after" this SIGCONT, then it
750                          * would have cleared this pending SIGCONT.  If it
751                          * happened "before" this SIGCONT, then the parent
752                          * got the SIGCHLD about the stop finishing before
753                          * the continue happened.  We do the notification
754                          * now, and it's as if the stop had finished and
755                          * the SIGCHLD was pending on entry to this kill.
756                          */
757                         p->signal->group_stop_count = 0;
758                         p->signal->flags = SIGNAL_STOP_CONTINUED;
759                         spin_unlock(&p->sighand->siglock);
760                         do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
761                         spin_lock(&p->sighand->siglock);
762                 }
763                 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
764                 t = p;
765                 do {
766                         unsigned int state;
767                         rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
768                         
769                         /*
770                          * If there is a handler for SIGCONT, we must make
771                          * sure that no thread returns to user mode before
772                          * we post the signal, in case it was the only
773                          * thread eligible to run the signal handler--then
774                          * it must not do anything between resuming and
775                          * running the handler.  With the TIF_SIGPENDING
776                          * flag set, the thread will pause and acquire the
777                          * siglock that we hold now and until we've queued
778                          * the pending signal. 
779                          *
780                          * Wake up the stopped thread _after_ setting
781                          * TIF_SIGPENDING
782                          */
783                         state = TASK_STOPPED;
784                         if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
785                                 set_tsk_thread_flag(t, TIF_SIGPENDING);
786                                 state |= TASK_INTERRUPTIBLE;
787                         }
788                         wake_up_state(t, state);
789
790                         t = next_thread(t);
791                 } while (t != p);
792
793                 if (p->signal->flags & SIGNAL_STOP_STOPPED) {
794                         /*
795                          * We were in fact stopped, and are now continued.
796                          * Notify the parent with CLD_CONTINUED.
797                          */
798                         p->signal->flags = SIGNAL_STOP_CONTINUED;
799                         p->signal->group_exit_code = 0;
800                         spin_unlock(&p->sighand->siglock);
801                         do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
802                         spin_lock(&p->sighand->siglock);
803                 } else {
804                         /*
805                          * We are not stopped, but there could be a stop
806                          * signal in the middle of being processed after
807                          * being removed from the queue.  Clear that too.
808                          */
809                         p->signal->flags = 0;
810                 }
811         } else if (sig == SIGKILL) {
812                 /*
813                  * Make sure that any pending stop signal already dequeued
814                  * is undone by the wakeup for SIGKILL.
815                  */
816                 p->signal->flags = 0;
817         }
818 }
819
820 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
821                         struct sigpending *signals)
822 {
823         struct sigqueue * q = NULL;
824         int ret = 0;
825
826         /*
827          * fast-pathed signals for kernel-internal things like SIGSTOP
828          * or SIGKILL.
829          */
830         if (info == SEND_SIG_FORCED)
831                 goto out_set;
832
833         /* Real-time signals must be queued if sent by sigqueue, or
834            some other real-time mechanism.  It is implementation
835            defined whether kill() does so.  We attempt to do so, on
836            the principle of least surprise, but since kill is not
837            allowed to fail with EAGAIN when low on memory we just
838            make sure at least one signal gets delivered and don't
839            pass on the info struct.  */
840
841         q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
842                                              (is_si_special(info) ||
843                                               info->si_code >= 0)));
844         if (q) {
845                 list_add_tail(&q->list, &signals->list);
846                 switch ((unsigned long) info) {
847                 case (unsigned long) SEND_SIG_NOINFO:
848                         q->info.si_signo = sig;
849                         q->info.si_errno = 0;
850                         q->info.si_code = SI_USER;
851                         q->info.si_pid = current->pid;
852                         q->info.si_uid = current->uid;
853                         break;
854                 case (unsigned long) SEND_SIG_PRIV:
855                         q->info.si_signo = sig;
856                         q->info.si_errno = 0;
857                         q->info.si_code = SI_KERNEL;
858                         q->info.si_pid = 0;
859                         q->info.si_uid = 0;
860                         break;
861                 default:
862                         copy_siginfo(&q->info, info);
863                         break;
864                 }
865         } else if (!is_si_special(info)) {
866                 if (sig >= SIGRTMIN && info->si_code != SI_USER)
867                 /*
868                  * Queue overflow, abort.  We may abort if the signal was rt
869                  * and sent by a user using something other than kill().
870                  */
871                         return -EAGAIN;
872         }
873
874 out_set:
875         sigaddset(&signals->signal, sig);
876         return ret;
877 }
878
879 #define LEGACY_QUEUE(sigptr, sig) \
880         (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
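/*
 * Example (illustrative): if SIGCHLD is already pending for a task,
 * LEGACY_QUEUE(&t->pending, SIGCHLD) is true and a second SIGCHLD sent
 * before the first is dequeued is coalesced into the one pending
 * instance; real-time signals (>= SIGRTMIN) never take this shortcut
 * and are queued individually.
 */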
881
882
883 static int
884 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
885 {
886         int ret = 0;
887
888         if (!irqs_disabled())
889                 BUG();
890         assert_spin_locked(&t->sighand->siglock);
891
892         /* Short-circuit ignored signals.  */
893         if (sig_ignored(t, sig))
894                 goto out;
895
896         /* Support queueing exactly one non-rt signal, so that we
897            can get more detailed information about the cause of
898            the signal. */
899         if (LEGACY_QUEUE(&t->pending, sig))
900                 goto out;
901
902         ret = send_signal(sig, info, t, &t->pending);
903         if (!ret && !sigismember(&t->blocked, sig))
904                 signal_wake_up(t, sig == SIGKILL);
905 out:
906         return ret;
907 }
908
909 /*
910  * Force a signal that the process can't ignore: if necessary
911  * we unblock the signal and change any SIG_IGN to SIG_DFL.
912  */
913
914 int
915 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
916 {
917         unsigned long int flags;
918         int ret;
919
920         spin_lock_irqsave(&t->sighand->siglock, flags);
921         if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
922                 t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
923         }
924         if (sigismember(&t->blocked, sig)) {
925                 sigdelset(&t->blocked, sig);
926         }
927         recalc_sigpending_tsk(t);
928         ret = specific_send_sig_info(sig, info, t);
929         spin_unlock_irqrestore(&t->sighand->siglock, flags);
930
931         return ret;
932 }
933
934 void
935 force_sig_specific(int sig, struct task_struct *t)
936 {
937         force_sig_info(sig, SEND_SIG_FORCED, t);
938 }
939
940 /*
941  * Test if P wants to take SIG.  After we've checked all threads with this,
942  * it's equivalent to finding no threads not blocking SIG.  Any threads not
943  * blocking SIG were ruled out because they are not running and already
944  * have pending signals.  Such threads will dequeue from the shared queue
945  * as soon as they're available, so putting the signal on the shared queue
946  * will be equivalent to sending it to one such thread.
947  */
948 static inline int wants_signal(int sig, struct task_struct *p)
949 {
950         if (sigismember(&p->blocked, sig))
951                 return 0;
952         if (p->flags & PF_EXITING)
953                 return 0;
954         if (sig == SIGKILL)
955                 return 1;
956         if (p->state & (TASK_STOPPED | TASK_TRACED))
957                 return 0;
958         return task_curr(p) || !signal_pending(p);
959 }
960
961 static void
962 __group_complete_signal(int sig, struct task_struct *p)
963 {
964         struct task_struct *t;
965
966         /*
967          * Now find a thread we can wake up to take the signal off the queue.
968          *
969          * If the main thread wants the signal, it gets first crack.
970          * Probably the least surprising to the average bear.
971          */
972         if (wants_signal(sig, p))
973                 t = p;
974         else if (thread_group_empty(p))
975                 /*
976                  * There is just one thread and it does not need to be woken.
977                  * It will dequeue unblocked signals before it runs again.
978                  */
979                 return;
980         else {
981                 /*
982                  * Otherwise try to find a suitable thread.
983                  */
984                 t = p->signal->curr_target;
985                 if (t == NULL)
986                         /* restart balancing at this thread */
987                         t = p->signal->curr_target = p;
988
989                 while (!wants_signal(sig, t)) {
990                         t = next_thread(t);
991                         if (t == p->signal->curr_target)
992                                 /*
993                                  * No thread needs to be woken.
994                                  * Any eligible threads will see
995                                  * the signal in the queue soon.
996                                  */
997                                 return;
998                 }
999                 p->signal->curr_target = t;
1000         }
1001
1002         /*
1003          * Found a killable thread.  If the signal will be fatal,
1004          * then start taking the whole group down immediately.
1005          */
1006         if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
1007             !sigismember(&t->real_blocked, sig) &&
1008             (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
1009                 /*
1010                  * This signal will be fatal to the whole group.
1011                  */
1012                 if (!sig_kernel_coredump(sig)) {
1013                         /*
1014                          * Start a group exit and wake everybody up.
1015                          * This way we don't have other threads
1016                          * running and doing things after a slower
1017                          * thread has the fatal signal pending.
1018                          */
1019                         p->signal->flags = SIGNAL_GROUP_EXIT;
1020                         p->signal->group_exit_code = sig;
1021                         p->signal->group_stop_count = 0;
1022                         t = p;
1023                         do {
1024                                 sigaddset(&t->pending.signal, SIGKILL);
1025                                 signal_wake_up(t, 1);
1026                                 t = next_thread(t);
1027                         } while (t != p);
1028                         return;
1029                 }
1030
1031                 /*
1032                  * There will be a core dump.  We make all threads other
1033                  * than the chosen one go into a group stop so that nothing
1034                  * happens until it gets scheduled, takes the signal off
1035                  * the shared queue, and does the core dump.  This is a
1036                  * little more complicated than strictly necessary, but it
1037                  * keeps the signal state that winds up in the core dump
1038                  * unchanged from the death state, e.g. which thread had
1039                  * the core-dump signal unblocked.
1040                  */
1041                 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
1042                 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
1043                 p->signal->group_stop_count = 0;
1044                 p->signal->group_exit_task = t;
1045                 t = p;
1046                 do {
1047                         p->signal->group_stop_count++;
1048                         signal_wake_up(t, 0);
1049                         t = next_thread(t);
1050                 } while (t != p);
1051                 wake_up_process(p->signal->group_exit_task);
1052                 return;
1053         }
1054
1055         /*
1056          * The signal is already in the shared-pending queue.
1057          * Tell the chosen thread to wake up and dequeue it.
1058          */
1059         signal_wake_up(t, sig == SIGKILL);
1060         return;
1061 }
1062
1063 int
1064 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1065 {
1066         int ret = 0;
1067
1068         assert_spin_locked(&p->sighand->siglock);
1069         handle_stop_signal(sig, p);
1070
1071         /* Short-circuit ignored signals.  */
1072         if (sig_ignored(p, sig))
1073                 return ret;
1074
1075         if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
1076                 /* This is a non-RT signal and we already have one queued.  */
1077                 return ret;
1078
1079         /*
1080          * Put this signal on the shared-pending queue, or fail with EAGAIN.
1081          * We always use the shared queue for process-wide signals,
1082          * to avoid several races.
1083          */
1084         ret = send_signal(sig, info, p, &p->signal->shared_pending);
1085         if (unlikely(ret))
1086                 return ret;
1087
1088         __group_complete_signal(sig, p);
1089         return 0;
1090 }
1091
1092 /*
1093  * Nuke all other threads in the group.
1094  */
1095 void zap_other_threads(struct task_struct *p)
1096 {
1097         struct task_struct *t;
1098
1099         p->signal->flags = SIGNAL_GROUP_EXIT;
1100         p->signal->group_stop_count = 0;
1101
1102         if (thread_group_empty(p))
1103                 return;
1104
1105         for (t = next_thread(p); t != p; t = next_thread(t)) {
1106                 /*
1107                  * Don't bother with already dead threads
1108                  */
1109                 if (t->exit_state)
1110                         continue;
1111
1112                 /*
1113                  * We don't want to notify the parent, since we are
1114                  * killed as part of a thread group due to another
1115                  * thread doing an execve() or similar. So set the
1116                  * exit signal to -1 to allow immediate reaping of
1117                  * the process.  But don't detach the thread group
1118                  * leader.
1119                  */
1120                 if (t != p->group_leader)
1121                         t->exit_signal = -1;
1122
1123                 /* SIGKILL will be handled before any pending SIGSTOP */
1124                 sigaddset(&t->pending.signal, SIGKILL);
1125                 signal_wake_up(t, 1);
1126         }
1127 }
1128
1129 /*
1130  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
1131  */
1132 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1133 {
1134         unsigned long flags;
1135         struct sighand_struct *sp;
1136         int ret;
1137
1138 retry:
1139         ret = check_kill_permission(sig, info, p);
1140         if (!ret && sig && (sp = rcu_dereference(p->sighand))) {
1141                 spin_lock_irqsave(&sp->siglock, flags);
1142                 if (p->sighand != sp) {
1143                         spin_unlock_irqrestore(&sp->siglock, flags);
1144                         goto retry;
1145                 }
1146                 if ((atomic_read(&sp->count) == 0) ||
1147                                 (atomic_read(&p->usage) == 0)) {
1148                         spin_unlock_irqrestore(&sp->siglock, flags);
1149                         return -ESRCH;
1150                 }
1151                 ret = __group_send_sig_info(sig, info, p);
1152                 spin_unlock_irqrestore(&sp->siglock, flags);
1153         }
1154
1155         return ret;
1156 }
1157
1158 /*
1159  * kill_pg_info() sends a signal to a process group: this is what the tty
1160  * control characters do (^C, ^Z etc)
1161  */
1162
1163 int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1164 {
1165         struct task_struct *p = NULL;
1166         int retval, success;
1167
1168         if (pgrp <= 0)
1169                 return -EINVAL;
1170
1171         success = 0;
1172         retval = -ESRCH;
1173         do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
1174                 int err = group_send_sig_info(sig, info, p);
1175                 success |= !err;
1176                 retval = err;
1177         } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
1178         return success ? 0 : retval;
1179 }
1180
1181 int
1182 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1183 {
1184         int retval;
1185
1186         read_lock(&tasklist_lock);
1187         retval = __kill_pg_info(sig, info, pgrp);
1188         read_unlock(&tasklist_lock);
1189
1190         return retval;
1191 }
1192
1193 int
1194 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1195 {
1196         int error;
1197         int acquired_tasklist_lock = 0;
1198         struct task_struct *p;
1199
1200         rcu_read_lock();
1201         if (unlikely(sig_kernel_stop(sig) || sig == SIGCONT)) {
1202                 read_lock(&tasklist_lock);
1203                 acquired_tasklist_lock = 1;
1204         }
1205         p = find_task_by_pid(pid);
1206         error = -ESRCH;
1207         if (p && vx_check(vx_task_xid(p), VX_IDENT))
1208                 error = group_send_sig_info(sig, info, p);
1209         if (unlikely(acquired_tasklist_lock))
1210                 read_unlock(&tasklist_lock);
1211         rcu_read_unlock();
1212         return error;
1213 }
1214
1215 /* like kill_proc_info(), but doesn't use uid/euid of "current" */
1216 int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1217                       uid_t uid, uid_t euid)
1218 {
1219         int ret = -EINVAL;
1220         struct task_struct *p;
1221
1222         if (!valid_signal(sig))
1223                 return ret;
1224
1225         read_lock(&tasklist_lock);
1226         p = find_task_by_pid(pid);
1227         if (!p) {
1228                 ret = -ESRCH;
1229                 goto out_unlock;
1230         }
1231         if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
1232             && (euid != p->suid) && (euid != p->uid)
1233             && (uid != p->suid) && (uid != p->uid)) {
1234                 ret = -EPERM;
1235                 goto out_unlock;
1236         }
1237         if (sig && p->sighand) {
1238                 unsigned long flags;
1239                 spin_lock_irqsave(&p->sighand->siglock, flags);
1240                 ret = __group_send_sig_info(sig, info, p);
1241                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1242         }
1243 out_unlock:
1244         read_unlock(&tasklist_lock);
1245         return ret;
1246 }
1247 EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
1248
1249 /*
1250  * kill_something_info() interprets pid in interesting ways just like kill(2).
1251  *
1252  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1253  * is probably wrong.  Should make it like BSD or SYSV.
1254  */
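/*
 * In summary (mirroring the code below):
 *	pid == 0	signal the caller's own process group
 *	pid == -1	signal every visible process except init and the caller
 *	pid <  -1	signal the process group -pid
 *	pid >   0	signal the single process pid
 */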
1255
1256 static int kill_something_info(int sig, struct siginfo *info, int pid)
1257 {
1258         if (!pid) {
1259                 return kill_pg_info(sig, info, process_group(current));
1260         } else if (pid == -1) {
1261                 int retval = 0, count = 0;
1262                 struct task_struct * p;
1263
1264                 read_lock(&tasklist_lock);
1265                 for_each_process(p) {
1266                         if (vx_check(vx_task_xid(p), VX_ADMIN|VX_IDENT)&&
1267                                 p->pid > 1 && p->tgid != current->tgid) {
1268                                 int err = group_send_sig_info(sig, info, p);
1269                                 ++count;
1270                                 if (err != -EPERM)
1271                                         retval = err;
1272                         }
1273                 }
1274                 read_unlock(&tasklist_lock);
1275                 return count ? retval : -ESRCH;
1276         } else if (pid < 0) {
1277                 return kill_pg_info(sig, info, -pid);
1278         } else {
1279                 return kill_proc_info(sig, info, pid);
1280         }
1281 }
1282
1283 /*
1284  * These are for backward compatibility with the rest of the kernel source.
1285  */
1286
1287 /*
1288  * These two are the most common entry points.  They send a signal
1289  * just to the specific thread.
1290  */
1291 int
1292 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1293 {
1294         int ret;
1295         unsigned long flags;
1296
1297         /*
1298          * Make sure legacy kernel users don't send in bad values
1299          * (normal paths check this in check_kill_permission).
1300          */
1301         if (!valid_signal(sig))
1302                 return -EINVAL;
1303
1304         /*
1305          * We need the tasklist lock even for the specific
1306          * thread case (when we don't need to follow the group
1307          * lists) in order to avoid races with "p->sighand"
1308          * going away or changing from under us.
1309          */
1310         read_lock(&tasklist_lock);  
1311         spin_lock_irqsave(&p->sighand->siglock, flags);
1312         ret = specific_send_sig_info(sig, info, p);
1313         spin_unlock_irqrestore(&p->sighand->siglock, flags);
1314         read_unlock(&tasklist_lock);
1315         return ret;
1316 }
1317
1318 #define __si_special(priv) \
1319         ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1320
1321 int
1322 send_sig(int sig, struct task_struct *p, int priv)
1323 {
1324         return send_sig_info(sig, __si_special(priv), p);
1325 }
1326
1327 /*
1328  * This is the entry point for "process-wide" signals.
1329  * They will go to an appropriate thread in the thread group.
1330  */
1331 int
1332 send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1333 {
1334         int ret;
1335         read_lock(&tasklist_lock);
1336         ret = group_send_sig_info(sig, info, p);
1337         read_unlock(&tasklist_lock);
1338         return ret;
1339 }
1340
1341 void
1342 force_sig(int sig, struct task_struct *p)
1343 {
1344         force_sig_info(sig, SEND_SIG_PRIV, p);
1345 }
1346
1347 /*
1348  * When things go south during signal handling, we
1349  * will force a SIGSEGV. And if the signal that caused
1350  * the problem was already a SIGSEGV, we'll want to
1351  * make sure we don't even try to deliver the signal..
1352  */
1353 int
1354 force_sigsegv(int sig, struct task_struct *p)
1355 {
1356         if (sig == SIGSEGV) {
1357                 unsigned long flags;
1358                 spin_lock_irqsave(&p->sighand->siglock, flags);
1359                 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1360                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1361         }
1362         force_sig(SIGSEGV, p);
1363         return 0;
1364 }
1365
1366 int
1367 kill_pg(pid_t pgrp, int sig, int priv)
1368 {
1369         return kill_pg_info(sig, __si_special(priv), pgrp);
1370 }
1371
1372 int
1373 kill_proc(pid_t pid, int sig, int priv)
1374 {
1375         return kill_proc_info(sig, __si_special(priv), pid);
1376 }
1377
1378 /*
1379  * These functions support sending signals using preallocated sigqueue
1380  * structures.  This is needed "because realtime applications cannot
1381  * afford to lose notifications of asynchronous events, like timer
1382  * expirations or I/O completions".  In the case of POSIX timers
1383  * we allocate the sigqueue structure in timer_create().  If this
1384  * allocation fails we are able to report the failure to the application
1385  * with an EAGAIN error.
1386  */
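/*
 * Illustrative lifecycle (not part of the original source), assuming the
 * posix-timers caller:
 *
 *	q = sigqueue_alloc();				(timer creation)
 *	send_sigqueue(sig, q, task);			(each expiry, thread)
 *	    or send_group_sigqueue(sig, q, task);	(each expiry, process)
 *	sigqueue_free(q);				(timer deletion)
 */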
1387  
1388 struct sigqueue *sigqueue_alloc(void)
1389 {
1390         struct sigqueue *q;
1391
1392         if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1393                 q->flags |= SIGQUEUE_PREALLOC;
1394         return(q);
1395 }
1396
1397 void sigqueue_free(struct sigqueue *q)
1398 {
1399         unsigned long flags;
1400         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1401         /*
1402          * If the signal is still pending remove it from the
1403          * pending queue.
1404          */
1405         if (unlikely(!list_empty(&q->list))) {
1406                 spinlock_t *lock = &current->sighand->siglock;
1407                 read_lock(&tasklist_lock);
1408                 spin_lock_irqsave(lock, flags);
1409                 if (!list_empty(&q->list))
1410                         list_del_init(&q->list);
1411                 spin_unlock_irqrestore(lock, flags);
1412                 read_unlock(&tasklist_lock);
1413         }
1414         q->flags &= ~SIGQUEUE_PREALLOC;
1415         __sigqueue_free(q);
1416 }
1417
1418 int
1419 send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1420 {
1421         unsigned long flags;
1422         int ret = 0;
1423         struct sighand_struct *sh;
1424
1425         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1426
1427         /*
1428          * The rcu based delayed sighand destroy makes it possible to
1429          * run this without tasklist lock held. The task struct itself
1430          * cannot go away as create_timer did get_task_struct().
1431          *
1432          * We return -1 when the task is marked exiting, so that
1433          * posix_timer_event() can redirect it to the group leader.
1434          */
1435         rcu_read_lock();
1436
1437         if (unlikely(p->flags & PF_EXITING)) {
1438                 ret = -1;
1439                 goto out_err;
1440         }
1441
1442 retry:
1443         sh = rcu_dereference(p->sighand);
1444
1445         spin_lock_irqsave(&sh->siglock, flags);
1446         if (p->sighand != sh) {
1447                 /* We raced with exec() in a multithreaded process... */
1448                 spin_unlock_irqrestore(&sh->siglock, flags);
1449                 goto retry;
1450         }
1451
1452         /*
1453          * We do the check here again to handle the following scenario:
1454          *
1455          * CPU 0                CPU 1
1456          * send_sigqueue
1457          * check PF_EXITING
1458          * interrupt            exit code running
1459          *                      __exit_signal
1460          *                      lock sighand->siglock
1461          *                      unlock sighand->siglock
1462          * lock sh->siglock
1463          * add(tsk->pending)    flush_sigqueue(tsk->pending)
1464          *
1465          */
1466
1467         if (unlikely(p->flags & PF_EXITING)) {
1468                 ret = -1;
1469                 goto out;
1470         }
1471
1472         if (unlikely(!list_empty(&q->list))) {
1473                 /*
1474                  * If an SI_TIMER entry is already queued, just increment
1475                  * the overrun count.
1476                  */
1477                 if (q->info.si_code != SI_TIMER)
1478                         BUG();
1479                 q->info.si_overrun++;
1480                 goto out;
1481         }
1482         /* Short-circuit ignored signals.  */
1483         if (sig_ignored(p, sig)) {
1484                 ret = 1;
1485                 goto out;
1486         }
1487
1488         list_add_tail(&q->list, &p->pending.list);
1489         sigaddset(&p->pending.signal, sig);
1490         if (!sigismember(&p->blocked, sig))
1491                 signal_wake_up(p, sig == SIGKILL);
1492
1493 out:
1494         spin_unlock_irqrestore(&sh->siglock, flags);
1495 out_err:
1496         rcu_read_unlock();
1497
1498         return ret;
1499 }
1500
1501 int
1502 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1503 {
1504         unsigned long flags;
1505         int ret = 0;
1506
1507         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1508
1509         read_lock(&tasklist_lock);
1510         /* Since it_lock is held, p->sighand cannot be NULL. */
1511         spin_lock_irqsave(&p->sighand->siglock, flags);
1512         handle_stop_signal(sig, p);
1513
1514         /* Short-circuit ignored signals.  */
1515         if (sig_ignored(p, sig)) {
1516                 ret = 1;
1517                 goto out;
1518         }
1519
1520         if (unlikely(!list_empty(&q->list))) {
1521                 /*
1522                  * If an SI_TIMER entry is already queued, just increment
1523                  * the overrun count.  Other uses should not try to
1524                  * send the signal multiple times.
1525                  */
1526                 if (q->info.si_code != SI_TIMER)
1527                         BUG();
1528                 q->info.si_overrun++;
1529                 goto out;
1530         } 
1531
1532         /*
1533          * Put this signal on the shared-pending queue.
1534          * We always use the shared queue for process-wide signals,
1535          * to avoid several races.
1536          */
1537         list_add_tail(&q->list, &p->signal->shared_pending.list);
1538         sigaddset(&p->signal->shared_pending.signal, sig);
1539
1540         __group_complete_signal(sig, p);
1541 out:
1542         spin_unlock_irqrestore(&p->sighand->siglock, flags);
1543         read_unlock(&tasklist_lock);
1544         return ret;
1545 }
1546
1547 /*
1548  * Wake up any threads in the parent blocked in wait* syscalls.
1549  */
1550 static inline void __wake_up_parent(struct task_struct *p,
1551                                     struct task_struct *parent)
1552 {
1553         wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1554 }
1555
1556 /*
1557  * Let a parent know about the death of a child.
1558  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1559  */
1560
1561 void do_notify_parent(struct task_struct *tsk, int sig)
1562 {
1563         struct siginfo info;
1564         unsigned long flags;
1565         struct sighand_struct *psig;
1566
1567         BUG_ON(sig == -1);
1568
1569         /* do_notify_parent_cldstop should have been called instead.  */
1570         BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
1571
1572         BUG_ON(!tsk->ptrace &&
1573                (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1574
1575         info.si_signo = sig;
1576         info.si_errno = 0;
1577         info.si_pid = tsk->pid;
1578         info.si_uid = tsk->uid;
1579
1580         /* FIXME: find out whether or not this is supposed to be c*time. */
1581         info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
1582                                                        tsk->signal->utime));
1583         info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
1584                                                        tsk->signal->stime));
1585
1586         info.si_status = tsk->exit_code & 0x7f;
1587         if (tsk->exit_code & 0x80)
1588                 info.si_code = CLD_DUMPED;
1589         else if (tsk->exit_code & 0x7f)
1590                 info.si_code = CLD_KILLED;
1591         else {
1592                 info.si_code = CLD_EXITED;
1593                 info.si_status = tsk->exit_code >> 8;
1594         }
1595
1596         psig = tsk->parent->sighand;
1597         spin_lock_irqsave(&psig->siglock, flags);
1598         if (!tsk->ptrace && sig == SIGCHLD &&
1599             (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1600              (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1601                 /*
1602                  * We are exiting and our parent doesn't care.  POSIX.1
1603                  * defines special semantics for setting SIGCHLD to SIG_IGN
1604                  * or setting the SA_NOCLDWAIT flag: we should be reaped
1605                  * automatically and not left for our parent's wait4 call.
1606                  * Rather than having the parent do it as a magic kind of
1607                  * signal handler, we just set this to tell do_exit that we
1608                  * can be cleaned up without becoming a zombie.  Note that
1609                  * we still call __wake_up_parent in this case, because a
1610                  * blocked sys_wait4 might now return -ECHILD.
1611                  *
1612                  * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1613                  * is implementation-defined: we do (if you don't want
1614                  * it, just use SIG_IGN instead).
1615                  */
1616                 tsk->exit_signal = -1;
1617                 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1618                         sig = 0;
1619         }
1620         if (valid_signal(sig) && sig > 0)
1621                 __group_send_sig_info(sig, &info, tsk->parent);
1622         __wake_up_parent(tsk, tsk->parent);
1623         spin_unlock_irqrestore(&psig->siglock, flags);
1624 }
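
/*
 * Userspace view of the SIG_IGN/SA_NOCLDWAIT case handled above (a
 * minimal sketch, not part of this file): with SIGCHLD explicitly set
 * to SIG_IGN, children are reaped automatically and a blocked wait()
 * eventually fails with ECHILD instead of returning the child.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        signal(SIGCHLD, SIG_IGN);       /* "we don't care": auto-reap */

        if (fork() == 0)
                _exit(0);               /* child exits; no zombie remains */

        if (wait(NULL) == -1 && errno == ECHILD)
                printf("child was reaped automatically\n");
        return 0;
}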
1625
1626 static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
1627 {
1628         struct siginfo info;
1629         unsigned long flags;
1630         struct task_struct *parent;
1631         struct sighand_struct *sighand;
1632
1633         if (to_self)
1634                 parent = tsk->parent;
1635         else {
1636                 tsk = tsk->group_leader;
1637                 parent = tsk->real_parent;
1638         }
1639
1640         info.si_signo = SIGCHLD;
1641         info.si_errno = 0;
1642         info.si_pid = tsk->pid;
1643         info.si_uid = tsk->uid;
1644
1645         /* FIXME: find out whether or not this is supposed to be c*time. */
1646         info.si_utime = cputime_to_jiffies(tsk->utime);
1647         info.si_stime = cputime_to_jiffies(tsk->stime);
1648
1649         info.si_code = why;
1650         switch (why) {
1651         case CLD_CONTINUED:
1652                 info.si_status = SIGCONT;
1653                 break;
1654         case CLD_STOPPED:
1655                 info.si_status = tsk->signal->group_exit_code & 0x7f;
1656                 break;
1657         case CLD_TRAPPED:
1658                 info.si_status = tsk->exit_code & 0x7f;
1659                 break;
1660         default:
1661                 BUG();
1662         }
1663
1664         sighand = parent->sighand;
1665         spin_lock_irqsave(&sighand->siglock, flags);
1666         if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1667             !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1668                 __group_send_sig_info(SIGCHLD, &info, parent);
1669         /*
1670          * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1671          */
1672         __wake_up_parent(tsk, parent);
1673         spin_unlock_irqrestore(&sighand->siglock, flags);
1674 }
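
/*
 * What do_notify_parent_cldstop() looks like from the parent's side (a
 * minimal sketch, not part of this file): waitpid() with WUNTRACED and
 * WCONTINUED observes the CLD_STOPPED and CLD_CONTINUED notifications,
 * assuming SIGCHLD is not ignored and SA_NOCLDSTOP is not set.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();
        int status;

        if (pid == 0) {
                pause();                        /* child just sits there */
                _exit(0);
        }

        kill(pid, SIGSTOP);
        waitpid(pid, &status, WUNTRACED);
        if (WIFSTOPPED(status))
                printf("child stopped by signal %d\n", WSTOPSIG(status));

        kill(pid, SIGCONT);
        waitpid(pid, &status, WCONTINUED);
        if (WIFCONTINUED(status))
                printf("child continued\n");

        kill(pid, SIGKILL);
        waitpid(pid, &status, 0);               /* reap it */
        return 0;
}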
1675
1676 /*
1677  * This must be called with current->sighand->siglock held.
1678  *
1679  * This should be the path for all ptrace stops.
1680  * We always set current->last_siginfo while stopped here.
1681  * That makes it a way to test a stopped process for
1682  * being ptrace-stopped vs being job-control-stopped.
1683  *
1684  * If we actually decide not to stop at all because the tracer is gone,
1685  * we leave nostop_code in current->exit_code.
1686  */
1687 static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1688 {
1689         /*
1690          * If there is a group stop in progress,
1691          * we must participate in the bookkeeping.
1692          */
1693         if (current->signal->group_stop_count > 0)
1694                 --current->signal->group_stop_count;
1695
1696         current->last_siginfo = info;
1697         current->exit_code = exit_code;
1698
1699         /* Let the debugger run.  */
1700         set_current_state(TASK_TRACED);
1701         spin_unlock_irq(&current->sighand->siglock);
1702         try_to_freeze();
1703         read_lock(&tasklist_lock);
1704         if (likely(current->ptrace & PT_PTRACED) &&
1705             likely(current->parent != current->real_parent ||
1706                    !(current->ptrace & PT_ATTACHED)) &&
1707             (likely(current->parent->signal != current->signal) ||
1708              !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
1709                 do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
1710                 read_unlock(&tasklist_lock);
1711                 schedule();
1712         } else {
1713                 /*
1714                  * By the time we got the lock, our tracer went away.
1715                  * Don't stop here.
1716                  */
1717                 read_unlock(&tasklist_lock);
1718                 set_current_state(TASK_RUNNING);
1719                 current->exit_code = nostop_code;
1720         }
1721
1722         /*
1723          * We are back.  Now reacquire the siglock before touching
1724          * last_siginfo, so that we are sure to have synchronized with
1725          * any signal-sending on another CPU that wants to examine it.
1726          */
1727         spin_lock_irq(&current->sighand->siglock);
1728         current->last_siginfo = NULL;
1729
1730         /*
1731          * Queued signals ignored us while we were stopped for tracing.
1732          * So check for any that we should take before resuming user mode.
1733          */
1734         recalc_sigpending();
1735 }
1736
1737 void ptrace_notify(int exit_code)
1738 {
1739         siginfo_t info;
1740
1741         BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1742
1743         memset(&info, 0, sizeof info);
1744         info.si_signo = SIGTRAP;
1745         info.si_code = exit_code;
1746         info.si_pid = current->pid;
1747         info.si_uid = current->uid;
1748
1749         /* Let the debugger run.  */
1750         spin_lock_irq(&current->sighand->siglock);
1751         ptrace_stop(exit_code, 0, &info);
1752         spin_unlock_irq(&current->sighand->siglock);
1753 }
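
/*
 * The tracer's side of a ptrace stop (a minimal sketch, not part of
 * this file): a PTRACE_TRACEME child stops with SIGTRAP on exec, the
 * parent sees it via waitpid() as a stopped child (CLD_TRAPPED), and
 * PTRACE_CONT with a zero signal resumes it without injecting anything.
 * /bin/true as the traced program is just an example.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();
        int status;

        if (pid == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                execl("/bin/true", "true", (char *)NULL);
                _exit(127);
        }

        waitpid(pid, &status, 0);
        if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP)
                printf("tracee stopped at exec\n");

        ptrace(PTRACE_CONT, pid, NULL, NULL);   /* resume, no signal */
        waitpid(pid, &status, 0);               /* wait for it to exit */
        return 0;
}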
1754
1755 static void
1756 finish_stop(int stop_count)
1757 {
1758         int to_self;
1759
1760         /*
1761          * If there are no other threads in the group, or if there is
1762          * a group stop in progress and we are the last to stop,
1763          * report to the parent.  When ptraced, every thread reports itself.
1764          */
1765         if (stop_count < 0 || (current->ptrace & PT_PTRACED))
1766                 to_self = 1;
1767         else if (stop_count == 0)
1768                 to_self = 0;
1769         else
1770                 goto out;
1771
1772         read_lock(&tasklist_lock);
1773         do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
1774         read_unlock(&tasklist_lock);
1775
1776 out:
1777         schedule();
1778         /*
1779          * Now we don't run again until continued.
1780          */
1781         current->exit_code = 0;
1782 }
1783
1784 /*
1785  * This performs the stopping for SIGSTOP and other stop signals.
1786  * We have to stop all threads in the thread group.
1787  * Returns nonzero if we've actually stopped and released the siglock.
1788  * Returns zero if we didn't stop and still hold the siglock.
1789  */
1790 static int
1791 do_signal_stop(int signr)
1792 {
1793         struct signal_struct *sig = current->signal;
1794         struct sighand_struct *sighand = current->sighand;
1795         int stop_count = -1;
1796
1797         if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1798                 return 0;
1799
1800         if (sig->group_stop_count > 0) {
1801                 /*
1802                  * There is a group stop in progress.  We don't need to
1803                  * start another one.
1804                  */
1805                 signr = sig->group_exit_code;
1806                 stop_count = --sig->group_stop_count;
1807                 current->exit_code = signr;
1808                 set_current_state(TASK_STOPPED);
1809                 if (stop_count == 0)
1810                         sig->flags = SIGNAL_STOP_STOPPED;
1811                 spin_unlock_irq(&sighand->siglock);
1812         }
1813         else if (thread_group_empty(current)) {
1814                 /*
1815                  * Lock must be held through transition to stopped state.
1816                  */
1817                 current->exit_code = current->signal->group_exit_code = signr;
1818                 set_current_state(TASK_STOPPED);
1819                 sig->flags = SIGNAL_STOP_STOPPED;
1820                 spin_unlock_irq(&sighand->siglock);
1821         }
1822         else {
1823                 /*
1824                  * There is no group stop already in progress.
1825                  * We must initiate one now, but that requires
1826                  * dropping siglock to get both the tasklist lock
1827                  * and siglock again in the proper order.  Note that
1828                  * this allows an intervening SIGCONT to be posted.
1829                  * We need to check for that and bail out if necessary.
1830                  */
1831                 struct task_struct *t;
1832
1833                 spin_unlock_irq(&sighand->siglock);
1834
1835                 /* signals can be posted during this window */
1836
1837                 read_lock(&tasklist_lock);
1838                 spin_lock_irq(&sighand->siglock);
1839
1840                 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
1841                         /*
1842                          * Another stop or continue happened while we
1843                          * didn't have the lock.  We can just swallow this
1844                          * signal now.  If we raced with a SIGCONT, that
1845                          * should have just cleared it now.  If we raced
1846                          * with another processor delivering a stop signal,
1847                          * then the SIGCONT that wakes us up should clear it.
1848                          */
1849                         read_unlock(&tasklist_lock);
1850                         return 0;
1851                 }
1852
1853                 if (sig->group_stop_count == 0) {
1854                         sig->group_exit_code = signr;
1855                         stop_count = 0;
1856                         for (t = next_thread(current); t != current;
1857                              t = next_thread(t))
1858                                 /*
1859                                  * Setting state to TASK_STOPPED for a group
1860                                  * stop is always done with the siglock held,
1861                                  * so this check has no races.
1862                                  */
1863                                 if (!t->exit_state &&
1864                                     !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1865                                         stop_count++;
1866                                         signal_wake_up(t, 0);
1867                                 }
1868                         sig->group_stop_count = stop_count;
1869                 }
1870                 else {
1871                         /* A race with another thread while unlocked.  */
1872                         signr = sig->group_exit_code;
1873                         stop_count = --sig->group_stop_count;
1874                 }
1875
1876                 current->exit_code = signr;
1877                 set_current_state(TASK_STOPPED);
1878                 if (stop_count == 0)
1879                         sig->flags = SIGNAL_STOP_STOPPED;
1880
1881                 spin_unlock_irq(&sighand->siglock);
1882                 read_unlock(&tasklist_lock);
1883         }
1884
1885         finish_stop(stop_count);
1886         return 1;
1887 }
1888
1889 /*
1890  * Do appropriate magic when group_stop_count > 0.
1891  * We return nonzero if we stopped, after releasing the siglock.
1892  * We return zero if we still hold the siglock and should look
1893  * for another signal without checking group_stop_count again.
1894  */
1895 static int handle_group_stop(void)
1896 {
1897         int stop_count;
1898
1899         if (current->signal->group_exit_task == current) {
1900                 /*
1901                  * Group stop is so we can do a core dump.
1902                  * We are the initiating thread, so get on with it.
1903                  */
1904                 current->signal->group_exit_task = NULL;
1905                 return 0;
1906         }
1907
1908         if (current->signal->flags & SIGNAL_GROUP_EXIT)
1909                 /*
1910                  * Group stop is so another thread can do a core dump,
1911                  * or else we are racing against a death signal.
1912                  * Just punt the stop so we can get the next signal.
1913                  */
1914                 return 0;
1915
1916         /*
1917          * There is a group stop in progress.  We stop
1918          * without any associated signal being in our queue.
1919          */
1920         stop_count = --current->signal->group_stop_count;
1921         if (stop_count == 0)
1922                 current->signal->flags = SIGNAL_STOP_STOPPED;
1923         current->exit_code = current->signal->group_exit_code;
1924         set_current_state(TASK_STOPPED);
1925         spin_unlock_irq(&current->sighand->siglock);
1926         finish_stop(stop_count);
1927         return 1;
1928 }
1929
1930 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1931                           struct pt_regs *regs, void *cookie)
1932 {
1933         sigset_t *mask = &current->blocked;
1934         int signr = 0;
1935
1936 relock:
1937         spin_lock_irq(&current->sighand->siglock);
1938         for (;;) {
1939                 struct k_sigaction *ka;
1940
1941                 if (unlikely(current->signal->group_stop_count > 0) &&
1942                     handle_group_stop())
1943                         goto relock;
1944
1945                 signr = dequeue_signal(current, mask, info);
1946
1947                 if (!signr)
1948                         break; /* will return 0 */
1949
1950                 if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
1951                         ptrace_signal_deliver(regs, cookie);
1952
1953                         /* Let the debugger run.  */
1954                         ptrace_stop(signr, signr, info);
1955
1956                         /* We're back.  Did the debugger cancel the sig?  */
1957                         signr = current->exit_code;
1958                         if (signr == 0)
1959                                 continue;
1960
1961                         current->exit_code = 0;
1962
1963                         /* Update the siginfo structure if the signal has
1964                            changed.  If the debugger wanted something
1965                            specific in the siginfo structure then it should
1966                            have updated *info via PTRACE_SETSIGINFO.  */
1967                         if (signr != info->si_signo) {
1968                                 info->si_signo = signr;
1969                                 info->si_errno = 0;
1970                                 info->si_code = SI_USER;
1971                                 info->si_pid = current->parent->pid;
1972                                 info->si_uid = current->parent->uid;
1973                         }
1974
1975                         /* If the (new) signal is now blocked, requeue it.  */
1976                         if (sigismember(&current->blocked, signr)) {
1977                                 specific_send_sig_info(signr, info, current);
1978                                 continue;
1979                         }
1980                 }
1981
1982                 ka = &current->sighand->action[signr-1];
1983                 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1984                         continue;
1985                 if (ka->sa.sa_handler != SIG_DFL) {
1986                         /* Run the handler.  */
1987                         *return_ka = *ka;
1988
1989                         if (ka->sa.sa_flags & SA_ONESHOT)
1990                                 ka->sa.sa_handler = SIG_DFL;
1991
1992                         break; /* will return non-zero "signr" value */
1993                 }
1994
1995                 /*
1996                  * Now we are doing the default action for this signal.
1997                  */
1998                 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1999                         continue;
2000
2001                 /* Init gets no signals it doesn't want.  */
2002                 if (current->pid == 1)
2003                         continue;
2004
2005                 /* virtual init is protected against user signals */
2006                 if ((info->si_code == SI_USER) &&
2007                         vx_current_initpid(current->pid))
2008                         continue;
2009
2010                 if (sig_kernel_stop(signr)) {
2011                         /*
2012                          * The default action is to stop all threads in
2013                          * the thread group.  The job control signals
2014                          * do nothing in an orphaned pgrp, but SIGSTOP
2015                          * always works.  Note that siglock needs to be
2016                          * dropped during the call to is_orphaned_pgrp()
2017                          * because of lock ordering with tasklist_lock.
2018                          * This allows an intervening SIGCONT to be posted.
2019                          * We need to check for that and bail out if necessary.
2020                          */
2021                         if (signr != SIGSTOP) {
2022                                 spin_unlock_irq(&current->sighand->siglock);
2023
2024                                 /* signals can be posted during this window */
2025
2026                                 if (is_orphaned_pgrp(process_group(current)))
2027                                         goto relock;
2028
2029                                 spin_lock_irq(&current->sighand->siglock);
2030                         }
2031
2032                         if (likely(do_signal_stop(signr))) {
2033                                 /* It released the siglock.  */
2034                                 goto relock;
2035                         }
2036
2037                         /*
2038                          * We didn't actually stop, due to a race
2039                          * with SIGCONT or something like that.
2040                          */
2041                         continue;
2042                 }
2043
2044                 spin_unlock_irq(&current->sighand->siglock);
2045
2046                 /*
2047                  * Anything else is fatal, maybe with a core dump.
2048                  */
2049                 current->flags |= PF_SIGNALED;
2050                 if (sig_kernel_coredump(signr)) {
2051                         /*
2052                          * If it was able to dump core, this kills all
2053                          * other threads in the group and synchronizes with
2054                          * their demise.  If we lost the race with another
2055                          * thread getting here, it set group_exit_code
2056                          * first and our do_group_exit call below will use
2057                          * that value and ignore the one we pass it.
2058                          */
2059                         do_coredump((long)signr, signr, regs);
2060                 }
2061
2062                 /*
2063                  * Death signals, no core dump.
2064                  */
2065                 do_group_exit(signr);
2066                 /* NOTREACHED */
2067         }
2068         spin_unlock_irq(&current->sighand->siglock);
2069         return signr;
2070 }
2071
2072 EXPORT_SYMBOL(recalc_sigpending);
2073 EXPORT_SYMBOL_GPL(dequeue_signal);
2074 EXPORT_SYMBOL(flush_signals);
2075 EXPORT_SYMBOL(force_sig);
2076 EXPORT_SYMBOL(kill_pg);
2077 EXPORT_SYMBOL(kill_proc);
2078 EXPORT_SYMBOL(ptrace_notify);
2079 EXPORT_SYMBOL(send_sig);
2080 EXPORT_SYMBOL(send_sig_info);
2081 EXPORT_SYMBOL(sigprocmask);
2082 EXPORT_SYMBOL(block_all_signals);
2083 EXPORT_SYMBOL(unblock_all_signals);
2084
2085
2086 /*
2087  * System call entry points.
2088  */
2089
2090 asmlinkage long sys_restart_syscall(void)
2091 {
2092         struct restart_block *restart = &current_thread_info()->restart_block;
2093         return restart->fn(restart);
2094 }
2095
2096 long do_no_restart_syscall(struct restart_block *param)
2097 {
2098         return -EINTR;
2099 }
2100
2101 /*
2102  * We don't need to get the kernel lock - this is all local to this
2103  * particular thread.  (And that's good, because this is _heavily_
2104  * used by various programs.)
2105  */
2106
2107 /*
2108  * This is also useful for kernel threads that want to temporarily
2109  * (or permanently) block certain signals.
2110  *
2111  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2112  * interface happily blocks "unblockable" signals like SIGKILL
2113  * and friends.
2114  */
2115 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2116 {
2117         int error;
2118         sigset_t old_block;
2119
2120         spin_lock_irq(&current->sighand->siglock);
2121         old_block = current->blocked;
2122         error = 0;
2123         switch (how) {
2124         case SIG_BLOCK:
2125                 sigorsets(&current->blocked, &current->blocked, set);
2126                 break;
2127         case SIG_UNBLOCK:
2128                 signandsets(&current->blocked, &current->blocked, set);
2129                 break;
2130         case SIG_SETMASK:
2131                 current->blocked = *set;
2132                 break;
2133         default:
2134                 error = -EINVAL;
2135         }
2136         recalc_sigpending();
2137         spin_unlock_irq(&current->sighand->siglock);
2138         if (oldset)
2139                 *oldset = old_block;
2140         return error;
2141 }
2142
2143 asmlinkage long
2144 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2145 {
2146         int error = -EINVAL;
2147         sigset_t old_set, new_set;
2148
2149         /* XXX: Don't preclude handling different sized sigset_t's.  */
2150         if (sigsetsize != sizeof(sigset_t))
2151                 goto out;
2152
2153         if (set) {
2154                 error = -EFAULT;
2155                 if (copy_from_user(&new_set, set, sizeof(*set)))
2156                         goto out;
2157                 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2158
2159                 error = sigprocmask(how, &new_set, &old_set);
2160                 if (error)
2161                         goto out;
2162                 if (oset)
2163                         goto set_old;
2164         } else if (oset) {
2165                 spin_lock_irq(&current->sighand->siglock);
2166                 old_set = current->blocked;
2167                 spin_unlock_irq(&current->sighand->siglock);
2168
2169         set_old:
2170                 error = -EFAULT;
2171                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2172                         goto out;
2173         }
2174         error = 0;
2175 out:
2176         return error;
2177 }
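
/*
 * Userspace sketch of the path through sys_rt_sigprocmask() above (not
 * part of this file): SIGKILL and SIGSTOP are removed from the requested
 * set before sigprocmask() is called, so only the in-kernel interface
 * can block them.  SIGINT here is an arbitrary choice.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
        sigset_t set, old;

        sigemptyset(&set);
        sigaddset(&set, SIGINT);
        sigaddset(&set, SIGKILL);       /* silently stripped by the kernel */

        sigprocmask(SIG_BLOCK, &set, &old);

        if (!sigismember(&old, SIGINT))
                printf("SIGINT was not blocked before this call\n");
        return 0;
}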
2178
2179 long do_sigpending(void __user *set, unsigned long sigsetsize)
2180 {
2181         long error = -EINVAL;
2182         sigset_t pending;
2183
2184         if (sigsetsize > sizeof(sigset_t))
2185                 goto out;
2186
2187         spin_lock_irq(&current->sighand->siglock);
2188         sigorsets(&pending, &current->pending.signal,
2189                   &current->signal->shared_pending.signal);
2190         spin_unlock_irq(&current->sighand->siglock);
2191
2192         /* Outside the lock because only this thread touches it.  */
2193         sigandsets(&pending, &current->blocked, &pending);
2194
2195         error = -EFAULT;
2196         if (!copy_to_user(set, &pending, sigsetsize))
2197                 error = 0;
2198
2199 out:
2200         return error;
2201 }       
2202
2203 asmlinkage long
2204 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2205 {
2206         return do_sigpending(set, sigsetsize);
2207 }
2208
2209 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2210
2211 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2212 {
2213         int err;
2214
2215         if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2216                 return -EFAULT;
2217         if (from->si_code < 0)
2218                 return __copy_to_user(to, from, sizeof(siginfo_t))
2219                         ? -EFAULT : 0;
2220         /*
2221          * If you change the siginfo_t structure, please be sure
2222          * this code is fixed accordingly.
2223          * It should never copy any pad contained in the structure
2224          * to avoid security leaks, but must copy the generic
2225          * 3 ints plus the relevant union member.
2226          */
2227         err = __put_user(from->si_signo, &to->si_signo);
2228         err |= __put_user(from->si_errno, &to->si_errno);
2229         err |= __put_user((short)from->si_code, &to->si_code);
2230         switch (from->si_code & __SI_MASK) {
2231         case __SI_KILL:
2232                 err |= __put_user(from->si_pid, &to->si_pid);
2233                 err |= __put_user(from->si_uid, &to->si_uid);
2234                 break;
2235         case __SI_TIMER:
2236                  err |= __put_user(from->si_tid, &to->si_tid);
2237                  err |= __put_user(from->si_overrun, &to->si_overrun);
2238                  err |= __put_user(from->si_ptr, &to->si_ptr);
2239                 break;
2240         case __SI_POLL:
2241                 err |= __put_user(from->si_band, &to->si_band);
2242                 err |= __put_user(from->si_fd, &to->si_fd);
2243                 break;
2244         case __SI_FAULT:
2245                 err |= __put_user(from->si_addr, &to->si_addr);
2246 #ifdef __ARCH_SI_TRAPNO
2247                 err |= __put_user(from->si_trapno, &to->si_trapno);
2248 #endif
2249                 break;
2250         case __SI_CHLD:
2251                 err |= __put_user(from->si_pid, &to->si_pid);
2252                 err |= __put_user(from->si_uid, &to->si_uid);
2253                 err |= __put_user(from->si_status, &to->si_status);
2254                 err |= __put_user(from->si_utime, &to->si_utime);
2255                 err |= __put_user(from->si_stime, &to->si_stime);
2256                 break;
2257         case __SI_RT: /* This is not generated by the kernel as of now. */
2258         case __SI_MESGQ: /* But this is */
2259                 err |= __put_user(from->si_pid, &to->si_pid);
2260                 err |= __put_user(from->si_uid, &to->si_uid);
2261                 err |= __put_user(from->si_ptr, &to->si_ptr);
2262                 break;
2263         default: /* this is just in case for now ... */
2264                 err |= __put_user(from->si_pid, &to->si_pid);
2265                 err |= __put_user(from->si_uid, &to->si_uid);
2266                 break;
2267         }
2268         return err;
2269 }
2270
2271 #endif
2272
2273 asmlinkage long
2274 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2275                     siginfo_t __user *uinfo,
2276                     const struct timespec __user *uts,
2277                     size_t sigsetsize)
2278 {
2279         int ret, sig;
2280         sigset_t these;
2281         struct timespec ts;
2282         siginfo_t info;
2283         long timeout = 0;
2284
2285         /* XXX: Don't preclude handling different sized sigset_t's.  */
2286         if (sigsetsize != sizeof(sigset_t))
2287                 return -EINVAL;
2288
2289         if (copy_from_user(&these, uthese, sizeof(these)))
2290                 return -EFAULT;
2291                 
2292         /*
2293          * Invert the set of allowed signals to get those we
2294          * want to block.
2295          */
2296         sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2297         signotset(&these);
2298
2299         if (uts) {
2300                 if (copy_from_user(&ts, uts, sizeof(ts)))
2301                         return -EFAULT;
2302                 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2303                     || ts.tv_sec < 0)
2304                         return -EINVAL;
2305         }
2306
2307         spin_lock_irq(&current->sighand->siglock);
2308         sig = dequeue_signal(current, &these, &info);
2309         if (!sig) {
2310                 timeout = MAX_SCHEDULE_TIMEOUT;
2311                 if (uts)
2312                         timeout = (timespec_to_jiffies(&ts)
2313                                    + (ts.tv_sec || ts.tv_nsec));
2314
2315                 if (timeout) {
2316                         /* None ready -- temporarily unblock those we're
2317                          * interested in while we are sleeping, so that we'll
2318                          * be awakened when they arrive.  */
2319                         current->real_blocked = current->blocked;
2320                         sigandsets(&current->blocked, &current->blocked, &these);
2321                         recalc_sigpending();
2322                         spin_unlock_irq(&current->sighand->siglock);
2323
2324                         timeout = schedule_timeout_interruptible(timeout);
2325
2326                         try_to_freeze();
2327                         spin_lock_irq(&current->sighand->siglock);
2328                         sig = dequeue_signal(current, &these, &info);
2329                         current->blocked = current->real_blocked;
2330                         siginitset(&current->real_blocked, 0);
2331                         recalc_sigpending();
2332                 }
2333         }
2334         spin_unlock_irq(&current->sighand->siglock);
2335
2336         if (sig) {
2337                 ret = sig;
2338                 if (uinfo) {
2339                         if (copy_siginfo_to_user(uinfo, &info))
2340                                 ret = -EFAULT;
2341                 }
2342         } else {
2343                 ret = -EAGAIN;
2344                 if (timeout)
2345                         ret = -EINTR;
2346         }
2347
2348         return ret;
2349 }
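
/*
 * Userspace counterpart of sys_rt_sigtimedwait() (a minimal sketch, not
 * part of this file): the signal must be blocked first so it stays
 * pending, then sigtimedwait() dequeues it synchronously or fails with
 * EAGAIN when the timeout expires.  Signal and timeout are assumptions.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        sigset_t set;
        siginfo_t info;
        struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
        int sig;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);     /* keep it pending */

        kill(getpid(), SIGUSR1);                /* now queued */

        sig = sigtimedwait(&set, &info, &ts);
        if (sig == SIGUSR1)
                printf("got SIGUSR1 from pid %d\n", (int)info.si_pid);
        else
                perror("sigtimedwait");         /* EAGAIN on timeout */
        return 0;
}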
2350
2351 asmlinkage long
2352 sys_kill(int pid, int sig)
2353 {
2354         struct siginfo info;
2355
2356         info.si_signo = sig;
2357         info.si_errno = 0;
2358         info.si_code = SI_USER;
2359         info.si_pid = current->tgid;
2360         info.si_uid = current->uid;
2361
2362         return kill_something_info(sig, &info, pid);
2363 }
2364
2365 static int do_tkill(int tgid, int pid, int sig)
2366 {
2367         int error;
2368         struct siginfo info;
2369         struct task_struct *p;
2370
2371         error = -ESRCH;
2372         info.si_signo = sig;
2373         info.si_errno = 0;
2374         info.si_code = SI_TKILL;
2375         info.si_pid = current->tgid;
2376         info.si_uid = current->uid;
2377
2378         read_lock(&tasklist_lock);
2379         p = find_task_by_pid(pid);
2380         if (p && (tgid <= 0 || p->tgid == tgid)) {
2381                 error = check_kill_permission(sig, &info, p);
2382                 /*
2383                  * The null signal is a permissions and process existence
2384                  * probe.  No signal is actually delivered.
2385                  */
2386                 if (!error && sig && p->sighand) {
2387                         spin_lock_irq(&p->sighand->siglock);
2388                         handle_stop_signal(sig, p);
2389                         error = specific_send_sig_info(sig, &info, p);
2390                         spin_unlock_irq(&p->sighand->siglock);
2391                 }
2392         }
2393         read_unlock(&tasklist_lock);
2394
2395         return error;
2396 }
2397
2398 /**
2399  *  sys_tgkill - send signal to one specific thread
2400  *  @tgid: the thread group ID of the thread
2401  *  @pid: the PID of the thread
2402  *  @sig: signal to be sent
2403  *
2404  *  This syscall also checks the tgid and returns -ESRCH even if the PID
2405  *  exists but no longer belongs to the target process. This
2406  *  method solves the problem of threads exiting and PIDs getting reused.
2407  */
2408 asmlinkage long sys_tgkill(int tgid, int pid, int sig)
2409 {
2410         /* This is only valid for single tasks */
2411         if (pid <= 0 || tgid <= 0)
2412                 return -EINVAL;
2413
2414         return do_tkill(tgid, pid, sig);
2415 }
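
/*
 * Userspace sketch of sys_tgkill() (not part of this file): there is no
 * glibc wrapper here, so callers go through syscall(), assuming
 * SYS_tgkill and SYS_gettid are defined for the architecture.  Signal 0
 * is used as a pure existence/permission probe, as in do_tkill() above.
 */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        pid_t tgid = getpid();
        pid_t tid = syscall(SYS_gettid);

        /* Fails with ESRCH if this tid were reused by another process. */
        if (syscall(SYS_tgkill, tgid, tid, 0) == -1)
                perror("tgkill");
        else
                printf("thread %d of process %d exists\n",
                       (int)tid, (int)tgid);
        return 0;
}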
2416
2417 /*
2418  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2419  */
2420 asmlinkage long
2421 sys_tkill(int pid, int sig)
2422 {
2423         /* This is only valid for single tasks */
2424         if (pid <= 0)
2425                 return -EINVAL;
2426
2427         return do_tkill(0, pid, sig);
2428 }
2429
2430 asmlinkage long
2431 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2432 {
2433         siginfo_t info;
2434
2435         if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2436                 return -EFAULT;
2437
2438         /* Not even root can pretend to send signals from the kernel.
2439            Nor can they impersonate a kill(), which adds source info.  */
2440         if (info.si_code >= 0)
2441                 return -EPERM;
2442         info.si_signo = sig;
2443
2444         /* POSIX.1b doesn't mention process groups.  */
2445         return kill_proc_info(sig, &info, pid);
2446 }
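
/*
 * The usual userspace entry to sys_rt_sigqueueinfo() is sigqueue(3) (a
 * minimal sketch, not part of this file): glibc fills in si_code =
 * SI_QUEUE, which is negative and therefore passes the si_code >= 0
 * check above, and carries an application-supplied value.  May need
 * -lrt on older glibc; signal and value are assumptions.
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        union sigval val = { .sival_int = 42 };
        sigset_t set;
        siginfo_t info;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);

        sigqueue(getpid(), SIGUSR1, val);       /* -> rt_sigqueueinfo */

        sigwaitinfo(&set, &info);
        printf("si_code=%d value=%d\n", info.si_code, info.si_value.sival_int);
        return 0;
}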
2447
2448 int
2449 do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2450 {
2451         struct k_sigaction *k;
2452         sigset_t mask;
2453
2454         if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2455                 return -EINVAL;
2456
2457         k = &current->sighand->action[sig-1];
2458
2459         spin_lock_irq(&current->sighand->siglock);
2460         if (signal_pending(current)) {
2461                 /*
2462                  * If there might be a fatal signal pending on multiple
2463                  * threads, make sure we take it before changing the action.
2464                  */
2465                 spin_unlock_irq(&current->sighand->siglock);
2466                 return -ERESTARTNOINTR;
2467         }
2468
2469         if (oact)
2470                 *oact = *k;
2471
2472         if (act) {
2473                 sigdelsetmask(&act->sa.sa_mask,
2474                               sigmask(SIGKILL) | sigmask(SIGSTOP));
2475                 /*
2476                  * POSIX 3.3.1.3:
2477                  *  "Setting a signal action to SIG_IGN for a signal that is
2478                  *   pending shall cause the pending signal to be discarded,
2479                  *   whether or not it is blocked."
2480                  *
2481                  *  "Setting a signal action to SIG_DFL for a signal that is
2482                  *   pending and whose default action is to ignore the signal
2483                  *   (for example, SIGCHLD), shall cause the pending signal to
2484                  *   be discarded, whether or not it is blocked"
2485                  */
2486                 if (act->sa.sa_handler == SIG_IGN ||
2487                     (act->sa.sa_handler == SIG_DFL &&
2488                      sig_kernel_ignore(sig))) {
2489                         /*
2490                          * This is a fairly rare case, so we only take the
2491                          * tasklist_lock once we're sure we'll need it.
2492                          * Now we must do this little unlock and relock
2493                          * dance to maintain the lock hierarchy.
2494                          */
2495                         struct task_struct *t = current;
2496                         spin_unlock_irq(&t->sighand->siglock);
2497                         read_lock(&tasklist_lock);
2498                         spin_lock_irq(&t->sighand->siglock);
2499                         *k = *act;
2500                         sigemptyset(&mask);
2501                         sigaddset(&mask, sig);
2502                         rm_from_queue_full(&mask, &t->signal->shared_pending);
2503                         do {
2504                                 rm_from_queue_full(&mask, &t->pending);
2505                                 recalc_sigpending_tsk(t);
2506                                 t = next_thread(t);
2507                         } while (t != current);
2508                         spin_unlock_irq(&current->sighand->siglock);
2509                         read_unlock(&tasklist_lock);
2510                         return 0;
2511                 }
2512
2513                 *k = *act;
2514         }
2515
2516         spin_unlock_irq(&current->sighand->siglock);
2517         return 0;
2518 }
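
/*
 * Userspace demonstration of the POSIX 3.3.1.3 rule quoted above (a
 * minimal sketch, not part of this file): a blocked, pending signal is
 * discarded the moment its action becomes SIG_IGN, which is what the
 * rm_from_queue_full() calls implement.  SIGUSR1 is an arbitrary choice.
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        sigset_t set, pending;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);

        kill(getpid(), SIGUSR1);        /* blocked, so it stays pending */
        sigpending(&pending);
        printf("pending before SIG_IGN: %d\n", sigismember(&pending, SIGUSR1));

        signal(SIGUSR1, SIG_IGN);       /* discards the pending signal */
        sigpending(&pending);
        printf("pending after  SIG_IGN: %d\n", sigismember(&pending, SIGUSR1));
        return 0;
}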
2519
2520 int 
2521 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2522 {
2523         stack_t oss;
2524         int error;
2525
2526         if (uoss) {
2527                 oss.ss_sp = (void __user *) current->sas_ss_sp;
2528                 oss.ss_size = current->sas_ss_size;
2529                 oss.ss_flags = sas_ss_flags(sp);
2530         }
2531
2532         if (uss) {
2533                 void __user *ss_sp;
2534                 size_t ss_size;
2535                 int ss_flags;
2536
2537                 error = -EFAULT;
2538                 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2539                     || __get_user(ss_sp, &uss->ss_sp)
2540                     || __get_user(ss_flags, &uss->ss_flags)
2541                     || __get_user(ss_size, &uss->ss_size))
2542                         goto out;
2543
2544                 error = -EPERM;
2545                 if (on_sig_stack(sp))
2546                         goto out;
2547
2548                 error = -EINVAL;
2549                 /*
2550                  *
2551                  * Note - this code used to test ss_flags incorrectly;
2552                  *        old code may have been written using ss_flags==0
2553                  *        to mean ss_flags==SS_ONSTACK (as this was the only
2554                  *        way that worked) - this fix preserves that older
2555                  *        mechanism.
2556                  */
2557                 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2558                         goto out;
2559
2560                 if (ss_flags == SS_DISABLE) {
2561                         ss_size = 0;
2562                         ss_sp = NULL;
2563                 } else {
2564                         error = -ENOMEM;
2565                         if (ss_size < MINSIGSTKSZ)
2566                                 goto out;
2567                 }
2568
2569                 current->sas_ss_sp = (unsigned long) ss_sp;
2570                 current->sas_ss_size = ss_size;
2571         }
2572
2573         if (uoss) {
2574                 error = -EFAULT;
2575                 if (copy_to_user(uoss, &oss, sizeof(oss)))
2576                         goto out;
2577         }
2578
2579         error = 0;
2580 out:
2581         return error;
2582 }
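
/*
 * Userspace use of do_sigaltstack() (a minimal sketch, not part of this
 * file): install an alternate stack and mark a handler SA_ONSTACK so it
 * can still run when the normal stack is unusable, e.g. after a stack
 * overflow raised SIGSEGV.  Stack size and handler are assumptions.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static void on_segv(int sig)
{
        /* Runs on the alternate stack; only async-signal-safe calls. */
        write(2, "SIGSEGV on alternate stack\n", 27);
        _exit(1);
}

int main(void)
{
        stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ };
        struct sigaction sa;

        if (sigaltstack(&ss, NULL) == -1) {     /* ends up in do_sigaltstack() */
                perror("sigaltstack");
                return 1;
        }

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_segv;
        sa.sa_flags = SA_ONSTACK;               /* deliver on the alternate stack */
        sigaction(SIGSEGV, &sa, NULL);

        raise(SIGSEGV);                         /* demonstrate delivery */
        return 0;
}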
2583
2584 #ifdef __ARCH_WANT_SYS_SIGPENDING
2585
2586 asmlinkage long
2587 sys_sigpending(old_sigset_t __user *set)
2588 {
2589         return do_sigpending(set, sizeof(*set));
2590 }
2591
2592 #endif
2593
2594 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2595 /* Some platforms have their own version with special arguments; others
2596    support only sys_rt_sigprocmask.  */
2597
2598 asmlinkage long
2599 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2600 {
2601         int error;
2602         old_sigset_t old_set, new_set;
2603
2604         if (set) {
2605                 error = -EFAULT;
2606                 if (copy_from_user(&new_set, set, sizeof(*set)))
2607                         goto out;
2608                 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2609
2610                 spin_lock_irq(&current->sighand->siglock);
2611                 old_set = current->blocked.sig[0];
2612
2613                 error = 0;
2614                 switch (how) {
2615                 default:
2616                         error = -EINVAL;
2617                         break;
2618                 case SIG_BLOCK:
2619                         sigaddsetmask(&current->blocked, new_set);
2620                         break;
2621                 case SIG_UNBLOCK:
2622                         sigdelsetmask(&current->blocked, new_set);
2623                         break;
2624                 case SIG_SETMASK:
2625                         current->blocked.sig[0] = new_set;
2626                         break;
2627                 }
2628
2629                 recalc_sigpending();
2630                 spin_unlock_irq(&current->sighand->siglock);
2631                 if (error)
2632                         goto out;
2633                 if (oset)
2634                         goto set_old;
2635         } else if (oset) {
2636                 old_set = current->blocked.sig[0];
2637         set_old:
2638                 error = -EFAULT;
2639                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2640                         goto out;
2641         }
2642         error = 0;
2643 out:
2644         return error;
2645 }
2646 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2647
2648 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2649 asmlinkage long
2650 sys_rt_sigaction(int sig,
2651                  const struct sigaction __user *act,
2652                  struct sigaction __user *oact,
2653                  size_t sigsetsize)
2654 {
2655         struct k_sigaction new_sa, old_sa;
2656         int ret = -EINVAL;
2657
2658         /* XXX: Don't preclude handling different sized sigset_t's.  */
2659         if (sigsetsize != sizeof(sigset_t))
2660                 goto out;
2661
2662         if (act) {
2663                 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2664                         return -EFAULT;
2665         }
2666
2667         ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2668
2669         if (!ret && oact) {
2670                 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2671                         return -EFAULT;
2672         }
2673 out:
2674         return ret;
2675 }
2676 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2677
2678 #ifdef __ARCH_WANT_SYS_SGETMASK
2679
2680 /*
2681  * For backwards compatibility.  Functionality superseded by sigprocmask.
2682  */
2683 asmlinkage long
2684 sys_sgetmask(void)
2685 {
2686         /* SMP safe */
2687         return current->blocked.sig[0];
2688 }
2689
2690 asmlinkage long
2691 sys_ssetmask(int newmask)
2692 {
2693         int old;
2694
2695         spin_lock_irq(&current->sighand->siglock);
2696         old = current->blocked.sig[0];
2697
2698         siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2699                                                   sigmask(SIGSTOP)));
2700         recalc_sigpending();
2701         spin_unlock_irq(&current->sighand->siglock);
2702
2703         return old;
2704 }
2705 #endif /* __ARCH_WANT_SYS_SGETMASK */
2706
2707 #ifdef __ARCH_WANT_SYS_SIGNAL
2708 /*
2709  * For backwards compatibility.  Functionality superseded by sigaction.
2710  */
2711 asmlinkage unsigned long
2712 sys_signal(int sig, __sighandler_t handler)
2713 {
2714         struct k_sigaction new_sa, old_sa;
2715         int ret;
2716
2717         new_sa.sa.sa_handler = handler;
2718         new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2719         sigemptyset(&new_sa.sa.sa_mask);
2720
2721         ret = do_sigaction(sig, &new_sa, &old_sa);
2722
2723         return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2724 }
2725 #endif /* __ARCH_WANT_SYS_SIGNAL */
2726
2727 #ifdef __ARCH_WANT_SYS_PAUSE
2728
2729 asmlinkage long
2730 sys_pause(void)
2731 {
2732         current->state = TASK_INTERRUPTIBLE;
2733         schedule();
2734         return -ERESTARTNOHAND;
2735 }
2736
2737 #endif
2738
2739 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2740 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2741 {
2742         sigset_t newset;
2743
2744         /* XXX: Don't preclude handling different sized sigset_t's.  */
2745         if (sigsetsize != sizeof(sigset_t))
2746                 return -EINVAL;
2747
2748         if (copy_from_user(&newset, unewset, sizeof(newset)))
2749                 return -EFAULT;
2750         sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2751
2752         spin_lock_irq(&current->sighand->siglock);
2753         current->saved_sigmask = current->blocked;
2754         current->blocked = newset;
2755         recalc_sigpending();
2756         spin_unlock_irq(&current->sighand->siglock);
2757
2758         current->state = TASK_INTERRUPTIBLE;
2759         schedule();
2760         set_thread_flag(TIF_RESTORE_SIGMASK);
2761         return -ERESTARTNOHAND;
2762 }
2763 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
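
/*
 * The classic userspace pattern sys_rt_sigsuspend() exists for (a
 * minimal sketch, not part of this file): swap in a mask and sleep
 * atomically, so a signal cannot slip in between checking a flag and
 * going to sleep.  The flag and SIGUSR1 below are assumptions.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t got_usr1;

static void handler(int sig)
{
        got_usr1 = 1;
}

int main(void)
{
        sigset_t block, orig;
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = handler;
        sigaction(SIGUSR1, &sa, NULL);

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &orig);  /* block while we test the flag */

        kill(getpid(), SIGUSR1);                /* could arrive at any moment */

        while (!got_usr1)
                sigsuspend(&orig);              /* atomically unblock and sleep;
                                                   returns -1/EINTR after the handler */

        sigprocmask(SIG_SETMASK, &orig, NULL);
        printf("got SIGUSR1\n");
        return 0;
}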
2764
2765 void __init signals_init(void)
2766 {
2767         sigqueue_cachep =
2768                 kmem_cache_create("sigqueue",
2769                                   sizeof(struct sigqueue),
2770                                   __alignof__(struct sigqueue),
2771                                   SLAB_PANIC, NULL, NULL);
2772 }