fedora core 6 1.2949 + vserver 2.2.0
diff --git a/kernel/signal.c b/kernel/signal.c
index a918155..c0afecc 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -10,7 +10,6 @@
  *             to allow signals to be sent reliably.
  */
 
-#include <linux/config.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/smp_lock.h>
 #include <linux/tty.h>
 #include <linux/binfmts.h>
 #include <linux/security.h>
-#include <linux/ptrace.h>
+#include <linux/syscalls.h>
+#include <linux/tracehook.h>
+#include <linux/signal.h>
+#include <linux/capability.h>
+#include <linux/freezer.h>
+#include <linux/pid_namespace.h>
+#include <linux/nsproxy.h>
+#include <linux/vs_context.h>
+#include <linux/vs_pid.h>
+
 #include <asm/param.h>
 #include <asm/uaccess.h>
+#include <asm/unistd.h>
 #include <asm/siginfo.h>
+#include "audit.h"     /* audit_signal_info() */
 
 /*
  * SLAB caches for signal bits.
  */
 
-static kmem_cache_t *sigqueue_cachep;
-
-atomic_t nr_queued_signals;
-int max_queued_signals = 1024;
+static struct kmem_cache *sigqueue_cachep;
 
 /*
  * In POSIX a signal is sent either to a specific thread (Linux task)
@@ -144,6 +151,8 @@ int max_queued_signals = 1024;
 #define sig_kernel_stop(sig) \
                (((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
 
+#define sig_needs_tasklist(sig)        ((sig) == SIGCONT)
+
 #define sig_user_defined(t, signr) \
        (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&  \
         ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
@@ -152,20 +161,9 @@ int max_queued_signals = 1024;
        (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
         (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
 
-#define sig_avoid_stop_race() \
-       (sigtestsetmask(&current->pending.signal, M(SIGCONT) | M(SIGKILL)) || \
-        sigtestsetmask(&current->signal->shared_pending.signal, \
-                                                 M(SIGCONT) | M(SIGKILL)))
-
 static int sig_ignored(struct task_struct *t, int sig)
 {
-       void * handler;
-
-       /*
-        * Tracers always want to know about signals..
-        */
-       if (t->ptrace & PT_PTRACED)
-               return 0;
+       void __user * handler;
 
        /*
         * Blocked signals are never ignored, since the
@@ -177,8 +175,12 @@ static int sig_ignored(struct task_struct *t, int sig)
 
        /* Is it explicitly or implicitly ignored? */
        handler = t->sighand->action[sig-1].sa.sa_handler;
-       return   handler == SIG_IGN ||
-               (handler == SIG_DFL && sig_kernel_ignore(sig));
+       if (handler != SIG_IGN &&
+           (handler != SIG_DFL || !sig_kernel_ignore(sig)))
+               return 0;
+
+       /* It's ignored, we can short-circuit unless a debugger wants it.  */
+       return !tracehook_consider_ignored_signal(t, sig, handler);
 }
 
 /*
@@ -216,8 +218,10 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 fastcall void recalc_sigpending_tsk(struct task_struct *t)
 {
        if (t->signal->group_stop_count > 0 ||
+           (freezing(t)) ||
            PENDING(&t->pending, &t->blocked) ||
-           PENDING(&t->signal->shared_pending, &t->blocked))
+           PENDING(&t->signal->shared_pending, &t->blocked) ||
+           tracehook_induce_sigpending(t))
                set_tsk_thread_flag(t, TIF_SIGPENDING);
        else
                clear_tsk_thread_flag(t, TIF_SIGPENDING);
@@ -264,30 +268,43 @@ next_signal(struct sigpending *pending, sigset_t *mask)
        return sig;
 }
 
-struct sigqueue *__sigqueue_alloc(void)
+static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
+                                        int override_rlimit)
 {
-       struct sigqueue *q = 0;
+       struct sigqueue *q = NULL;
+       struct user_struct *user;
 
-       if (atomic_read(&nr_queued_signals) < max_queued_signals)
-               q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
-       if (q) {
-               atomic_inc(&nr_queued_signals);
+       /*
+        * In order to avoid problems with "switch_user()", we want to make
+        * sure that the compiler doesn't re-load "t->user"
+        */
+       user = t->user;
+       barrier();
+       atomic_inc(&user->sigpending);
+       if (override_rlimit ||
+           atomic_read(&user->sigpending) <=
+                       t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
+               q = kmem_cache_alloc(sigqueue_cachep, flags);
+       if (unlikely(q == NULL)) {
+               atomic_dec(&user->sigpending);
+       } else {
                INIT_LIST_HEAD(&q->list);
                q->flags = 0;
-               q->lock = 0;
+               q->user = get_uid(user);
        }
        return(q);
 }
 
-static inline void __sigqueue_free(struct sigqueue *q)
+static void __sigqueue_free(struct sigqueue *q)
 {
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
+       atomic_dec(&q->user->sigpending);
+       free_uid(q->user);
        kmem_cache_free(sigqueue_cachep, q);
-       atomic_dec(&nr_queued_signals);
 }
 
-static void flush_sigqueue(struct sigpending *queue)
+void flush_sigqueue(struct sigpending *queue)
 {
        struct sigqueue *q;
 
@@ -302,9 +319,7 @@ static void flush_sigqueue(struct sigpending *queue)
 /*
  * Flush all pending signals for a task.
  */
-
-void
-flush_signals(struct task_struct *t)
+void flush_signals(struct task_struct *t)
 {
        unsigned long flags;
 
@@ -315,89 +330,6 @@ flush_signals(struct task_struct *t)
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
 }
 
-/*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_sighand(struct task_struct *tsk)
-{
-       struct sighand_struct * sighand = tsk->sighand;
-
-       /* Ok, we're done with the signal handlers */
-       tsk->sighand = NULL;
-       if (atomic_dec_and_test(&sighand->count))
-               kmem_cache_free(sighand_cachep, sighand);
-}
-
-void exit_sighand(struct task_struct *tsk)
-{
-       write_lock_irq(&tasklist_lock);
-       __exit_sighand(tsk);
-       write_unlock_irq(&tasklist_lock);
-}
-
-/*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_signal(struct task_struct *tsk)
-{
-       struct signal_struct * sig = tsk->signal;
-       struct sighand_struct * sighand = tsk->sighand;
-
-       if (!sig)
-               BUG();
-       if (!atomic_read(&sig->count))
-               BUG();
-       spin_lock(&sighand->siglock);
-       if (atomic_dec_and_test(&sig->count)) {
-               if (tsk == sig->curr_target)
-                       sig->curr_target = next_thread(tsk);
-               tsk->signal = NULL;
-               spin_unlock(&sighand->siglock);
-               flush_sigqueue(&sig->shared_pending);
-       } else {
-               /*
-                * If there is any task waiting for the group exit
-                * then notify it:
-                */
-               if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
-                       wake_up_process(sig->group_exit_task);
-                       sig->group_exit_task = NULL;
-               }
-               if (tsk == sig->curr_target)
-                       sig->curr_target = next_thread(tsk);
-               tsk->signal = NULL;
-               spin_unlock(&sighand->siglock);
-               sig = NULL;     /* Marker for below.  */
-       }
-       clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
-       flush_sigqueue(&tsk->pending);
-       if (sig) {
-               /*
-                * We are cleaning up the signal_struct here.  We delayed
-                * calling exit_itimers until after flush_sigqueue, just in
-                * case our thread-local pending queue contained a queued
-                * timer signal that would have been cleared in
-                * exit_itimers.  When that called sigqueue_free, it would
-                * attempt to re-take the tasklist_lock and deadlock.  This
-                * can never happen if we ensure that all queues the
-                * timer's signal might be queued on have been flushed
-                * first.  The shared_pending queue, and our own pending
-                * queue are the only queues the timer could be on, since
-                * there are no other threads left in the group and timer
-                * signals are constrained to threads inside the group.
-                */
-               exit_itimers(sig);
-               kmem_cache_free(signal_cachep, sig);
-       }
-}
-
-void exit_signal(struct task_struct *tsk)
-{
-       write_lock_irq(&tasklist_lock);
-       __exit_signal(tsk);
-       write_unlock_irq(&tasklist_lock);
-}
-
 /*
  * Flush all handlers for a task.
  */
@@ -451,9 +383,9 @@ unblock_all_signals(void)
        spin_unlock_irqrestore(&current->sighand->siglock, flags);
 }
 
-static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 {
-       struct sigqueue *q, *first = 0;
+       struct sigqueue *q, *first = NULL;
        int still_pending = 0;
 
        if (unlikely(!sigismember(&list->signal, sig)))
@@ -497,9 +429,8 @@ static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *in
 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
                        siginfo_t *info)
 {
-       int sig = 0;
+       int sig = next_signal(pending, mask);
 
-       sig = next_signal(pending, mask);
        if (sig) {
                if (current->notifier) {
                        if (sigismember(current->notifier_mask, sig)) {
@@ -512,9 +443,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 
                if (!collect_signal(sig, pending, info))
                        sig = 0;
-                               
        }
-       recalc_sigpending();
 
        return sig;
 }
@@ -531,10 +460,35 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
        if (!signr)
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info);
+       recalc_sigpending_tsk(tsk);
+       if (signr && unlikely(sig_kernel_stop(signr))) {
+               /*
+                * Set a marker that we have dequeued a stop signal.  Our
+                * caller might release the siglock and then the pending
+                * stop signal it is about to process is no longer in the
+                * pending bitmasks, but must still be cleared by a SIGCONT
+                * (and overruled by a SIGKILL).  So those cases clear this
+                * shared flag after we've set it.  Note that this flag may
+                * remain set after the signal we return is ignored or
+                * handled.  That doesn't matter because its only purpose
+                * is to alert stop-signal processing code when another
+                * processor has come along and cleared the flag.
+                */
+               if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
+                       tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
+       }
        if ( signr &&
             ((info->si_code & __SI_MASK) == __SI_TIMER) &&
             info->si_sys_private){
+               /*
+                * Release the siglock to ensure proper locking order
+                * of timer locks outside of siglocks.  Note, we leave
+                * irqs disabled here, since the posix-timers code is
+                * about to disable them again anyway.
+                */
+               spin_unlock(&tsk->sighand->siglock);
                do_schedule_next_timer(info);
+               spin_lock(&tsk->sighand->siglock);
        }
        return signr;
 }
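
The unlock/relock around do_schedule_next_timer() means every caller must
hold the siglock across dequeue_signal() and tolerate it being dropped.
A minimal sketch of the caller contract, modeled on the
get_signal_to_deliver() loop later in this file:

    spin_lock_irq(&current->sighand->siglock);
    signr = dequeue_signal(current, &current->blocked, &info);
    /* siglock may have been dropped and retaken above if the
       dequeued signal was posted by a POSIX timer */
    spin_unlock_irq(&current->sighand->siglock);
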
@@ -557,19 +511,46 @@ void signal_wake_up(struct task_struct *t, int resume)
        set_tsk_thread_flag(t, TIF_SIGPENDING);
 
        /*
-        * If resume is set, we want to wake it up in the TASK_STOPPED case.
-        * We don't check for TASK_STOPPED because there is a race with it
+        * For SIGKILL, we want to wake it up in the stopped/traced case.
+        * We don't check t->state here because there is a race with it
         * executing on another processor and just now entering stopped state.
-        * By calling wake_up_process any time resume is set, we ensure
-        * the process will wake up and handle its stop or death signal.
+        * By using wake_up_state, we ensure the process will wake up and
+        * handle its death signal.
         */
        mask = TASK_INTERRUPTIBLE;
        if (resume)
-               mask |= TASK_STOPPED;
+               mask |= TASK_STOPPED | TASK_TRACED;
        if (!wake_up_state(t, mask))
                kick_process(t);
 }
 
+/*
+ * Remove signals in mask from the pending set and queue.
+ * Returns 1 if any signals were found.
+ *
+ * All callers must be holding the siglock.
+ *
+ * This version takes a sigset mask and looks at all signals,
+ * not just those in the first mask word.
+ */
+static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
+{
+       struct sigqueue *q, *n;
+       sigset_t m;
+
+       sigandsets(&m, mask, &s->signal);
+       if (sigisemptyset(&m))
+               return 0;
+
+       signandsets(&s->signal, &s->signal, mask);
+       list_for_each_entry_safe(q, n, &s->list, list) {
+               if (sigismember(mask, q->info.si_signo)) {
+                       list_del_init(&q->list);
+                       __sigqueue_free(q);
+               }
+       }
+       return 1;
+}
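
rm_from_queue_full() is needed where a whole sigset_t, including realtime
signals beyond the first mask word, must be stripped. Its caller in
do_sigaction() sits outside this excerpt; a sketch of that pattern,
assuming the mainline code of this era, for flushing a newly ignored
signal from every queue:

    sigset_t mask;

    sigemptyset(&mask);
    sigaddset(&mask, sig);
    rm_from_queue_full(&mask, &t->signal->shared_pending);
    rm_from_queue_full(&mask, &t->pending);
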
 /*
  * Remove signals in mask from the pending set and queue.
  * Returns 1 if any signals were found.
@@ -601,23 +582,40 @@ static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
 {
        int error = -EINVAL;
-       if (sig < 0 || sig > _NSIG)
+
+       if (!valid_signal(sig))
                return error;
+
+       if ((info != SEND_SIG_NOINFO) &&
+               (is_si_special(info) || !SI_FROMUSER(info)))
+               goto skip;
+
+       vxdprintk(VXD_CBIT(misc, 7),
+               "check_kill_permission(%d,%p,%p[#%u,%u])",
+               sig, info, t, vx_task_xid(t), t->pid);
+
        error = -EPERM;
-       if ((!info || ((unsigned long)info != 1 &&
-                       (unsigned long)info != 2 && SI_FROMUSER(info)))
-           && ((sig != SIGCONT) ||
-               (current->signal->session != t->signal->session))
+       if (((sig != SIGCONT) ||
+               (process_session(current) != process_session(t)))
            && (current->euid ^ t->suid) && (current->euid ^ t->uid)
            && (current->uid ^ t->suid) && (current->uid ^ t->uid)
            && !capable(CAP_KILL))
                return error;
-       return security_task_kill(t, info, sig);
+
+       error = -ESRCH;
+       if (!vx_check(vx_task_xid(t), VS_WATCH_P|VS_IDENT)) {
+               vxdprintk(current->xid || VXD_CBIT(misc, 7),
+                       "signal %d[%p] xid mismatch %p[#%u,%u] xid=#%u",
+                       sig, info, t, vx_task_xid(t), t->pid, current->xid);
+               return error;
+       }
+skip:
+       error = security_task_kill(t, info, sig, 0);
+       if (!error)
+               audit_signal_info(sig, t); /* Let audit system see the signal */
+       return error;
 }
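
A note on the test above: (a ^ b) is nonzero exactly when a != b, so the
XOR chain is a compact spelling of the usual POSIX permission rule.
Written out, the condition is equivalent to:

    error = -EPERM;
    if ((sig != SIGCONT || process_session(current) != process_session(t))
        && current->euid != t->suid && current->euid != t->uid
        && current->uid != t->suid && current->uid != t->uid
        && !capable(CAP_KILL))
            return error;
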
 
-/* forward decl */
-static void do_notify_parent_cldstop(struct task_struct *tsk,
-                                    struct task_struct *parent);
 
 /*
  * Handle magic process-wide effects of stop/continue signals.
@@ -630,6 +628,12 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 {
        struct task_struct *t;
 
+       if (p->signal->flags & SIGNAL_GROUP_EXIT)
+               /*
+                * The process is in the middle of dying already.
+                */
+               return;
+
        if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal.  Remove SIGCONT from all queues.
@@ -659,12 +663,10 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                         * the SIGCHLD was pending on entry to this kill.
                         */
                        p->signal->group_stop_count = 0;
-                       if (p->ptrace & PT_PTRACED)
-                               do_notify_parent_cldstop(p, p->parent);
-                       else
-                               do_notify_parent_cldstop(
-                                       p->group_leader,
-                                       p->group_leader->real_parent);
+                       p->signal->flags = SIGNAL_STOP_CONTINUED;
+                       spin_unlock(&p->sighand->siglock);
+                       do_notify_parent_cldstop(p, CLD_STOPPED);
+                       spin_lock(&p->sighand->siglock);
                }
                rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
                t = p;
@@ -695,10 +697,36 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 
                        t = next_thread(t);
                } while (t != p);
+
+               if (p->signal->flags & SIGNAL_STOP_STOPPED) {
+                       /*
+                        * We were in fact stopped, and are now continued.
+                        * Notify the parent with CLD_CONTINUED.
+                        */
+                       p->signal->flags = SIGNAL_STOP_CONTINUED;
+                       p->signal->group_exit_code = 0;
+                       spin_unlock(&p->sighand->siglock);
+                       do_notify_parent_cldstop(p, CLD_CONTINUED);
+                       spin_lock(&p->sighand->siglock);
+               } else {
+                       /*
+                        * We are not stopped, but there could be a stop
+                        * signal in the middle of being processed after
+                        * being removed from the queue.  Clear that too.
+                        */
+                       p->signal->flags = 0;
+               }
+       } else if (sig == SIGKILL) {
+               /*
+                * Make sure that any pending stop signal already dequeued
+                * is undone by the wakeup for SIGKILL.
+                */
+               p->signal->flags = 0;
        }
 }
 
-static int send_signal(int sig, struct siginfo *info, struct sigpending *signals)
+static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
+                       struct sigpending *signals)
 {
        struct sigqueue * q = NULL;
        int ret = 0;
@@ -707,7 +735,7 @@ static int send_signal(int sig, struct siginfo *info, struct sigpending *signals
         * fast-pathed signals for kernel-internal things like SIGSTOP
         * or SIGKILL.
         */
-       if ((unsigned long)info == 2)
+       if (info == SEND_SIG_FORCED)
                goto out_set;
 
        /* Real-time signals must be queued if sent by sigqueue, or
@@ -718,22 +746,20 @@ static int send_signal(int sig, struct siginfo *info, struct sigpending *signals
           make sure at least one signal gets delivered and don't
           pass on the info struct.  */
 
-       if (atomic_read(&nr_queued_signals) < max_queued_signals)
-               q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
-
+       q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
+                                            (is_si_special(info) ||
+                                             info->si_code >= 0)));
        if (q) {
-               atomic_inc(&nr_queued_signals);
-               q->flags = 0;
                list_add_tail(&q->list, &signals->list);
                switch ((unsigned long) info) {
-               case 0:
+               case (unsigned long) SEND_SIG_NOINFO:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_USER;
                        q->info.si_pid = current->pid;
                        q->info.si_uid = current->uid;
                        break;
-               case 1:
+               case (unsigned long) SEND_SIG_PRIV:
                        q->info.si_signo = sig;
                        q->info.si_errno = 0;
                        q->info.si_code = SI_KERNEL;
@@ -744,20 +770,13 @@ static int send_signal(int sig, struct siginfo *info, struct sigpending *signals
                        copy_siginfo(&q->info, info);
                        break;
                }
-       } else {
-               if (sig >= SIGRTMIN && info && (unsigned long)info != 1
-                  && info->si_code != SI_USER)
+       } else if (!is_si_special(info)) {
+               if (sig >= SIGRTMIN && info->si_code != SI_USER)
                /*
                 * Queue overflow, abort.  We may abort if the signal was rt
                 * and sent by user using something other than kill().
                 */
                        return -EAGAIN;
-               if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
-                       /*
-                        * Set up a return to indicate that we dropped 
-                        * the signal.
-                        */
-                       ret = info->si_sys_private;
        }
 
 out_set:
@@ -768,24 +787,45 @@ out_set:
 #define LEGACY_QUEUE(sigptr, sig) \
        (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
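
The SEND_SIG_NOINFO/SEND_SIG_PRIV/SEND_SIG_FORCED cases in send_signal()
above replace the old magic (unsigned long) info values 0, 1 and 2. For
reference, their definitions and the is_si_special() helper live in
include/linux/sched.h in mainline kernels of this era (not part of this
hunk):

    #define SEND_SIG_NOINFO ((struct siginfo *) 0)
    #define SEND_SIG_PRIV   ((struct siginfo *) 1)
    #define SEND_SIG_FORCED ((struct siginfo *) 2)

    static inline int is_si_special(const struct siginfo *info)
    {
            return info <= SEND_SIG_FORCED;
    }
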
 
+int print_fatal_signals = 0;
+
+static void print_fatal_signal(struct pt_regs *regs, int signr)
+{
+       printk("%s/%d: potentially unexpected fatal signal %d.\n",
+               current->comm, current->pid, signr);
+
+#ifdef __i386__
+       printk("code at %08lx: ", regs->eip);
+       {
+               int i;
+               for (i = 0; i < 16; i++) {
+                       unsigned char insn;
+
+                       __get_user(insn, (unsigned char *)(regs->eip + i));
+                       printk("%02x ", insn);
+               }
+       }
+#endif
+       printk("\n");
+       show_regs(regs);
+}
+
+static int __init setup_print_fatal_signals(char *str)
+{
+       get_option (&str, &print_fatal_signals);
+
+       return 1;
+}
+
+__setup("print-fatal-signals=", setup_print_fatal_signals);
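
With the standard __setup() machinery, the new debug aid is enabled from
the kernel command line, e.g.:

    print-fatal-signals=1
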
 
 static int
 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
        int ret = 0;
 
-       if (!irqs_disabled())
-               BUG();
-#ifdef CONFIG_SMP
-       if (!spin_is_locked(&t->sighand->siglock))
-               BUG();
-#endif
-
-       if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
-               /*
-                * Set up a return to indicate that we dropped the signal.
-                */
-               ret = info->si_sys_private;
+       BUG_ON(!irqs_disabled());
+       assert_spin_locked(&t->sighand->siglock);
 
        /* Short-circuit ignored signals.  */
        if (sig_ignored(t, sig))
@@ -797,7 +837,7 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
        if (LEGACY_QUEUE(&t->pending, sig))
                goto out;
 
-       ret = send_signal(sig, info, &t->pending);
+       ret = send_signal(sig, info, t, &t->pending);
        if (!ret && !sigismember(&t->blocked, sig))
                signal_wake_up(t, sig == SIGKILL);
 out:
@@ -807,19 +847,30 @@ out:
 /*
  * Force a signal that the process can't ignore: if necessary
  * we unblock the signal and change any SIG_IGN to SIG_DFL.
+ *
+ * Note: If we unblock the signal, we always reset it to SIG_DFL,
+ * since we do not want to have a signal handler that was blocked
+ * be invoked when user space had explicitly blocked it.
+ *
+ * We don't want to have recursive SIGSEGV's etc, for example.
  */
-
 int
 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
        unsigned long int flags;
-       int ret;
+       int ret, blocked, ignored;
+       struct k_sigaction *action;
 
        spin_lock_irqsave(&t->sighand->siglock, flags);
-       if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
-               t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
-               sigdelset(&t->blocked, sig);
-               recalc_sigpending_tsk(t);
+       action = &t->sighand->action[sig-1];
+       ignored = action->sa.sa_handler == SIG_IGN;
+       blocked = sigismember(&t->blocked, sig);
+       if (blocked || ignored) {
+               action->sa.sa_handler = SIG_DFL;
+               if (blocked) {
+                       sigdelset(&t->blocked, sig);
+                       recalc_sigpending_tsk(t);
+               }
        }
        ret = specific_send_sig_info(sig, info, t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
@@ -830,15 +881,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 void
 force_sig_specific(int sig, struct task_struct *t)
 {
-       unsigned long int flags;
-
-       spin_lock_irqsave(&t->sighand->siglock, flags);
-       if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
-               t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
-       sigdelset(&t->blocked, sig);
-       recalc_sigpending_tsk(t);
-       specific_send_sig_info(sig, (void *)2, t);
-       spin_unlock_irqrestore(&t->sighand->siglock, flags);
+       force_sig_info(sig, SEND_SIG_FORCED, t);
 }
 
 /*
@@ -849,15 +892,21 @@ force_sig_specific(int sig, struct task_struct *t)
  * as soon as they're available, so putting the signal on the shared queue
  * will be equivalent to sending it to one such thread.
  */
-#define wants_signal(sig, p, mask)                     \
-       (!sigismember(&(p)->blocked, sig)               \
-        && !((p)->state & mask)                        \
-        && !((p)->flags & PF_EXITING)                  \
-        && (task_curr(p) || !signal_pending(p)))
-
+static inline int wants_signal(int sig, struct task_struct *p)
+{
+       if (sigismember(&p->blocked, sig))
+               return 0;
+       if (p->flags & PF_EXITING)
+               return 0;
+       if (sig == SIGKILL)
+               return 1;
+       if (p->state & (TASK_STOPPED | TASK_TRACED))
+               return 0;
+       return task_curr(p) || !signal_pending(p);
+}
 
 static void
-__group_complete_signal(int sig, struct task_struct *p, unsigned int mask)
+__group_complete_signal(int sig, struct task_struct *p)
 {
        struct task_struct *t;
 
@@ -867,7 +916,7 @@ __group_complete_signal(int sig, struct task_struct *p, unsigned int mask)
         * If the main thread wants the signal, it gets first crack.
         * Probably the least surprising to the average bear.
         */
-       if (wants_signal(sig, p, mask))
+       if (wants_signal(sig, p))
                t = p;
        else if (thread_group_empty(p))
                /*
@@ -883,9 +932,8 @@ __group_complete_signal(int sig, struct task_struct *p, unsigned int mask)
                if (t == NULL)
                        /* restart balancing at this thread */
                        t = p->signal->curr_target = p;
-               BUG_ON(t->tgid != p->tgid);
 
-               while (!wants_signal(sig, t, mask)) {
+               while (!wants_signal(sig, t)) {
                        t = next_thread(t);
                        if (t == p->signal->curr_target)
                                /*
@@ -902,9 +950,9 @@ __group_complete_signal(int sig, struct task_struct *p, unsigned int mask)
         * Found a killable thread.  If the signal will be fatal,
         * then start taking the whole group down immediately.
         */
-       if (sig_fatal(p, sig) && !p->signal->group_exit &&
+       if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
            !sigismember(&t->real_blocked, sig) &&
-           (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
+           (sig == SIGKILL || !tracehook_consider_fatal_signal(t, sig))) {
                /*
                 * This signal will be fatal to the whole group.
                 */
@@ -915,7 +963,7 @@ __group_complete_signal(int sig, struct task_struct *p, unsigned int mask)
                         * running and doing things after a slower
                         * thread has the fatal signal pending.
                         */
-                       p->signal->group_exit = 1;
+                       p->signal->flags = SIGNAL_GROUP_EXIT;
                        p->signal->group_exit_code = sig;
                        p->signal->group_stop_count = 0;
                        t = p;
@@ -959,24 +1007,14 @@ __group_complete_signal(int sig, struct task_struct *p, unsigned int mask)
        return;
 }
 
-static int
+int
 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
-       unsigned int mask;
        int ret = 0;
 
-#ifdef CONFIG_SMP
-       if (!spin_is_locked(&p->sighand->siglock))
-               BUG();
-#endif
+       assert_spin_locked(&p->sighand->siglock);
        handle_stop_signal(sig, p);
 
-       if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
-               /*
-                * Set up a return to indicate that we dropped the signal.
-                */
-               ret = info->si_sys_private;
-
        /* Short-circuit ignored signals.  */
        if (sig_ignored(p, sig))
                return ret;
@@ -985,24 +1023,16 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
                /* This is a non-RT signal and we already have one queued.  */
                return ret;
 
-       /*
-        * Don't bother zombies and stopped tasks (but
-        * SIGKILL will punch through stopped state)
-        */
-       mask = TASK_DEAD | TASK_ZOMBIE;
-       if (sig != SIGKILL)
-               mask |= TASK_STOPPED;
-
        /*
         * Put this signal on the shared-pending queue, or fail with EAGAIN.
         * We always use the shared queue for process-wide signals,
         * to avoid several races.
         */
-       ret = send_signal(sig, info, &p->signal->shared_pending);
+       ret = send_signal(sig, info, p, &p->signal->shared_pending);
        if (unlikely(ret))
                return ret;
 
-       __group_complete_signal(sig, p, mask);
+       __group_complete_signal(sig, p);
        return 0;
 }
 
@@ -1013,6 +1043,7 @@ void zap_other_threads(struct task_struct *p)
 {
        struct task_struct *t;
 
+       p->signal->flags = SIGNAL_GROUP_EXIT;
        p->signal->group_stop_count = 0;
 
        if (thread_group_empty(p))
@@ -1022,7 +1053,7 @@ void zap_other_threads(struct task_struct *p)
                /*
                 * Don't bother with already dead threads
                 */
-               if (t->state & (TASK_ZOMBIE|TASK_DEAD))
+               if (t->exit_state)
                        continue;
 
                /*
@@ -1036,57 +1067,88 @@ void zap_other_threads(struct task_struct *p)
                if (t != p->group_leader)
                        t->exit_signal = -1;
 
+               /* SIGKILL will be handled before any pending SIGSTOP */
                sigaddset(&t->pending.signal, SIGKILL);
-               rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
                signal_wake_up(t, 1);
        }
 }
 
 /*
- * Must be called with the tasklist_lock held for reading!
+ * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
+struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
+{
+       struct sighand_struct *sighand;
+
+       for (;;) {
+               sighand = rcu_dereference(tsk->sighand);
+               if (unlikely(sighand == NULL))
+                       break;
+
+               spin_lock_irqsave(&sighand->siglock, *flags);
+               if (likely(sighand == tsk->sighand))
+                       break;
+               spin_unlock_irqrestore(&sighand->siglock, *flags);
+       }
+
+       return sighand;
+}
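
lock_task_sighand() pairs with unlock_task_sighand(), which is used below
but defined in include/linux/sched.h rather than here; for reference it is
simply (sketch, matching the lock side above):

    static inline void unlock_task_sighand(struct task_struct *tsk,
                                           unsigned long *flags)
    {
            spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
    }
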
+
 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
        unsigned long flags;
        int ret;
 
        ret = check_kill_permission(sig, info, p);
-       if (!ret && sig && p->sighand) {
-               spin_lock_irqsave(&p->sighand->siglock, flags);
-               ret = __group_send_sig_info(sig, info, p);
-               spin_unlock_irqrestore(&p->sighand->siglock, flags);
+
+       if (!ret && sig) {
+               ret = -ESRCH;
+               if (lock_task_sighand(p, &flags)) {
+                       ret = __group_send_sig_info(sig, info, p);
+                       unlock_task_sighand(p, &flags);
+               }
        }
 
        return ret;
 }
 
 /*
- * kill_pg_info() sends a signal to a process group: this is what the tty
+ * kill_pgrp_info() sends a signal to a process group: this is what the tty
  * control characters do (^C, ^Z etc)
  */
 
-int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
+int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
+{
+       struct task_struct *p = NULL;
+       int retval, success;
+
+       success = 0;
+       retval = -ESRCH;
+       do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
+               int err = group_send_sig_info(sig, info, p);
+               success |= !err;
+               retval = err;
+       } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
+       return success ? 0 : retval;
+}
+
+int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
 {
-       struct task_struct *p;
-       struct list_head *l;
-       struct pid *pid;
        int retval;
-       int found;
 
+       read_lock(&tasklist_lock);
+       retval = __kill_pgrp_info(sig, info, pgrp);
+       read_unlock(&tasklist_lock);
+
+       return retval;
+}
+
+int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
+{
        if (pgrp <= 0)
                return -EINVAL;
 
-       found = 0;
-       retval = 0;
-       for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) {
-               int err;
-
-               found = 1;
-               err = group_send_sig_info(sig, info, p);
-               if (!retval)
-                       retval = err;
-       }
-       return found ? retval : -ESRCH;
+       return __kill_pgrp_info(sig, info, find_pid(pgrp));
 }
 
 int
@@ -1101,53 +1163,72 @@ kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
        return retval;
 }
 
-/*
- * kill_sl_info() sends a signal to the session leader: this is used
- * to send SIGHUP to the controlling process of a terminal when
- * the connection is lost.
- */
-
-
-int
-kill_sl_info(int sig, struct siginfo *info, pid_t sid)
+int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
 {
-       int err, retval = -EINVAL;
-       struct pid *pid;
-       struct list_head *l;
+       int error;
+       int acquired_tasklist_lock = 0;
        struct task_struct *p;
 
-       if (sid <= 0)
-               goto out;
-
-       retval = -ESRCH;
-       read_lock(&tasklist_lock);
-       for_each_task_pid(sid, PIDTYPE_SID, p, l, pid) {
-               if (!p->signal->leader)
-                       continue;
-               err = group_send_sig_info(sig, info, p);
-               if (retval)
-                       retval = err;
+       rcu_read_lock();
+       if (unlikely(sig_needs_tasklist(sig))) {
+               read_lock(&tasklist_lock);
+               acquired_tasklist_lock = 1;
        }
-       read_unlock(&tasklist_lock);
-out:
-       return retval;
+       p = pid_task(pid, PIDTYPE_PID);
+       error = -ESRCH;
+       if (p && vx_check(vx_task_xid(p), VS_IDENT))
+               error = group_send_sig_info(sig, info, p);
+       if (unlikely(acquired_tasklist_lock))
+               read_unlock(&tasklist_lock);
+       rcu_read_unlock();
+       return error;
 }
 
-int
-kill_proc_info(int sig, struct siginfo *info, pid_t pid)
+static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 {
        int error;
+       rcu_read_lock();
+       error = kill_pid_info(sig, info, find_pid(vx_rmap_pid(pid)));
+       rcu_read_unlock();
+       return error;
+}
+
+/* like kill_pid_info(), but doesn't use uid/euid of "current" */
+int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
+                     uid_t uid, uid_t euid, u32 secid)
+{
+       int ret = -EINVAL;
        struct task_struct *p;
 
+       if (!valid_signal(sig))
+               return ret;
+
        read_lock(&tasklist_lock);
-       p = find_task_by_pid(pid);
-       error = -ESRCH;
-       if (p)
-               error = group_send_sig_info(sig, info, p);
+       p = pid_task(pid, PIDTYPE_PID);
+       if (!p) {
+               ret = -ESRCH;
+               goto out_unlock;
+       }
+       if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
+           && (euid != p->suid) && (euid != p->uid)
+           && (uid != p->suid) && (uid != p->uid)) {
+               ret = -EPERM;
+               goto out_unlock;
+       }
+       ret = security_task_kill(p, info, sig, secid);
+       if (ret)
+               goto out_unlock;
+       if (sig && p->sighand) {
+               unsigned long flags;
+               spin_lock_irqsave(&p->sighand->siglock, flags);
+               ret = __group_send_sig_info(sig, info, p);
+               spin_unlock_irqrestore(&p->sighand->siglock, flags);
+       }
+out_unlock:
        read_unlock(&tasklist_lock);
-       return error;
+       return ret;
 }
-
+EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
 
 /*
  * kill_something_info() interprets pid in interesting ways just like kill(2).
@@ -1166,7 +1247,8 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
 
                read_lock(&tasklist_lock);
                for_each_process(p) {
-                       if (p->pid > 1 && p->tgid != current->tgid) {
+                       if (vx_check(vx_task_xid(p), VS_ADMIN_P|VS_IDENT) &&
+                               p->pid > 1 && p->tgid != current->tgid) {
                                int err = group_send_sig_info(sig, info, p);
                                ++count;
                                if (err != -EPERM)
@@ -1196,6 +1278,13 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
        int ret;
        unsigned long flags;
 
+       /*
+        * Make sure legacy kernel users don't send in bad values
+        * (normal paths check this in check_kill_permission).
+        */
+       if (!valid_signal(sig))
+               return -EINVAL;
+
        /*
         * We need the tasklist lock even for the specific
         * thread case (when we don't need to follow the group
@@ -1210,10 +1299,13 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
        return ret;
 }
 
+#define __si_special(priv) \
+       ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
+
 int
 send_sig(int sig, struct task_struct *p, int priv)
 {
-       return send_sig_info(sig, (void*)(long)(priv != 0), p);
+       return send_sig_info(sig, __si_special(priv), p);
 }
 
 /*
@@ -1233,25 +1325,50 @@ send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 void
 force_sig(int sig, struct task_struct *p)
 {
-       force_sig_info(sig, (void*)1L, p);
+       force_sig_info(sig, SEND_SIG_PRIV, p);
 }
 
+/*
+ * When things go south during signal handling, we
+ * will force a SIGSEGV. And if the signal that caused
+ * the problem was already a SIGSEGV, we'll want to
+ * make sure we don't even try to deliver the signal..
+ */
 int
-kill_pg(pid_t pgrp, int sig, int priv)
+force_sigsegv(int sig, struct task_struct *p)
 {
-       return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
+       if (sig == SIGSEGV) {
+               unsigned long flags;
+               spin_lock_irqsave(&p->sighand->siglock, flags);
+               p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
+               spin_unlock_irqrestore(&p->sighand->siglock, flags);
+       }
+       force_sig(SIGSEGV, p);
+       return 0;
 }
 
+int kill_pgrp(struct pid *pid, int sig, int priv)
+{
+       return kill_pgrp_info(sig, __si_special(priv), pid);
+}
+EXPORT_SYMBOL(kill_pgrp);
+
+int kill_pid(struct pid *pid, int sig, int priv)
+{
+       return kill_pid_info(sig, __si_special(priv), pid);
+}
+EXPORT_SYMBOL(kill_pid);
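
kill_pgrp() and kill_pid() are the struct pid based replacements for the
pid_t helpers kept below for legacy callers. A hedged usage sketch, with a
hypothetical pid_t value nr and the find_pid() lookup used elsewhere in
this diff:

    rcu_read_lock();
    ret = kill_pid(find_pid(nr), SIGTERM, 1);
    rcu_read_unlock();
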
+
 int
-kill_sl(pid_t sess, int sig, int priv)
+kill_pg(pid_t pgrp, int sig, int priv)
 {
-       return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
+       return kill_pg_info(sig, __si_special(priv), pgrp);
 }
 
 int
 kill_proc(pid_t pid, int sig, int priv)
 {
-       return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
+       return kill_proc_info(sig, __si_special(priv), pid);
 }
 
 /*
@@ -1268,7 +1385,7 @@ struct sigqueue *sigqueue_alloc(void)
 {
        struct sigqueue *q;
 
-       if ((q = __sigqueue_alloc()))
+       if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
                q->flags |= SIGQUEUE_PREALLOC;
        return(q);
 }
@@ -1282,70 +1399,78 @@ void sigqueue_free(struct sigqueue *q)
         * pending queue.
         */
        if (unlikely(!list_empty(&q->list))) {
-               read_lock(&tasklist_lock);  
-               spin_lock_irqsave(q->lock, flags);
+               spinlock_t *lock = &current->sighand->siglock;
+               read_lock(&tasklist_lock);
+               spin_lock_irqsave(lock, flags);
                if (!list_empty(&q->list))
                        list_del_init(&q->list);
-               spin_unlock_irqrestore(q->lock, flags);
+               spin_unlock_irqrestore(lock, flags);
                read_unlock(&tasklist_lock);
        }
        q->flags &= ~SIGQUEUE_PREALLOC;
        __sigqueue_free(q);
 }
 
-int
-send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
+int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 {
        unsigned long flags;
        int ret = 0;
 
+       BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+
        /*
-        * We need the tasklist lock even for the specific
-        * thread case (when we don't need to follow the group
-        * lists) in order to avoid races with "p->sighand"
-        * going away or changing from under us.
+        * The rcu based delayed sighand destroy makes it possible to
+        * run this without tasklist lock held. The task struct itself
+        * cannot go away as create_timer did get_task_struct().
+        *
+        * We return -1 when the task is marked exiting, so
+        * posix_timer_event can redirect it to the group leader
         */
-       BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-       read_lock(&tasklist_lock);  
-       spin_lock_irqsave(&p->sighand->siglock, flags);
-       
+       rcu_read_lock();
+
+       if (!likely(lock_task_sighand(p, &flags))) {
+               ret = -1;
+               goto out_err;
+       }
+
        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queued, just increment
                 * the overrun count.
                 */
-               if (q->info.si_code != SI_TIMER)
-                       BUG();
+               BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
                goto out;
-       } 
+       }
        /* Short-circuit ignored signals.  */
        if (sig_ignored(p, sig)) {
                ret = 1;
                goto out;
        }
 
-       q->lock = &p->sighand->siglock;
        list_add_tail(&q->list, &p->pending.list);
        sigaddset(&p->pending.signal, sig);
        if (!sigismember(&p->blocked, sig))
                signal_wake_up(p, sig == SIGKILL);
 
 out:
-       spin_unlock_irqrestore(&p->sighand->siglock, flags);
-       read_unlock(&tasklist_lock);
-       return(ret);
+       unlock_task_sighand(p, &flags);
+out_err:
+       rcu_read_unlock();
+
+       return ret;
 }
 
 int
 send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 {
        unsigned long flags;
-       unsigned int mask;
        int ret = 0;
 
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+
        read_lock(&tasklist_lock);
+       /* Since it_lock is held, p->sighand cannot be NULL. */
        spin_lock_irqsave(&p->sighand->siglock, flags);
        handle_stop_signal(sig, p);
 
@@ -1361,76 +1486,52 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
                 * the overrun count.  Other uses should not try to
                 * send the signal multiple times.
                 */
-               if (q->info.si_code != SI_TIMER)
-                       BUG();
+               BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
                goto out;
        } 
-       /*
-        * Don't bother zombies and stopped tasks (but
-        * SIGKILL will punch through stopped state)
-        */
-       mask = TASK_DEAD | TASK_ZOMBIE;
-       if (sig != SIGKILL)
-               mask |= TASK_STOPPED;
 
        /*
         * Put this signal on the shared-pending queue.
         * We always use the shared queue for process-wide signals,
         * to avoid several races.
         */
-       q->lock = &p->sighand->siglock;
        list_add_tail(&q->list, &p->signal->shared_pending.list);
        sigaddset(&p->signal->shared_pending.signal, sig);
 
-       __group_complete_signal(sig, p, mask);
+       __group_complete_signal(sig, p);
 out:
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
        read_unlock(&tasklist_lock);
-       return(ret);
+       return ret;
 }
 
 /*
- * Joy. Or not. Pthread wants us to wake up every thread
- * in our parent group.
+ * Wake up any threads in the parent blocked in wait* syscalls.
  */
-static void __wake_up_parent(struct task_struct *p,
+static inline void __wake_up_parent(struct task_struct *p,
                                    struct task_struct *parent)
 {
-       struct task_struct *tsk = parent;
-
-       /*
-        * Fortunately this is not necessary for thread groups:
-        */
-       if (p->tgid == tsk->tgid) {
-               wake_up_interruptible(&tsk->wait_chldexit);
-               return;
-       }
-
-       do {
-               wake_up_interruptible(&tsk->wait_chldexit);
-               tsk = next_thread(tsk);
-               if (tsk->signal != parent->signal)
-                       BUG();
-       } while (tsk != parent);
+       wake_up_interruptible_sync(&parent->signal->wait_chldexit);
 }
 
 /*
- * Let a parent know about a status change of a child.
+ * Let a parent know about the death of a child.
+ * For a stopped/continued status change, use do_notify_parent_cldstop instead.
  */
 
 void do_notify_parent(struct task_struct *tsk, int sig)
 {
        struct siginfo info;
        unsigned long flags;
-       int why, status;
        struct sighand_struct *psig;
 
-       if (sig == -1)
-               BUG();
+       BUG_ON(sig == -1);
+
+       /* do_notify_parent_cldstop should have been called instead.  */
+       BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
 
-       BUG_ON(tsk->group_leader != tsk && tsk->group_leader->state != TASK_ZOMBIE && !tsk->ptrace);
-       BUG_ON(tsk->group_leader == tsk && !thread_group_empty(tsk) && !tsk->ptrace);
+       BUG_ON(tsk->group_leader != tsk || !thread_group_empty(tsk));
 
        info.si_signo = sig;
        info.si_errno = 0;
@@ -1438,37 +1539,24 @@ void do_notify_parent(struct task_struct *tsk, int sig)
        info.si_uid = tsk->uid;
 
        /* FIXME: find out whether or not this is supposed to be c*time. */
-       info.si_utime = tsk->utime;
-       info.si_stime = tsk->stime;
-
-       status = tsk->exit_code & 0x7f;
-       why = SI_KERNEL;        /* shouldn't happen */
-       switch (tsk->state) {
-       case TASK_STOPPED:
-               /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
-               if (tsk->ptrace & PT_PTRACED)
-                       why = CLD_TRAPPED;
-               else
-                       why = CLD_STOPPED;
-               break;
+       info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
+                                                      tsk->signal->utime));
+       info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
+                                                      tsk->signal->stime));
 
-       default:
-               if (tsk->exit_code & 0x80)
-                       why = CLD_DUMPED;
-               else if (tsk->exit_code & 0x7f)
-                       why = CLD_KILLED;
-               else {
-                       why = CLD_EXITED;
-                       status = tsk->exit_code >> 8;
-               }
-               break;
+       info.si_status = tsk->exit_code & 0x7f;
+       if (tsk->exit_code & 0x80)
+               info.si_code = CLD_DUMPED;
+       else if (tsk->exit_code & 0x7f)
+               info.si_code = CLD_KILLED;
+       else {
+               info.si_code = CLD_EXITED;
+               info.si_status = tsk->exit_code >> 8;
        }
-       info.si_code = why;
-       info.si_status = status;
 
        psig = tsk->parent->sighand;
        spin_lock_irqsave(&psig->siglock, flags);
-       if (sig == SIGCHLD && tsk->state != TASK_STOPPED &&
+       if (sig == SIGCHLD &&
            (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
             (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
                /*
@@ -1490,35 +1578,17 @@ void do_notify_parent(struct task_struct *tsk, int sig)
                if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
                        sig = 0;
        }
-       if (sig > 0 && sig <= _NSIG)
+       if (valid_signal(sig) && sig > 0)
                __group_send_sig_info(sig, &info, tsk->parent);
        __wake_up_parent(tsk, tsk->parent);
        spin_unlock_irqrestore(&psig->siglock, flags);
 }
 
-
-/*
- * We need the tasklist lock because it's the only
- * thing that protects out "parent" pointer.
- *
- * exit.c calls "do_notify_parent()" directly, because
- * it already has the tasklist lock.
- */
-void
-notify_parent(struct task_struct *tsk, int sig)
-{
-       if (sig != -1) {
-               read_lock(&tasklist_lock);
-               do_notify_parent(tsk, sig);
-               read_unlock(&tasklist_lock);
-       }
-}
-
-static void
-do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent)
+void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 {
        struct siginfo info;
        unsigned long flags;
+       struct task_struct *parent;
        struct sighand_struct *sighand;
 
        info.si_signo = SIGCHLD;
@@ -1527,11 +1597,32 @@ do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent)
        info.si_uid = tsk->uid;
 
        /* FIXME: find out whether or not this is supposed to be c*time. */
-       info.si_utime = tsk->utime;
-       info.si_stime = tsk->stime;
+       info.si_utime = cputime_to_jiffies(tsk->utime);
+       info.si_stime = cputime_to_jiffies(tsk->stime);
+
+       info.si_code = why;
+       switch (why) {
+       case CLD_CONTINUED:
+               info.si_status = SIGCONT;
+               break;
+       case CLD_STOPPED:
+               info.si_status = tsk->signal->group_exit_code & 0x7f;
+               break;
+       case CLD_TRAPPED:
+               info.si_status = tsk->exit_code & 0x7f;
+               break;
+       default:
+               BUG();
+       }
 
-       info.si_status = tsk->exit_code & 0x7f;
-       info.si_code = CLD_STOPPED;
+       /*
+        * Tracing can decide that we should not do the normal notification.
+        */
+       if (tracehook_notify_cldstop(tsk, &info))
+               return;
+
+       tsk = tsk->group_leader;
+       parent = tsk->parent;
 
        sighand = parent->sighand;
        spin_lock_irqsave(&sighand->siglock, flags);
@@ -1545,9 +1636,6 @@ do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent)
        spin_unlock_irqrestore(&sighand->siglock, flags);
 }
 
-
-#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER
-
 static void
 finish_stop(int stop_count)
 {
@@ -1556,19 +1644,15 @@ finish_stop(int stop_count)
         * a group stop in progress and we are the last to stop,
         * report to the parent.  When ptraced, every thread reports itself.
         */
-       if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
+       if (!tracehook_finish_stop(stop_count <= 0) && stop_count <= 0) {
                read_lock(&tasklist_lock);
-               do_notify_parent_cldstop(current, current->parent);
-               read_unlock(&tasklist_lock);
-       }
-       else if (stop_count == 0) {
-               read_lock(&tasklist_lock);
-               do_notify_parent_cldstop(current->group_leader,
-                                        current->group_leader->real_parent);
+               do_notify_parent_cldstop(current, CLD_STOPPED);
                read_unlock(&tasklist_lock);
        }
 
-       schedule();
+       do {
+               schedule();
+       } while (try_to_freeze());
        /*
         * Now we don't run again until continued.
         */
@@ -1578,104 +1662,55 @@ finish_stop(int stop_count)
 /*
  * This performs the stopping for SIGSTOP and other stop signals.
  * We have to stop all threads in the thread group.
+ * Returns nonzero if we've actually stopped and released the siglock.
+ * Returns zero if we didn't stop and still hold the siglock.
  */
-static void
-do_signal_stop(int signr)
+static int do_signal_stop(int signr)
 {
        struct signal_struct *sig = current->signal;
-       struct sighand_struct *sighand = current->sighand;
-       int stop_count = -1;
+       int stop_count;
 
-       /* spin_lock_irq(&sighand->siglock) is now done in caller */
+       if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
+               return 0;
 
        if (sig->group_stop_count > 0) {
                /*
                 * There is a group stop in progress.  We don't need to
                 * start another one.
                 */
-               signr = sig->group_exit_code;
                stop_count = --sig->group_stop_count;
-               current->exit_code = signr;
-               set_current_state(TASK_STOPPED);
-               spin_unlock_irq(&sighand->siglock);
-       }
-       else if (thread_group_empty(current)) {
-               /*
-                * Lock must be held through transition to stopped state.
-                */
-               current->exit_code = signr;
-               set_current_state(TASK_STOPPED);
-               spin_unlock_irq(&sighand->siglock);
-       }
-       else {
+       } else {
                /*
                 * There is no group stop already in progress.
-                * We must initiate one now, but that requires
-                * dropping siglock to get both the tasklist lock
-                * and siglock again in the proper order.  Note that
-                * this allows an intervening SIGCONT to be posted.
-                * We need to check for that and bail out if necessary.
+                * We must initiate one now.
                 */
                struct task_struct *t;
 
-               spin_unlock_irq(&sighand->siglock);
-
-               /* signals can be posted during this window */
-
-               read_lock(&tasklist_lock);
-               spin_lock_irq(&sighand->siglock);
+               sig->group_exit_code = signr;
 
-               if (unlikely(sig->group_exit)) {
+               stop_count = 0;
+               for (t = next_thread(current); t != current; t = next_thread(t))
                        /*
-                        * There is a group exit in progress now.
-                        * We'll just ignore the stop and process the
-                        * associated fatal signal.
+                        * Setting state to TASK_STOPPED for a group
+                        * stop is always done with the siglock held,
+                        * so this check has no races.
                         */
-                       spin_unlock_irq(&sighand->siglock);
-                       read_unlock(&tasklist_lock);
-                       return;
-               }
-
-               if (unlikely(sig_avoid_stop_race())) {
-                       /*
-                        * Either a SIGCONT or a SIGKILL signal was
-                        * posted in the siglock-not-held window.
-                        */
-                       spin_unlock_irq(&sighand->siglock);
-                       read_unlock(&tasklist_lock);
-                       return;
-               }
-
-               if (sig->group_stop_count == 0) {
-                       sig->group_exit_code = signr;
-                       stop_count = 0;
-                       for (t = next_thread(current); t != current;
-                            t = next_thread(t))
-                               /*
-                                * Setting state to TASK_STOPPED for a group
-                                * stop is always done with the siglock held,
-                                * so this check has no races.
-                                */
-                               if (t->state < TASK_STOPPED) {
-                                       stop_count++;
-                                       signal_wake_up(t, 0);
-                               }
-                       sig->group_stop_count = stop_count;
-               }
-               else {
-                       /* A race with another thread while unlocked.  */
-                       signr = sig->group_exit_code;
-                       stop_count = --sig->group_stop_count;
-               }
-
-               current->exit_code = signr;
-               set_current_state(TASK_STOPPED);
-
-               spin_unlock_irq(&sighand->siglock);
-               read_unlock(&tasklist_lock);
+                       if (!t->exit_state &&
+                           !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+                               stop_count++;
+                               signal_wake_up(t, 0);
+                       }
+               sig->group_stop_count = stop_count;
        }
 
+       if (stop_count == 0)
+               sig->flags = SIGNAL_STOP_STOPPED;
+       current->exit_code = sig->group_exit_code;
+       __set_current_state(TASK_STOPPED);
+
+       spin_unlock_irq(&current->sighand->siglock);
        finish_stop(stop_count);
+       return 1;
 }
 
 /*
@@ -1684,7 +1719,7 @@ do_signal_stop(int signr)
  * We return zero if we still hold the siglock and should look
  * for another signal without checking group_stop_count again.
  */
-static inline int handle_group_stop(void)
+static int handle_group_stop(void)
 {
        int stop_count;
 
@@ -1697,7 +1732,7 @@ static inline int handle_group_stop(void)
                return 0;
        }
 
-       if (current->signal->group_exit)
+       if (current->signal->flags & SIGNAL_GROUP_EXIT)
                /*
                 * Group stop is so another thread can do a core dump,
                 * or else we are racing against a death signal.
@@ -1710,6 +1745,8 @@ static inline int handle_group_stop(void)
         * without any associated signal being in our queue.
         */
        stop_count = --current->signal->group_stop_count;
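+       /* The last thread to stop completes the group stop. */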
+       if (stop_count == 0)
+               current->signal->flags = SIGNAL_STOP_STOPPED;
        current->exit_code = current->signal->group_exit_code;
        set_current_state(TASK_STOPPED);
        spin_unlock_irq(&current->sighand->siglock);
@@ -1717,11 +1754,14 @@ static inline int handle_group_stop(void)
        return 1;
 }
 
-int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs, void *cookie)
+int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
+                         struct pt_regs *regs, void *cookie)
 {
        sigset_t *mask = &current->blocked;
        int signr = 0;
 
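+       /* Cooperate with the freezer before looking for a signal to deliver. */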
+       try_to_freeze();
+
 relock:
        spin_lock_irq(&current->sighand->siglock);
        for (;;) {
@@ -1731,63 +1771,35 @@ relock:
                    handle_group_stop())
                        goto relock;
 
-               signr = dequeue_signal(current, mask, info);
-
-               if (!signr)
-                       break; /* will return 0 */
-
-               if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
-                       ptrace_signal_deliver(regs, cookie);
-
-                       /*
-                        * If there is a group stop in progress,
-                        * we must participate in the bookkeeping.
-                        */
-                       if (current->signal->group_stop_count > 0)
-                               --current->signal->group_stop_count;
-
-                       /* Let the debugger run.  */
-                       current->exit_code = signr;
-                       current->last_siginfo = info;
-                       set_current_state(TASK_STOPPED);
-                       spin_unlock_irq(&current->sighand->siglock);
-                       notify_parent(current, SIGCHLD);
-                       schedule();
-
-                       current->last_siginfo = NULL;
-
-                       /* We're back.  Did the debugger cancel the sig?  */
-                       spin_lock_irq(&current->sighand->siglock);
-                       signr = current->exit_code;
-                       if (signr == 0)
-                               continue;
-
-                       current->exit_code = 0;
-
-                       /* Update the siginfo structure if the signal has
-                          changed.  If the debugger wanted something
-                          specific in the siginfo structure then it should
-                          have updated *info via PTRACE_SETSIGINFO.  */
-                       if (signr != info->si_signo) {
-                               info->si_signo = signr;
-                               info->si_errno = 0;
-                               info->si_code = SI_USER;
-                               info->si_pid = current->parent->pid;
-                               info->si_uid = current->parent->uid;
-                       }
+               /*
+                * Tracing can induce an artificial signal and choose the sigaction.
+                * The return value in signr determines the default action,
+                * but info->si_signo is the signal number we will report.
+                */
+               signr = tracehook_get_signal(current, regs, info, return_ka);
+               if (unlikely(signr < 0))
+                       goto relock;
+               if (unlikely(signr != 0))
+                       ka = return_ka;
+               else {
+                       signr = dequeue_signal(current, mask, info);
 
-                       /* If the (new) signal is now blocked, requeue it.  */
-                       if (sigismember(&current->blocked, signr)) {
-                               specific_send_sig_info(signr, info, current);
-                               continue;
-                       }
+                       if (!signr)
+                               break; /* will return 0 */
+                       ka = &current->sighand->action[signr-1];
                }
 
-               ka = &current->sighand->action[signr-1];
                if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
                        continue;
-               if (ka->sa.sa_handler != SIG_DFL) /* Run the handler.  */
+               if (ka->sa.sa_handler != SIG_DFL) {
+                       /* Run the handler.  */
+                       *return_ka = *ka;
+
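+                       /* SA_ONESHOT (SA_RESETHAND): the handler runs once, then reverts to SIG_DFL. */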
+                       if (ka->sa.sa_flags & SA_ONESHOT)
+                               ka->sa.sa_handler = SIG_DFL;
+
                        break; /* will return non-zero "signr" value */
+               }
 
                /*
                 * Now we are doing the default action for this signal.
@@ -1795,8 +1807,17 @@ relock:
                if (sig_kernel_ignore(signr)) /* Default is nothing. */
                        continue;
 
-               /* Init gets no signals it doesn't want.  */
-               if (current->pid == 1)
+               /*
+                * Init of a pid space gets no signals it doesn't want from
+                * within that pid space. It can of course get signals from
+                * its parent pid space.
+                */
+               if (current == child_reaper(current))
+                       continue;
+
+               /* virtual init is protected against user signals */
+               if ((info->si_code == SI_USER) &&
+                       vx_current_initpid(current->pid))
                        continue;
 
                if (sig_kernel_stop(signr)) {
@@ -1810,28 +1831,27 @@ relock:
                         * This allows an intervening SIGCONT to be posted.
                         * We need to check for that and bail out if necessary.
                         */
-                       if (signr == SIGSTOP) {
-                               do_signal_stop(signr); /* releases siglock */
-                               goto relock;
-                       }
-                       spin_unlock_irq(&current->sighand->siglock);
+                       if (signr != SIGSTOP) {
+                               spin_unlock_irq(&current->sighand->siglock);
 
-                       /* signals can be posted during this window */
+                               /* signals can be posted during this window */
 
-                       if (is_orphaned_pgrp(process_group(current)))
-                               goto relock;
+                               if (is_orphaned_pgrp(process_group(current)))
+                                       goto relock;
 
-                       spin_lock_irq(&current->sighand->siglock);
-                       if (unlikely(sig_avoid_stop_race())) {
-                               /*
-                                * Either a SIGCONT or a SIGKILL signal was
-                                * posted in the siglock-not-held window.
-                                */
-                               continue;
+                               spin_lock_irq(&current->sighand->siglock);
                        }
 
-                       do_signal_stop(signr); /* releases siglock */
-                       goto relock;
+                       if (likely(do_signal_stop(info->si_signo))) {
+                               /* It released the siglock.  */
+                               goto relock;
+                       }
+
+                       /*
+                        * We didn't actually stop: SIGCONT or SIGKILL
+                        * raced in and cleared the pending stop.
+                        */
+                       continue;
                }
 
                spin_unlock_irq(&current->sighand->siglock);
@@ -1840,55 +1860,38 @@ relock:
                 * Anything else is fatal, maybe with a core dump.
                 */
                current->flags |= PF_SIGNALED;
-               if (sig_kernel_coredump(signr) &&
-                   do_coredump((long)signr, signr, regs)) {
+               if (print_fatal_signals)
+                       print_fatal_signal(regs, signr);
+               if (sig_kernel_coredump(signr)) {
                        /*
-                        * That killed all other threads in the group and
-                        * synchronized with their demise, so there can't
-                        * be any more left to kill now.  The group_exit
-                        * flags are set by do_coredump.  Note that
-                        * thread_group_empty won't always be true yet,
-                        * because those threads were blocked in __exit_mm
-                        * and we just let them go to finish dying.
+                        * If it was able to dump core, this kills all
+                        * other threads in the group and synchronizes with
+                        * their demise.  If we lost the race with another
+                        * thread getting here, it set group_exit_code
+                        * first and our do_group_exit call below will use
+                        * that value and ignore the one we pass it.
                         */
-                       const int code = signr | 0x80;
-                       BUG_ON(!current->signal->group_exit);
-                       BUG_ON(current->signal->group_exit_code != code);
-                       do_exit(code);
-                       /* NOTREACHED */
+                       do_coredump(info->si_signo, info->si_signo, regs);
                }
 
                /*
                 * Death signals, no core dump.
                 */
-               do_group_exit(signr);
+               do_group_exit(info->si_signo);
                /* NOTREACHED */
        }
        spin_unlock_irq(&current->sighand->siglock);
        return signr;
 }
 
-#endif
-
 EXPORT_SYMBOL(recalc_sigpending);
 EXPORT_SYMBOL_GPL(dequeue_signal);
 EXPORT_SYMBOL(flush_signals);
 EXPORT_SYMBOL(force_sig);
-EXPORT_SYMBOL(force_sig_info);
 EXPORT_SYMBOL(kill_pg);
-EXPORT_SYMBOL(kill_pg_info);
 EXPORT_SYMBOL(kill_proc);
-EXPORT_SYMBOL(kill_proc_info);
-EXPORT_SYMBOL(kill_sl);
-EXPORT_SYMBOL(kill_sl_info);
-EXPORT_SYMBOL(notify_parent);
 EXPORT_SYMBOL(send_sig);
 EXPORT_SYMBOL(send_sig_info);
-EXPORT_SYMBOL(send_group_sig_info);
-EXPORT_SYMBOL(sigqueue_alloc);
-EXPORT_SYMBOL(sigqueue_free);
-EXPORT_SYMBOL(send_sigqueue);
-EXPORT_SYMBOL(send_group_sigqueue);
 EXPORT_SYMBOL(sigprocmask);
 EXPORT_SYMBOL(block_all_signals);
 EXPORT_SYMBOL(unblock_all_signals);
@@ -1926,10 +1929,11 @@ long do_no_restart_syscall(struct restart_block *param)
 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
 {
        int error;
-       sigset_t old_block;
 
        spin_lock_irq(&current->sighand->siglock);
-       old_block = current->blocked;
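+       /* Report the old mask while the lock is still held. */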
+       if (oldset)
+               *oldset = current->blocked;
+
        error = 0;
        switch (how) {
        case SIG_BLOCK:
@@ -1946,8 +1950,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
        }
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
-       if (oldset)
-               *oldset = old_block;
+
        return error;
 }
 
@@ -2132,8 +2135,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
                        recalc_sigpending();
                        spin_unlock_irq(&current->sighand->siglock);
 
-                       current->state = TASK_INTERRUPTIBLE;
-                       timeout = schedule_timeout(timeout);
+                       timeout = schedule_timeout_interruptible(timeout);
 
                        spin_lock_irq(&current->sighand->siglock);
                        sig = dequeue_signal(current, &these, &info);
@@ -2173,26 +2175,13 @@ sys_kill(int pid, int sig)
        return kill_something_info(sig, &info, pid);
 }
 
-/**
- *  sys_tkill - send signal to one specific thread
- *  @tgid: the thread group ID of the thread
- *  @pid: the PID of the thread
- *  @sig: signal to be sent
- *
- *  This syscall also checks the tgid and returns -ESRCH even if the PID
- *  exists but it's not belonging to the target process anymore. This
- *  method solves the problem of threads exiting and PIDs getting reused.
- */
-asmlinkage long sys_tgkill(int tgid, int pid, int sig)
+static int do_tkill(int tgid, int pid, int sig)
 {
-       struct siginfo info;
        int error;
+       struct siginfo info;
        struct task_struct *p;
 
-       /* This is only valid for single tasks */
-       if (pid <= 0 || tgid <= 0)
-               return -EINVAL;
-
+       error = -ESRCH;
        info.si_signo = sig;
        info.si_errno = 0;
        info.si_code = SI_TKILL;
@@ -2201,8 +2190,7 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig)
 
        read_lock(&tasklist_lock);
        p = find_task_by_pid(pid);
-       error = -ESRCH;
-       if (p && (p->tgid == tgid)) {
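+       /* tgid <= 0 means "any thread group": sys_tkill passes 0 below. */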
+       if (p && (tgid <= 0 || p->tgid == tgid)) {
                error = check_kill_permission(sig, &info, p);
                /*
                 * The null signal is a permissions and process existence
@@ -2216,47 +2204,40 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig)
                }
        }
        read_unlock(&tasklist_lock);
+
        return error;
 }
 
+/**
+ *  sys_tgkill - send signal to one specific thread
+ *  @tgid: the thread group ID of the thread
+ *  @pid: the PID of the thread
+ *  @sig: signal to be sent
+ *
+ *  This syscall also checks the @tgid and returns -ESRCH even if the PID
+ *  exists but no longer belongs to the target process. This guards
+ *  against threads exiting and their PIDs being reused.
+ */
+asmlinkage long sys_tgkill(int tgid, int pid, int sig)
+{
+       /* This is only valid for single tasks */
+       if (pid <= 0 || tgid <= 0)
+               return -EINVAL;
+
+       return do_tkill(tgid, pid, sig);
+}
+
 /*
  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
  */
 asmlinkage long
 sys_tkill(int pid, int sig)
 {
-       struct siginfo info;
-       int error;
-       struct task_struct *p;
-
        /* This is only valid for single tasks */
        if (pid <= 0)
                return -EINVAL;
 
-       info.si_signo = sig;
-       info.si_errno = 0;
-       info.si_code = SI_TKILL;
-       info.si_pid = current->tgid;
-       info.si_uid = current->uid;
-
-       read_lock(&tasklist_lock);
-       p = find_task_by_pid(pid);
-       error = -ESRCH;
-       if (p) {
-               error = check_kill_permission(sig, &info, p);
-               /*
-                * The null signal is a permissions and process existence
-                * probe.  No signal is actually delivered.
-                */
-               if (!error && sig && p->sighand) {
-                       spin_lock_irq(&p->sighand->siglock);
-                       handle_stop_signal(sig, p);
-                       error = specific_send_sig_info(sig, &info, p);
-                       spin_unlock_irq(&p->sighand->siglock);
-               }
-       }
-       read_unlock(&tasklist_lock);
-       return error;
+       return do_tkill(0, pid, sig);
 }
 
 asmlinkage long
@@ -2277,12 +2258,12 @@ sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
        return kill_proc_info(sig, &info, pid);
 }
 
-int
-do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
+int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 {
        struct k_sigaction *k;
+       sigset_t mask;
 
-       if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
+       if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
                return -EINVAL;
 
        k = &current->sighand->action[sig-1];
@@ -2301,6 +2282,9 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
                *oact = *k;
 
        if (act) {
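+               /* SIGKILL and SIGSTOP can never be blocked by a handler's mask. */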
+               sigdelsetmask(&act->sa.sa_mask,
+                             sigmask(SIGKILL) | sigmask(SIGSTOP));
+               *k = *act;
                /*
                 * POSIX 3.3.1.3:
                 *  "Setting a signal action to SIG_IGN for a signal that is
@@ -2313,35 +2297,17 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
                 *   be discarded, whether or not it is blocked"
                 */
                if (act->sa.sa_handler == SIG_IGN ||
-                   (act->sa.sa_handler == SIG_DFL &&
-                    sig_kernel_ignore(sig))) {
-                       /*
-                        * This is a fairly rare case, so we only take the
-                        * tasklist_lock once we're sure we'll need it.
-                        * Now we must do this little unlock and relock
-                        * dance to maintain the lock hierarchy.
-                        */
+                  (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
                        struct task_struct *t = current;
-                       spin_unlock_irq(&t->sighand->siglock);
-                       read_lock(&tasklist_lock);
-                       spin_lock_irq(&t->sighand->siglock);
-                       *k = *act;
-                       sigdelsetmask(&k->sa.sa_mask,
-                                     sigmask(SIGKILL) | sigmask(SIGSTOP));
-                       rm_from_queue(sigmask(sig), &t->signal->shared_pending);
+                       sigemptyset(&mask);
+                       sigaddset(&mask, sig);
+                       rm_from_queue_full(&mask, &t->signal->shared_pending);
                        do {
-                               rm_from_queue(sigmask(sig), &t->pending);
+                               rm_from_queue_full(&mask, &t->pending);
                                recalc_sigpending_tsk(t);
                                t = next_thread(t);
                        } while (t != current);
-                       spin_unlock_irq(&current->sighand->siglock);
-                       read_unlock(&tasklist_lock);
-                       return 0;
                }
-
-               *k = *act;
-               sigdelsetmask(&k->sa.sa_mask,
-                             sigmask(SIGKILL) | sigmask(SIGSTOP));
        }
 
        spin_unlock_irq(&current->sighand->siglock);
@@ -2355,18 +2321,18 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
        int error;
 
        if (uoss) {
-               oss.ss_sp = (void *) current->sas_ss_sp;
+               oss.ss_sp = (void __user *) current->sas_ss_sp;
                oss.ss_size = current->sas_ss_size;
                oss.ss_flags = sas_ss_flags(sp);
        }
 
        if (uss) {
-               void *ss_sp;
+               void __user *ss_sp;
                size_t ss_size;
                int ss_flags;
 
                error = -EFAULT;
-               if (verify_area(VERIFY_READ, uss, sizeof(*uss))
+               if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
                    || __get_user(ss_sp, &uss->ss_sp)
                    || __get_user(ss_flags, &uss->ss_flags)
                    || __get_user(ss_size, &uss->ss_size))
@@ -2412,14 +2378,19 @@ out:
        return error;
 }
 
+#ifdef __ARCH_WANT_SYS_SIGPENDING
+
 asmlinkage long
 sys_sigpending(old_sigset_t __user *set)
 {
        return do_sigpending(set, sizeof(*set));
 }
 
-#if !defined(__alpha__)
-/* Alpha has its own versions with special arguments.  */
+#endif
+
+#ifdef __ARCH_WANT_SYS_SIGPROCMASK
+/* Some platforms have their own version with special arguments; others
+   support only sys_rt_sigprocmask.  */
 
 asmlinkage long
 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
@@ -2469,8 +2440,9 @@ sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
 out:
        return error;
 }
+#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
 
-#ifndef __sparc__
+#ifdef __ARCH_WANT_SYS_RT_SIGACTION
 asmlinkage long
 sys_rt_sigaction(int sig,
                 const struct sigaction __user *act,
@@ -2498,11 +2470,10 @@ sys_rt_sigaction(int sig,
 out:
        return ret;
 }
-#endif /* __sparc__ */
-#endif
+#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
+
+#ifdef __ARCH_WANT_SYS_SGETMASK
 
-#if !defined(__alpha__) && !defined(__ia64__) && \
-    !defined(__arm__) && !defined(__s390__)
 /*
  * For backwards compatibility.  Functionality superseded by sigprocmask.
  */
@@ -2528,10 +2499,9 @@ sys_ssetmask(int newmask)
 
        return old;
 }
-#endif /* !defined(__alpha__) */
+#endif /* __ARCH_WANT_SYS_SGETMASK */
 
-#if !defined(__alpha__) && !defined(__ia64__) && !defined(__mips__) && \
-    !defined(__arm__)
+#ifdef __ARCH_WANT_SYS_SIGNAL
 /*
  * For backwards compatibility.  Functionality superseded by sigaction.
  */
@@ -2543,14 +2513,15 @@ sys_signal(int sig, __sighandler_t handler)
 
        new_sa.sa.sa_handler = handler;
        new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
+       sigemptyset(&new_sa.sa.sa_mask);
 
        ret = do_sigaction(sig, &new_sa, &old_sa);
 
        return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
 }
-#endif /* !alpha && !__ia64__ && !defined(__mips__) && !defined(__arm__) */
+#endif /* __ARCH_WANT_SYS_SIGNAL */
 
-#ifndef HAVE_ARCH_SYS_PAUSE
+#ifdef __ARCH_WANT_SYS_PAUSE
 
 asmlinkage long
 sys_pause(void)
@@ -2560,7 +2531,38 @@ sys_pause(void)
        return -ERESTARTNOHAND;
 }
 
-#endif /* HAVE_ARCH_SYS_PAUSE */
+#endif
+
+#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
+asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
+{
+       sigset_t newset;
+
+       /* XXX: Don't preclude handling different sized sigset_t's.  */
+       if (sigsetsize != sizeof(sigset_t))
+               return -EINVAL;
+
+       if (copy_from_user(&newset, unewset, sizeof(newset)))
+               return -EFAULT;
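+       /* Not even a temporary mask may block SIGKILL or SIGSTOP. */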
+       sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
+
+       spin_lock_irq(&current->sighand->siglock);
+       current->saved_sigmask = current->blocked;
+       current->blocked = newset;
+       recalc_sigpending();
+       spin_unlock_irq(&current->sighand->siglock);
+
+       current->state = TASK_INTERRUPTIBLE;
+       schedule();
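+       /* The arch signal code restores ->saved_sigmask when it sees TIF_RESTORE_SIGMASK. */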
+       set_thread_flag(TIF_RESTORE_SIGMASK);
+       return -ERESTARTNOHAND;
+}
+#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
+
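+/* Weak default; architectures override this to name special mappings such as "[vdso]". */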
+__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
+{
+       return NULL;
+}
 
 void __init signals_init(void)
 {
@@ -2568,7 +2570,5 @@ void __init signals_init(void)
                kmem_cache_create("sigqueue",
                                  sizeof(struct sigqueue),
                                  __alignof__(struct sigqueue),
-                                 0, NULL, NULL);
-       if (!sigqueue_cachep)
-               panic("signals_init(): cannot create sigqueue SLAB cache");
+                                 SLAB_PANIC, NULL, NULL);
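+       /* SLAB_PANIC: kmem_cache_create() panics on failure, replacing the explicit check. */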
 }