- sync fedora branch
[linux-2.6.git] / kernel / signal.c
index e74c821..a001981 100644 (file)
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/ptrace.h>
+#include <linux/posix-timers.h>
+#include <linux/signal.h>
+#include <linux/audit.h>
 #include <asm/param.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/siginfo.h>
 
-extern void k_getrusage(struct task_struct *, int, struct rusage *);
-
 /*
  * SLAB caches for signal bits.
  */
@@ -153,11 +154,6 @@ static kmem_cache_t *sigqueue_cachep;
        (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
         (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
 
-#define sig_avoid_stop_race() \
-       (sigtestsetmask(&current->pending.signal, M(SIGCONT) | M(SIGKILL)) || \
-        sigtestsetmask(&current->signal->shared_pending.signal, \
-                                                 M(SIGCONT) | M(SIGKILL)))
-
 static int sig_ignored(struct task_struct *t, int sig)
 {
        void __user * handler;
@@ -265,19 +261,23 @@ next_signal(struct sigpending *pending, sigset_t *mask)
        return sig;
 }
 
-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, int flags)
+static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
+                                        int override_rlimit)
 {
        struct sigqueue *q = NULL;
 
-       if (atomic_read(&t->user->sigpending) <
+       atomic_inc(&t->user->sigpending);
+       if (override_rlimit ||
+           atomic_read(&t->user->sigpending) <=
                        t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
                q = kmem_cache_alloc(sigqueue_cachep, flags);
-       if (q) {
+       if (unlikely(q == NULL)) {
+               atomic_dec(&t->user->sigpending);
+       } else {
                INIT_LIST_HEAD(&q->list);
                q->flags = 0;
                q->lock = NULL;
                q->user = get_uid(t->user);
-               atomic_inc(&q->user->sigpending);
        }
        return(q);
 }
@@ -352,7 +352,9 @@ void __exit_signal(struct task_struct *tsk)
        if (!atomic_read(&sig->count))
                BUG();
        spin_lock(&sighand->siglock);
+       posix_cpu_timers_exit(tsk);
        if (atomic_dec_and_test(&sig->count)) {
+               posix_cpu_timers_exit_group(tsk);
                if (tsk == sig->curr_target)
                        sig->curr_target = next_thread(tsk);
                tsk->signal = NULL;
@@ -380,12 +382,13 @@ void __exit_signal(struct task_struct *tsk)
                 * We won't ever get here for the group leader, since it
                 * will have been the last reference on the signal_struct.
                 */
-               sig->utime += tsk->utime;
-               sig->stime += tsk->stime;
+               sig->utime = cputime_add(sig->utime, tsk->utime);
+               sig->stime = cputime_add(sig->stime, tsk->stime);
                sig->min_flt += tsk->min_flt;
                sig->maj_flt += tsk->maj_flt;
                sig->nvcsw += tsk->nvcsw;
                sig->nivcsw += tsk->nivcsw;
+               sig->sched_time += tsk->sched_time;
                spin_unlock(&sighand->siglock);
                sig = NULL;     /* Marker for below.  */
        }
@@ -407,6 +410,7 @@ void __exit_signal(struct task_struct *tsk)
                 * signals are constrained to threads inside the group.
                 */
                exit_itimers(sig);
+               exit_thread_group_keys(sig);
                kmem_cache_free(signal_cachep, sig);
        }
 }
@@ -520,7 +524,16 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 {
        int sig = 0;
 
-       sig = next_signal(pending, mask);
+       /* SIGKILL must take priority; otherwise it is quite easy
+        * to create an unkillable process by sending a signal
+        * numbered below SIGKILL to itself. */
+       if (unlikely(sigismember(&pending->signal, SIGKILL))) {
+               if (!sigismember(mask, SIGKILL))
+                       sig = SIGKILL;
+       }
+
+       if (likely(!sig))
+               sig = next_signal(pending, mask);
        if (sig) {
                if (current->notifier) {
                        if (sigismember(current->notifier_mask, sig)) {
@@ -552,10 +565,33 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
        if (!signr)
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info);
+       if (signr && unlikely(sig_kernel_stop(signr))) {
+               /*
+                * Set a marker that we have dequeued a stop signal.  Our
+                * caller might release the siglock and then the pending
+                * stop signal it is about to process is no longer in the
+                * pending bitmasks, but must still be cleared by a SIGCONT
+                * (and overruled by a SIGKILL).  So those cases clear this
+                * shared flag after we've set it.  Note that this flag may
+                * remain set after the signal we return is ignored or
+                * handled.  That doesn't matter because its only purpose
+                * is to alert stop-signal processing code when another
+                * processor has come along and cleared the flag.
+                */
+               tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
+       }
        if ( signr &&
             ((info->si_code & __SI_MASK) == __SI_TIMER) &&
             info->si_sys_private){
+               /*
+                * Release the siglock to ensure proper locking order
+                * of timer locks outside of siglocks.  Note, we leave
+                * irqs disabled here, since the posix-timers code is
+                * about to disable them again anyway.
+                */
+               spin_unlock(&tsk->sighand->siglock);
                do_schedule_next_timer(info);
+               spin_lock(&tsk->sighand->siglock);
        }
        return signr;
 }
@@ -578,15 +614,15 @@ void signal_wake_up(struct task_struct *t, int resume)
        set_tsk_thread_flag(t, TIF_SIGPENDING);
 
        /*
-        * If resume is set, we want to wake it up in the TASK_STOPPED case.
-        * We don't check for TASK_STOPPED because there is a race with it
+        * For SIGKILL, we want to wake it up in the stopped/traced case.
+        * We don't check t->state here because there is a race with it
         * executing another processor and just now entering stopped state.
-        * By calling wake_up_process any time resume is set, we ensure
-        * the process will wake up and handle its stop or death signal.
+        * By using wake_up_state, we ensure the process will wake up and
+        * handle its death signal.
         */
        mask = TASK_INTERRUPTIBLE;
        if (resume)
-               mask |= TASK_STOPPED;
+               mask |= TASK_STOPPED | TASK_TRACED;
        if (!wake_up_state(t, mask))
                kick_process(t);
 }
@@ -624,16 +660,15 @@ static int check_kill_permission(int sig, struct siginfo *info,
        int error = -EINVAL;
        int user;
 
-       if (sig < 0 || sig > _NSIG)
+       if (!valid_signal(sig))
                return error;
-       user = (!info ||
-               (info != SEND_SIG_PRIV &&
-                info != SEND_SIG_FORCED &&
-                SI_FROMUSER(info)));
+
+       user = (!info || ((unsigned long)info != 1 &&
+               (unsigned long)info != 2 && SI_FROMUSER(info)));
 
        error = -EPERM;
-       if (user && (sig != SIGCONT ||
-                    current->signal->session != t->signal->session)
+       if (user && ((sig != SIGCONT) ||
+               (current->signal->session != t->signal->session))
            && (current->euid ^ t->suid) && (current->euid ^ t->uid)
            && (current->uid ^ t->suid) && (current->uid ^ t->uid)
            && !capable(CAP_KILL))
@@ -643,7 +678,10 @@ static int check_kill_permission(int sig, struct siginfo *info,
        if (user && !vx_check(vx_task_xid(t), VX_ADMIN|VX_IDENT))
                return error;
 
-       return security_task_kill(t, info, sig);
+       error = security_task_kill(t, info, sig);
+       if (!error)
+               audit_signal_info(sig, t); /* Let audit system see the signal */
+       return error;
 }
 
 /* forward decl */
@@ -662,6 +700,12 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 {
        struct task_struct *t;
 
+       if (p->signal->flags & SIGNAL_GROUP_EXIT)
+               /*
+                * The process is in the middle of dying already.
+                */
+               return;
+
        if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal.  Remove SIGCONT from all queues.
@@ -691,7 +735,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                         * the SIGCHLD was pending on entry to this kill.
                         */
                        p->signal->group_stop_count = 0;
-                       p->signal->stop_state = 1;
+                       p->signal->flags = SIGNAL_STOP_CONTINUED;
                        spin_unlock(&p->sighand->siglock);
                        if (p->ptrace & PT_PTRACED)
                                do_notify_parent_cldstop(p, p->parent,
@@ -733,12 +777,12 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                        t = next_thread(t);
                } while (t != p);
 
-               if (p->signal->stop_state > 0) {
+               if (p->signal->flags & SIGNAL_STOP_STOPPED) {
                        /*
                         * We were in fact stopped, and are now continued.
                         * Notify the parent with CLD_CONTINUED.
                         */
-                       p->signal->stop_state = -1;
+                       p->signal->flags = SIGNAL_STOP_CONTINUED;
                        p->signal->group_exit_code = 0;
                        spin_unlock(&p->sighand->siglock);
                        if (p->ptrace & PT_PTRACED)
@@ -750,7 +794,20 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                                        p->group_leader->real_parent,
                                                         CLD_CONTINUED);
                        spin_lock(&p->sighand->siglock);
+               } else {
+                       /*
+                        * We are not stopped, but there could be a stop
+                        * signal in the middle of being processed after
+                        * being removed from the queue.  Clear that too.
+                        */
+                       p->signal->flags = 0;
                }
+       } else if (sig == SIGKILL) {
+               /*
+                * Make sure that any pending stop signal already dequeued
+                * is undone by the wakeup for SIGKILL.
+                */
+               p->signal->flags = 0;
        }
 }
 
@@ -775,7 +832,9 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
           make sure at least one signal gets delivered and don't
           pass on the info struct.  */
 
-       q = __sigqueue_alloc(t, GFP_ATOMIC);
+       q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
+                                            ((unsigned long) info < 2 ||
+                                             info->si_code >= 0)));
        if (q) {
                list_add_tail(&q->list, &signals->list);
                switch ((unsigned long) info) {
@@ -829,10 +888,7 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 
        if (!irqs_disabled())
                BUG();
-#ifdef CONFIG_SMP
-       if (!spin_is_locked(&t->sighand->siglock))
-               BUG();
-#endif
+       assert_spin_locked(&t->sighand->siglock);
 
        if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
                /*
@@ -916,12 +972,12 @@ __group_complete_signal(int sig, struct task_struct *p)
        struct task_struct *t;
 
        /*
-        * Don't bother zombies and stopped tasks (but
-        * SIGKILL will punch through stopped state)
+        * Don't bother traced and stopped tasks (but
+        * SIGKILL will punch through that).
         */
-       mask = EXIT_DEAD | EXIT_ZOMBIE | TASK_TRACED;
-       if (sig != SIGKILL)
-               mask |= TASK_STOPPED;
+       mask = TASK_STOPPED | TASK_TRACED;
+       if (sig == SIGKILL)
+               mask = 0;
 
        /*
         * Now find a thread we can wake up to take the signal off the queue.
@@ -964,7 +1020,7 @@ __group_complete_signal(int sig, struct task_struct *p)
         * Found a killable thread.  If the signal will be fatal,
         * then start taking the whole group down immediately.
         */
-       if (sig_fatal(p, sig) && !p->signal->group_exit &&
+       if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
            !sigismember(&t->real_blocked, sig) &&
            (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
                /*
@@ -977,7 +1033,7 @@ __group_complete_signal(int sig, struct task_struct *p)
                         * running and doing things after a slower
                         * thread has the fatal signal pending.
                         */
-                       p->signal->group_exit = 1;
+                       p->signal->flags = SIGNAL_GROUP_EXIT;
                        p->signal->group_exit_code = sig;
                        p->signal->group_stop_count = 0;
                        t = p;
@@ -1021,15 +1077,12 @@ __group_complete_signal(int sig, struct task_struct *p)
        return;
 }
 
-static int
+int
 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
        int ret = 0;
 
-#ifdef CONFIG_SMP
-       if (!spin_is_locked(&p->sighand->siglock))
-               BUG();
-#endif
+       assert_spin_locked(&p->sighand->siglock);
        handle_stop_signal(sig, p);
 
        if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
@@ -1066,6 +1119,7 @@ void zap_other_threads(struct task_struct *p)
 {
        struct task_struct *t;
 
+       p->signal->flags = SIGNAL_GROUP_EXIT;
        p->signal->group_stop_count = 0;
 
        if (thread_group_empty(p))
@@ -1075,7 +1129,7 @@ void zap_other_threads(struct task_struct *p)
                /*
                 * Don't bother with already dead threads
                 */
-               if (t->exit_state & (EXIT_ZOMBIE|EXIT_DEAD))
+               if (t->exit_state)
                        continue;
 
                /*
@@ -1163,6 +1217,37 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
        return error;
 }
 
+int print_fatal_signals = 0;
+
+static void print_fatal_signal(struct pt_regs *regs, int signr)
+{
+       printk("%s/%d: potentially unexpected fatal signal %d.\n",
+               current->comm, current->pid, signr);
+
+#ifdef __i386__
+       printk("code at %08lx: ", regs->eip);
+       {
+               int i;
+               for (i = 0; i < 16; i++) {
+                       unsigned char insn;
+
+                       __get_user(insn, (unsigned char *)(regs->eip + i));
+                       printk("%02x ", insn);
+               }
+       }
+#endif
+       printk("\n");
+       show_regs(regs);
+}
+
+static int __init setup_print_fatal_signals(char *str)
+{
+       get_option(&str, &print_fatal_signals);
+
+       return 1;
+}
+
+__setup("print-fatal-signals=", setup_print_fatal_signals);
 
 /*
  * kill_something_info() interprets pid in interesting ways just like kill(2).
@@ -1215,7 +1300,7 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
         * Make sure legacy kernel users don't send in bad values
         * (normal paths check this in check_kill_permission).
         */
-       if (sig < 0 || sig > _NSIG)
+       if (!valid_signal(sig))
                return -EINVAL;
 
        /*
@@ -1303,7 +1388,7 @@ struct sigqueue *sigqueue_alloc(void)
 {
        struct sigqueue *q;
 
-       if ((q = __sigqueue_alloc(current, GFP_KERNEL)))
+       if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
                q->flags |= SIGQUEUE_PREALLOC;
        return(q);
 }
@@ -1418,28 +1503,12 @@ out:
 }
 
 /*
- * Joy. Or not. Pthread wants us to wake up every thread
- * in our parent group.
+ * Wake up any threads in the parent blocked in wait* syscalls.
  */
-static void __wake_up_parent(struct task_struct *p,
+static inline void __wake_up_parent(struct task_struct *p,
                                    struct task_struct *parent)
 {
-       struct task_struct *tsk = parent;
-
-       /*
-        * Fortunately this is not necessary for thread groups:
-        */
-       if (p->tgid == tsk->tgid) {
-               wake_up_interruptible_sync(&tsk->wait_chldexit);
-               return;
-       }
-
-       do {
-               wake_up_interruptible_sync(&tsk->wait_chldexit);
-               tsk = next_thread(tsk);
-               if (tsk->signal != parent->signal)
-                       BUG();
-       } while (tsk != parent);
+       wake_up_interruptible_sync(&parent->signal->wait_chldexit);
 }
 
 /*
@@ -1453,8 +1522,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
        unsigned long flags;
        struct sighand_struct *psig;
 
-       if (sig == -1)
-               BUG();
+       BUG_ON(sig == -1);
 
        /* do_notify_parent_cldstop should have been called instead.  */
        BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
@@ -1468,8 +1536,10 @@ void do_notify_parent(struct task_struct *tsk, int sig)
        info.si_uid = tsk->uid;
 
        /* FIXME: find out whether or not this is supposed to be c*time. */
-       info.si_utime = tsk->utime + tsk->signal->utime;
-       info.si_stime = tsk->stime + tsk->signal->stime;
+       info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
+                                                      tsk->signal->utime));
+       info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
+                                                      tsk->signal->stime));
 
        info.si_status = tsk->exit_code & 0x7f;
        if (tsk->exit_code & 0x80)
@@ -1505,7 +1575,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
                if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
                        sig = 0;
        }
-       if (sig > 0 && sig <= _NSIG)
+       if (valid_signal(sig) && sig > 0)
                __group_send_sig_info(sig, &info, tsk->parent);
        __wake_up_parent(tsk, tsk->parent);
        spin_unlock_irqrestore(&psig->siglock, flags);
@@ -1525,8 +1595,8 @@ do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
        info.si_uid = tsk->uid;
 
        /* FIXME: find out whether or not this is supposed to be c*time. */
-       info.si_utime = tsk->utime;
-       info.si_stime = tsk->stime;
+       info.si_utime = cputime_to_jiffies(tsk->utime);
+       info.si_stime = cputime_to_jiffies(tsk->stime);
 
        info.si_code = why;
        switch (why) {
@@ -1562,11 +1632,12 @@ do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
  * We always set current->last_siginfo while stopped here.
  * That makes it a way to test a stopped process for
  * being ptrace-stopped vs being job-control-stopped.
+ *
+ * If we actually decide not to stop at all because the tracer is gone,
+ * we leave nostop_code in current->exit_code.
  */
-static void ptrace_stop(int exit_code, siginfo_t *info)
+static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
 {
-       BUG_ON(!(current->ptrace & PT_PTRACED));
-
        /*
         * If there is a group stop in progress,
         * we must participate in the bookkeeping.
@@ -1581,9 +1652,24 @@ static void ptrace_stop(int exit_code, siginfo_t *info)
        set_current_state(TASK_TRACED);
        spin_unlock_irq(&current->sighand->siglock);
        read_lock(&tasklist_lock);
-       do_notify_parent_cldstop(current, current->parent, CLD_TRAPPED);
-       read_unlock(&tasklist_lock);
-       schedule();
+       if (likely(current->ptrace & PT_PTRACED) &&
+           likely(current->parent != current->real_parent ||
+                  !(current->ptrace & PT_ATTACHED)) &&
+           (likely(current->parent->signal != current->signal) ||
+            !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
+               do_notify_parent_cldstop(current, current->parent,
+                                        CLD_TRAPPED);
+               read_unlock(&tasklist_lock);
+               schedule();
+       } else {
+               /*
+                * By the time we got the lock, our tracer went away.
+                * Don't stop here.
+                */
+               read_unlock(&tasklist_lock);
+               set_current_state(TASK_RUNNING);
+               current->exit_code = nostop_code;
+       }
 
        /*
         * We are back.  Now reacquire the siglock before touching
@@ -1614,41 +1700,10 @@ void ptrace_notify(int exit_code)
 
        /* Let the debugger run.  */
        spin_lock_irq(&current->sighand->siglock);
-       ptrace_stop(exit_code, &info);
+       ptrace_stop(exit_code, 0, &info);
        spin_unlock_irq(&current->sighand->siglock);
 }
 
-int print_fatal_signals = 0;
-
-static void print_fatal_signal(struct pt_regs *regs, int signr)
-{
-       int i;
-       unsigned char insn;
-       printk("%s/%d: potentially unexpected fatal signal %d.\n",
-               current->comm, current->pid, signr);
-
-#ifdef __i386__
-       printk("code at %08lx: ", regs->eip);
-       for (i = 0; i < 16; i++) {
-               __get_user(insn, (unsigned char *)(regs->eip + i));
-               printk("%02x ", insn);
-       }
-#endif
-       printk("\n");
-       show_regs(regs);
-}
-
-static int __init setup_print_fatal_signals(char *str)
-{
-       get_option (&str, &print_fatal_signals);
-
-       return 1;
-}
-
-__setup("print-fatal-signals=", setup_print_fatal_signals);
-
-#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER
-
 static void
 finish_stop(int stop_count)
 {
@@ -1681,15 +1736,18 @@ finish_stop(int stop_count)
 /*
  * This performs the stopping for SIGSTOP and other stop signals.
  * We have to stop all threads in the thread group.
+ * Returns nonzero if we've actually stopped and released the siglock.
+ * Returns zero if we didn't stop and still hold the siglock.
  */
-static void
+static int
 do_signal_stop(int signr)
 {
        struct signal_struct *sig = current->signal;
        struct sighand_struct *sighand = current->sighand;
        int stop_count = -1;
 
-       /* spin_lock_irq(&sighand->siglock) is now done in caller */
+       if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
+               return 0;
 
        if (sig->group_stop_count > 0) {
                /*
@@ -1701,7 +1759,7 @@ do_signal_stop(int signr)
                current->exit_code = signr;
                set_current_state(TASK_STOPPED);
                if (stop_count == 0)
-                       sig->stop_state = 1;
+                       sig->flags = SIGNAL_STOP_STOPPED;
                spin_unlock_irq(&sighand->siglock);
        }
        else if (thread_group_empty(current)) {
@@ -1710,7 +1768,7 @@ do_signal_stop(int signr)
                 */
                current->exit_code = current->signal->group_exit_code = signr;
                set_current_state(TASK_STOPPED);
-               sig->stop_state = 1;
+               sig->flags = SIGNAL_STOP_STOPPED;
                spin_unlock_irq(&sighand->siglock);
        }
        else {
@@ -1731,25 +1789,17 @@ do_signal_stop(int signr)
                read_lock(&tasklist_lock);
                spin_lock_irq(&sighand->siglock);
 
-               if (unlikely(sig->group_exit)) {
-                       /*
-                        * There is a group exit in progress now.
-                        * We'll just ignore the stop and process the
-                        * associated fatal signal.
-                        */
-                       spin_unlock_irq(&sighand->siglock);
-                       read_unlock(&tasklist_lock);
-                       return;
-               }
-
-               if (unlikely(sig_avoid_stop_race())) {
+               if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
                        /*
-                        * Either a SIGCONT or a SIGKILL signal was
-                        * posted in the siglock-not-held window.
+                        * Another stop or continue happened while we
+                        * didn't have the lock.  We can just swallow this
+                        * signal now.  If we raced with a SIGCONT, that
+                        * should have just cleared it now.  If we raced
+                        * with another processor delivering a stop signal,
+                        * then the SIGCONT that wakes us up should clear it.
                         */
-                       spin_unlock_irq(&sighand->siglock);
                        read_unlock(&tasklist_lock);
-                       return;
+                       return 0;
                }
 
                if (sig->group_stop_count == 0) {
@@ -1777,13 +1827,14 @@ do_signal_stop(int signr)
                current->exit_code = signr;
                set_current_state(TASK_STOPPED);
                if (stop_count == 0)
-                       sig->stop_state = 1;
+                       sig->flags = SIGNAL_STOP_STOPPED;
 
                spin_unlock_irq(&sighand->siglock);
                read_unlock(&tasklist_lock);
        }
 
        finish_stop(stop_count);
+       return 1;
 }
 
 /*
@@ -1805,7 +1856,7 @@ static inline int handle_group_stop(void)
                return 0;
        }
 
-       if (current->signal->group_exit)
+       if (current->signal->flags & SIGNAL_GROUP_EXIT)
                /*
                 * Group stop is so another thread can do a core dump,
                 * or else we are racing against a death signal.
@@ -1819,7 +1870,7 @@ static inline int handle_group_stop(void)
         */
        stop_count = --current->signal->group_stop_count;
        if (stop_count == 0)
-               current->signal->stop_state = 1;
+               current->signal->flags = SIGNAL_STOP_STOPPED;
        current->exit_code = current->signal->group_exit_code;
        set_current_state(TASK_STOPPED);
        spin_unlock_irq(&current->sighand->siglock);
@@ -1852,12 +1903,11 @@ relock:
                        print_fatal_signal(regs, signr);
                        spin_lock_irq(&current->sighand->siglock);
                }
-
                if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
                        ptrace_signal_deliver(regs, cookie);
 
                        /* Let the debugger run.  */
-                       ptrace_stop(signr, info);
+                       ptrace_stop(signr, signr, info);
 
                        /* We're back.  Did the debugger cancel the sig?  */
                        signr = current->exit_code;
@@ -1908,6 +1958,11 @@ relock:
                if (current->pid == 1)
                        continue;
 
+               /* virtual init is protected against user signals */
+               if ((info->si_code == SI_USER) &&
+                       vx_current_initpid(current->pid))
+                       continue;
+
                if (sig_kernel_stop(signr)) {
                        /*
                         * The default action is to stop all threads in
@@ -1919,28 +1974,27 @@ relock:
                         * This allows an intervening SIGCONT to be posted.
                         * We need to check for that and bail out if necessary.
                         */
-                       if (signr == SIGSTOP) {
-                               do_signal_stop(signr); /* releases siglock */
-                               goto relock;
-                       }
-                       spin_unlock_irq(&current->sighand->siglock);
+                       if (signr != SIGSTOP) {
+                               spin_unlock_irq(&current->sighand->siglock);
 
-                       /* signals can be posted during this window */
+                               /* signals can be posted during this window */
 
-                       if (is_orphaned_pgrp(process_group(current)))
-                               goto relock;
+                               if (is_orphaned_pgrp(process_group(current)))
+                                       goto relock;
 
-                       spin_lock_irq(&current->sighand->siglock);
-                       if (unlikely(sig_avoid_stop_race())) {
-                               /*
-                                * Either a SIGCONT or a SIGKILL signal was
-                                * posted in the siglock-not-held window.
-                                */
-                               continue;
+                               spin_lock_irq(&current->sighand->siglock);
                        }
 
-                       do_signal_stop(signr); /* releases siglock */
-                       goto relock;
+                       if (likely(do_signal_stop(signr))) {
+                               /* It released the siglock.  */
+                               goto relock;
+                       }
+
+                       /*
+                        * We didn't actually stop, due to a race
+                        * with SIGCONT or something like that.
+                        */
+                       continue;
                }
 
                spin_unlock_irq(&current->sighand->siglock);
@@ -1973,8 +2027,6 @@ relock:
        return signr;
 }
 
-#endif
-
 EXPORT_SYMBOL(recalc_sigpending);
 EXPORT_SYMBOL_GPL(dequeue_signal);
 EXPORT_SYMBOL(flush_signals);
@@ -1988,6 +2040,7 @@ EXPORT_SYMBOL(sigprocmask);
 EXPORT_SYMBOL(block_all_signals);
 EXPORT_SYMBOL(unblock_all_signals);
 
+
 /*
  * System call entry points.
  */
@@ -2229,6 +2282,8 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
                        current->state = TASK_INTERRUPTIBLE;
                        timeout = schedule_timeout(timeout);
 
+                       if (current->flags & PF_FREEZE)
+                               refrigerator(PF_FREEZE);
                        spin_lock_irq(&current->sighand->siglock);
                        sig = dequeue_signal(current, &these, &info);
                        current->blocked = current->real_blocked;
@@ -2376,7 +2431,7 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
 {
        struct k_sigaction *k;
 
-       if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
+       if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
                return -EINVAL;
 
        k = &current->sighand->action[sig-1];
@@ -2460,7 +2515,7 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
                int ss_flags;
 
                error = -EFAULT;
-               if (verify_area(VERIFY_READ, uss, sizeof(*uss))
+               if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
                    || __get_user(ss_sp, &uss->ss_sp)
                    || __get_user(ss_flags, &uss->ss_flags)
                    || __get_user(ss_size, &uss->ss_size))