This commit was manufactured by cvs2svn to create tag
diff --git a/kernel/signal.c b/kernel/signal.c
index e734221..b3574b0 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -32,9 +32,6 @@
 
 static kmem_cache_t *sigqueue_cachep;
 
-atomic_t nr_queued_signals;
-int max_queued_signals = 1024;
-
 /*
  * In POSIX a signal is sent either to a specific thread (Linux task)
  * or to the process as a whole (Linux thread group).  How the signal
@@ -160,7 +157,7 @@ int max_queued_signals = 1024;
 
 static int sig_ignored(struct task_struct *t, int sig)
 {
-       void * handler;
+       void __user * handler;
 
        /*
         * Tracers always want to know about signals..
@@ -265,17 +262,19 @@ next_signal(struct sigpending *pending, sigset_t *mask)
        return sig;
 }
 
-struct sigqueue *__sigqueue_alloc(void)
+static struct sigqueue *__sigqueue_alloc(void)
 {
-       struct sigqueue *q = 0;
+       struct sigqueue *q = NULL;
 
-       if (atomic_read(&nr_queued_signals) < max_queued_signals)
+       if (atomic_read(&current->user->sigpending) <
+                       current->rlim[RLIMIT_SIGPENDING].rlim_cur)
                q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
        if (q) {
-               atomic_inc(&nr_queued_signals);
                INIT_LIST_HEAD(&q->list);
                q->flags = 0;
-               q->lock = 0;
+               q->lock = NULL;
+               q->user = get_uid(current->user);
+               atomic_inc(&q->user->sigpending);
        }
        return(q);
 }
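
The allocation path above drops the global nr_queued_signals/max_queued_signals pair in favour of per-user accounting against RLIMIT_SIGPENDING: the entry is only allocated while the sending user's pending count is below the limit, and a successful allocation then pins a uid reference and bumps that count. The limit is observable from userspace; the sketch below (illustrative only, assuming a kernel with RLIMIT_SIGPENDING and POSIX realtime signals) drives sigqueue() into the limit:

/* Illustrative userspace demo, not part of the patch: drive sigqueue()
 * into the per-user RLIMIT_SIGPENDING limit. */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 4, .rlim_max = 4 }; /* tiny limit for the demo */
	union sigval val = { .sival_int = 0 };
	sigset_t block;
	int i;

	/* RLIMIT_SIGPENDING caps how many siginfo entries this real uid may queue. */
	if (setrlimit(RLIMIT_SIGPENDING, &rl) < 0)
		perror("setrlimit");

	/* Keep the signal blocked so queued entries stay pending and keep counting. */
	sigemptyset(&block);
	sigaddset(&block, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &block, NULL);

	for (i = 0; i < 8; i++) {
		if (sigqueue(getpid(), SIGRTMIN, val) < 0) {
			/* Once the pending count reaches the limit, the kernel's
			 * __sigqueue_alloc() fails and sigqueue() reports EAGAIN. */
			printf("sigqueue #%d failed: %s\n", i, strerror(errno));
			break;
		}
	}
	return 0;
}

On a real system the default limit is far larger and is shared by every process running under the same real uid; lowering rlim_cur makes the EAGAIN easy to reach.
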
@@ -284,8 +283,9 @@ static inline void __sigqueue_free(struct sigqueue *q)
 {
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
+       atomic_dec(&q->user->sigpending);
+       free_uid(q->user);
        kmem_cache_free(sigqueue_cachep, q);
-       atomic_dec(&nr_queued_signals);
 }
 
 static void flush_sigqueue(struct sigpending *queue)
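
__sigqueue_free() is the matching half of that accounting: the uid reference taken with get_uid() at allocation time is dropped with free_uid(), and the user's pending count is decremented before the slab object is released. A schematic of the pairing (simplified stand-ins, not the kernel's real struct user_struct):

/* Schematic only; the names and types here are simplified stand-ins. */
struct demo_user {
	int refcount;   /* lifetime of the per-user bookkeeping (get_uid/free_uid) */
	int sigpending; /* signals currently queued on behalf of this user */
};

struct demo_sigqueue {
	struct demo_user *user;
};

static void demo_charge(struct demo_sigqueue *q, struct demo_user *u)
{
	u->refcount++;           /* get_uid(): the queue entry pins the user */
	u->sigpending++;         /* counted against RLIMIT_SIGPENDING */
	q->user = u;
}

static void demo_uncharge(struct demo_sigqueue *q)
{
	q->user->sigpending--;   /* undo exactly what the alloc path did ... */
	q->user->refcount--;     /* ... then drop the reference (free_uid)   */
}
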
@@ -417,6 +417,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
        }
 }
 
+EXPORT_SYMBOL_GPL(flush_signal_handlers);
 
 /* Notify the system that a driver wants to block all signals for this
  * process, and wants to be notified if any signals at all were to be
@@ -454,7 +455,7 @@ unblock_all_signals(void)
 
 static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 {
-       struct sigqueue *q, *first = 0;
+       struct sigqueue *q, *first = NULL;
        int still_pending = 0;
 
        if (unlikely(!sigismember(&list->signal, sig)))
@@ -699,7 +700,8 @@ static void handle_stop_signal(int sig, struct task_struct *p)
        }
 }
 
-static int send_signal(int sig, struct siginfo *info, struct sigpending *signals)
+static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
+                       struct sigpending *signals)
 {
        struct sigqueue * q = NULL;
        int ret = 0;
@@ -719,12 +721,14 @@ static int send_signal(int sig, struct siginfo *info, struct sigpending *signals
           make sure at least one signal gets delivered and don't
           pass on the info struct.  */
 
-       if (atomic_read(&nr_queued_signals) < max_queued_signals)
+       if (atomic_read(&t->user->sigpending) <
+                       t->rlim[RLIMIT_SIGPENDING].rlim_cur)
                q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
 
        if (q) {
-               atomic_inc(&nr_queued_signals);
                q->flags = 0;
+               q->user = get_uid(t->user);
+               atomic_inc(&q->user->sigpending);
                list_add_tail(&q->list, &signals->list);
                switch ((unsigned long) info) {
                case 0:
@@ -798,7 +802,7 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
        if (LEGACY_QUEUE(&t->pending, sig))
                goto out;
 
-       ret = send_signal(sig, info, &t->pending);
+       ret = send_signal(sig, info, t, &t->pending);
        if (!ret && !sigismember(&t->blocked, sig))
                signal_wake_up(t, sig == SIGKILL);
 out:
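
send_signal() now takes the task the signal is aimed at, so this caller (and __group_send_sig_info() below) charges the queue entry to the target's user instead of a global counter. The comment in the send_signal() hunk above also explains the fallback when allocation fails: a non-realtime signal is still delivered, just without siginfo, because it only needs a bit in the pending mask, while realtime signals must queue one entry per instance (the same distinction LEGACY_QUEUE() tests for). The difference is visible from userspace; a small illustrative sketch (the counts shown are the typical Linux behaviour):

/* Illustrative userspace sketch: legacy signals coalesce while blocked,
 * realtime signals queue one entry per send. */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t usr1_count, rt_count;

static void on_usr1(int sig) { (void)sig; usr1_count++; }
static void on_rt(int sig)   { (void)sig; rt_count++; }

int main(void)
{
	union sigval val = { .sival_int = 0 };
	sigset_t block, old;
	int i;

	signal(SIGUSR1, on_usr1);
	signal(SIGRTMIN, on_rt);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigaddset(&block, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &block, &old);

	for (i = 0; i < 5; i++) {
		kill(getpid(), SIGUSR1);           /* legacy: one pending bit, coalesces  */
		sigqueue(getpid(), SIGRTMIN, val); /* realtime: every instance is queued  */
	}
	sigprocmask(SIG_SETMASK, &old, NULL);      /* deliver everything now pending */

	printf("SIGUSR1 handled %d time(s), SIGRTMIN handled %d time(s)\n",
	       (int)usr1_count, (int)rt_count);    /* typically 1 and 5 */
	return 0;
}
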
@@ -999,7 +1003,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
         * We always use the shared queue for process-wide signals,
         * to avoid several races.
         */
-       ret = send_signal(sig, info, &p->signal->shared_pending);
+       ret = send_signal(sig, info, p, &p->signal->shared_pending);
        if (unlikely(ret))
                return ret;
 
@@ -1051,6 +1055,9 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
        unsigned long flags;
        int ret;
 
+       if (!vx_check(vx_task_xid(p), VX_ADMIN|VX_WATCH|VX_IDENT))
+               return -ESRCH;
+
        ret = check_kill_permission(sig, info, p);
        if (!ret && sig && p->sighand) {
                spin_lock_irqsave(&p->sighand->siglock, flags);
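
The vx_check() test comes from the Linux-VServer patches carried in this tree: unless the sender's context may administer or watch the target's context (or the two share a context), the target is treated as nonexistent and the caller gets -ESRCH, exactly as if the pid were unused. The helper below is a hypothetical illustration of that pattern only; vx_task_xid(), VX_ADMIN, VX_WATCH and VX_IDENT are defined by the VServer patches, not here:

/* Hypothetical sketch of a context-isolation check, written for this note.
 * It is not the VServer implementation; only the pattern is the same:
 * tasks in a foreign context are reported as nonexistent (-ESRCH). */
struct demo_task {
	int ctx_id;                 /* isolation context the task lives in */
};

#define DEMO_CTX_ADMIN	0       /* illustrative: context 0 may signal anyone */

static int demo_ctx_visible(const struct demo_task *sender,
			    const struct demo_task *target)
{
	if (sender->ctx_id == DEMO_CTX_ADMIN)
		return 1;                            /* admin context sees every task */
	return sender->ctx_id == target->ctx_id;     /* otherwise same context only   */
}
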
@@ -1071,23 +1078,19 @@ int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
        struct task_struct *p;
        struct list_head *l;
        struct pid *pid;
-       int retval;
-       int found;
+       int retval, success;
 
        if (pgrp <= 0)
                return -EINVAL;
 
-       found = 0;
-       retval = 0;
+       success = 0;
+       retval = -ESRCH;
        for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) {
-               int err;
-
-               found = 1;
-               err = group_send_sig_info(sig, info, p);
-               if (!retval)
-                       retval = err;
+               int err = group_send_sig_info(sig, info, p);
+               success |= !err;
+               retval = err;
        }
-       return found ? retval : -ESRCH;
+       return success ? 0 : retval;
 }
 
 int
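
The rewritten loop changes what __kill_pg_info() reports: the old code returned the first error seen (or -ESRCH only if the process group was empty), while the new code returns 0 as soon as any group member was signalled successfully and otherwise the last error, with retval's initial -ESRCH still covering the empty-group case. A standalone sketch of the new aggregation, with made-up inputs:

/* Standalone sketch of the new return-value aggregation. */
#include <errno.h>
#include <stdio.h>

static int aggregate(const int *errs, int n)
{
	int retval = -ESRCH;      /* empty group => -ESRCH */
	int success = 0;
	int i;

	for (i = 0; i < n; i++) {
		success |= !errs[i];  /* remember whether any send worked */
		retval = errs[i];     /* otherwise report the last error  */
	}
	return success ? 0 : retval;
}

int main(void)
{
	int mixed[]  = { -EPERM, 0 };        /* one denied, one delivered */
	int denied[] = { -EPERM, -EPERM };   /* everything denied         */

	printf("%d %d %d\n",
	       aggregate(mixed, 2),          /* 0: at least one succeeded */
	       aggregate(denied, 2),         /* -EPERM: last error        */
	       aggregate(NULL, 0));          /* -ESRCH: empty group       */
	return 0;
}
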
@@ -1197,6 +1200,13 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
        int ret;
        unsigned long flags;
 
+       /*
+        * Make sure legacy kernel users don't send in bad values
+        * (normal paths check this in check_kill_permission).
+        */
+       if (sig < 0 || sig > _NSIG)
+               return -EINVAL;
+
        /*
         * We need the tasklist lock even for the specific
         * thread case (when we don't need to follow the group
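
The added range check protects in-kernel callers of send_sig_info() that bypass check_kill_permission(): a signal number outside 0.._NSIG is rejected with -EINVAL before the tasklist lock is even taken. Userspace callers already hit the equivalent check on the kill() path, which is easy to observe (illustrative snippet; the out-of-range number is arbitrary):

/* Illustrative only: an out-of-range signal number fails with EINVAL
 * before any permission or existence checks. */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	if (kill(getpid(), 10000) < 0)
		printf("kill(self, 10000): %s\n", strerror(errno)); /* Invalid argument */
	return 0;
}
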
@@ -1546,6 +1556,34 @@ do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent)
        spin_unlock_irqrestore(&sighand->siglock, flags);
 }
 
+int print_fatal_signals = 0;
+
+static void print_fatal_signal(struct pt_regs *regs, int signr)
+{
+       int i;
+       unsigned char insn;
+       printk("%s/%d: potentially unexpected fatal signal %d.\n",
+               current->comm, current->pid, signr);
+
+#ifdef __i386__
+       printk("code at %08lx: ", regs->eip);
+       for (i = 0; i < 16; i++) {
+               __get_user(insn, (unsigned char *)(regs->eip + i));
+               printk("%02x ", insn);
+       }
+#endif
+       printk("\n");
+       show_regs(regs);
+}
+
+static int __init setup_print_fatal_signals(char *str)
+{
+       get_option (&str, &print_fatal_signals);
+
+       return 1;
+}
+
+__setup("print-fatal-signals=", setup_print_fatal_signals);
 
 #ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER
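
print_fatal_signal() prints the registers and, on i386, the 16 code bytes at the faulting instruction pointer whenever a task is about to die from an unhandled signal; it is off by default and enabled by booting with print-fatal-signals=1, which the __setup() hook parses with get_option(). The same dump-the-code-bytes idea can be approximated from userspace with a SIGSEGV handler; the snippet below is an x86-64 illustration written for this note, not part of the patch:

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>

/* Dump 16 code bytes at the faulting instruction pointer, in the same
 * spirit as print_fatal_signal() above (x86-64 register names here). */
static void die_handler(int sig, siginfo_t *info, void *ctx)
{
	ucontext_t *uc = ctx;
	unsigned char *ip = (unsigned char *)uc->uc_mcontext.gregs[REG_RIP];
	int i;

	(void)info;
	/* fprintf() is not async-signal-safe; acceptable for a demo that exits. */
	fprintf(stderr, "fatal signal %d, code at %p: ", sig, (void *)ip);
	for (i = 0; i < 16; i++)
		fprintf(stderr, "%02x ", ip[i]);
	fprintf(stderr, "\n");
	_exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = die_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)0 = 42;   /* fault: RIP points at this store */
	return 0;
}
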
 
@@ -1737,6 +1775,11 @@ relock:
                if (!signr)
                        break; /* will return 0 */
 
+               if ((signr == SIGSEGV) && print_fatal_signals) {
+                       spin_unlock_irq(&current->sighand->siglock);
+                       print_fatal_signal(regs, signr);
+                       spin_lock_irq(&current->sighand->siglock);
+               }
                if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
                        ptrace_signal_deliver(regs, cookie);
 
@@ -1841,6 +1884,8 @@ relock:
                 * Anything else is fatal, maybe with a core dump.
                 */
                current->flags |= PF_SIGNALED;
+               if (print_fatal_signals)
+                       print_fatal_signal(regs, signr);
                if (sig_kernel_coredump(signr) &&
                    do_coredump((long)signr, signr, regs)) {
                        /*
@@ -2356,13 +2401,13 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
        int error;
 
        if (uoss) {
-               oss.ss_sp = (void *) current->sas_ss_sp;
+               oss.ss_sp = (void __user *) current->sas_ss_sp;
                oss.ss_size = current->sas_ss_size;
                oss.ss_flags = sas_ss_flags(sp);
        }
 
        if (uss) {
-               void *ss_sp;
+               void __user *ss_sp;
                size_t ss_size;
                int ss_flags;