X-Git-Url: http://git.onelab.eu/?p=linux-2.6.git;a=blobdiff_plain;f=kernel%2Fsignal.c;fp=kernel%2Fsignal.c;h=2d4bae2f88a50e04f001946b6a0a9b531ed7e2da;hp=58c0977eb3d72206870633ca3d1033f739fdaf4b;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hpb=cee37fe97739d85991964371c1f3a745c00dd236

diff --git a/kernel/signal.c b/kernel/signal.c
index 58c0977eb..2d4bae2f8 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,8 +22,10 @@
 #include
 #include
 #include
-#include
 #include
+#include
+#include
+#include
 #include
 #include
 #include
@@ -145,6 +147,8 @@ static kmem_cache_t *sigqueue_cachep;
 #define sig_kernel_stop(sig) \
 	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
 
+#define sig_needs_tasklist(sig)	((sig) == SIGCONT)
+
 #define sig_user_defined(t, signr) \
 	(((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
 	 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
@@ -212,6 +216,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 fastcall void recalc_sigpending_tsk(struct task_struct *t)
 {
 	if (t->signal->group_stop_count > 0 ||
+	    (freezing(t)) ||
 	    PENDING(&t->pending, &t->blocked) ||
 	    PENDING(&t->signal->shared_pending, &t->blocked))
 		set_tsk_thread_flag(t, TIF_SIGPENDING);
@@ -260,7 +265,7 @@ next_signal(struct sigpending *pending, sigset_t *mask)
 	return sig;
 }
 
-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
+static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
 					 int override_rlimit)
 {
 	struct sigqueue *q = NULL;
@@ -275,13 +280,12 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __n
 	} else {
 		INIT_LIST_HEAD(&q->list);
 		q->flags = 0;
-		q->lock = NULL;
 		q->user = get_uid(t->user);
 	}
 	return(q);
 }
 
-static inline void __sigqueue_free(struct sigqueue *q)
+static void __sigqueue_free(struct sigqueue *q)
 {
 	if (q->flags & SIGQUEUE_PREALLOC)
 		return;
@@ -290,7 +294,7 @@ static inline void __sigqueue_free(struct sigqueue *q)
 	kmem_cache_free(sigqueue_cachep, q);
 }
 
-static void flush_sigqueue(struct sigpending *queue)
+void flush_sigqueue(struct sigpending *queue)
 {
 	struct sigqueue *q;
 
@@ -305,9 +309,7 @@ static void flush_sigqueue(struct sigpending *queue)
 /*
  * Flush all pending signals for a task.
 */
-
-void
-flush_signals(struct task_struct *t)
+void flush_signals(struct task_struct *t)
 {
 	unsigned long flags;
 
@@ -318,109 +320,6 @@ flush_signals(struct task_struct *t)
 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 }
 
-/*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_sighand(struct task_struct *tsk)
-{
-	struct sighand_struct * sighand = tsk->sighand;
-
-	/* Ok, we're done with the signal handlers */
-	tsk->sighand = NULL;
-	if (atomic_dec_and_test(&sighand->count))
-		kmem_cache_free(sighand_cachep, sighand);
-}
-
-void exit_sighand(struct task_struct *tsk)
-{
-	write_lock_irq(&tasklist_lock);
-	__exit_sighand(tsk);
-	write_unlock_irq(&tasklist_lock);
-}
-
-/*
- * This function expects the tasklist_lock write-locked.
- */
-void __exit_signal(struct task_struct *tsk)
-{
-	struct signal_struct * sig = tsk->signal;
-	struct sighand_struct * sighand = tsk->sighand;
-
-	if (!sig)
-		BUG();
-	if (!atomic_read(&sig->count))
-		BUG();
-	spin_lock(&sighand->siglock);
-	posix_cpu_timers_exit(tsk);
-	if (atomic_dec_and_test(&sig->count)) {
-		posix_cpu_timers_exit_group(tsk);
-		if (tsk == sig->curr_target)
-			sig->curr_target = next_thread(tsk);
-		tsk->signal = NULL;
-		spin_unlock(&sighand->siglock);
-		flush_sigqueue(&sig->shared_pending);
-	} else {
-		/*
-		 * If there is any task waiting for the group exit
-		 * then notify it:
-		 */
-		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
-			wake_up_process(sig->group_exit_task);
-			sig->group_exit_task = NULL;
-		}
-		if (tsk == sig->curr_target)
-			sig->curr_target = next_thread(tsk);
-		tsk->signal = NULL;
-		/*
-		 * Accumulate here the counters for all threads but the
-		 * group leader as they die, so they can be added into
-		 * the process-wide totals when those are taken.
-		 * The group leader stays around as a zombie as long
-		 * as there are other threads.  When it gets reaped,
-		 * the exit.c code will add its counts into these totals.
-		 * We won't ever get here for the group leader, since it
-		 * will have been the last reference on the signal_struct.
-		 */
-		sig->utime = cputime_add(sig->utime, tsk->utime);
-		sig->stime = cputime_add(sig->stime, tsk->stime);
-		sig->min_flt += tsk->min_flt;
-		sig->maj_flt += tsk->maj_flt;
-		sig->nvcsw += tsk->nvcsw;
-		sig->nivcsw += tsk->nivcsw;
-		sig->sched_time += tsk->sched_time;
-		spin_unlock(&sighand->siglock);
-		sig = NULL;	/* Marker for below. */
-	}
-	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
-	flush_sigqueue(&tsk->pending);
-	if (sig) {
-		/*
-		 * We are cleaning up the signal_struct here.  We delayed
-		 * calling exit_itimers until after flush_sigqueue, just in
-		 * case our thread-local pending queue contained a queued
-		 * timer signal that would have been cleared in
-		 * exit_itimers.  When that called sigqueue_free, it would
-		 * attempt to re-take the tasklist_lock and deadlock.  This
-		 * can never happen if we ensure that all queues the
-		 * timer's signal might be queued on have been flushed
-		 * first.  The shared_pending queue, and our own pending
-		 * queue are the only queues the timer could be on, since
-		 * there are no other threads left in the group and timer
-		 * signals are constrained to threads inside the group.
-		 */
-		exit_itimers(sig);
-		exit_thread_group_keys(sig);
-		kmem_cache_free(signal_cachep, sig);
-	}
-}
-
-void exit_signal(struct task_struct *tsk)
-{
-	write_lock_irq(&tasklist_lock);
-	__exit_signal(tsk);
-	write_unlock_irq(&tasklist_lock);
-}
-
 /*
  * Flush all handlers for a task.
 */
@@ -439,6 +338,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
 	}
 }
 
+EXPORT_SYMBOL_GPL(flush_signal_handlers);
 
 /* Notify the system that a driver wants to block all signals for this
  * process, and wants to be notified if any signals at all were to be
@@ -474,7 +374,7 @@ unblock_all_signals(void)
 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
 }
 
-static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 {
 	struct sigqueue *q, *first = NULL;
 	int still_pending = 0;
@@ -522,16 +422,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 {
 	int sig = 0;
 
-	/* SIGKILL must have priority, otherwise it is quite easy
-	 * to create an unkillable process, sending sig < SIGKILL
-	 * to self */
-	if (unlikely(sigismember(&pending->signal, SIGKILL))) {
-		if (!sigismember(mask, SIGKILL))
-			sig = SIGKILL;
-	}
-
-	if (likely(!sig))
-		sig = next_signal(pending, mask);
+	sig = next_signal(pending, mask);
 	if (sig) {
 		if (current->notifier) {
 			if (sigismember(current->notifier_mask, sig)) {
@@ -576,7 +467,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 		 * is to alert stop-signal processing code when another
 		 * processor has come along and cleared the flag.
 		 */
-		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
+		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
+			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
 	}
 	if ( signr &&
 	     ((info->si_code & __SI_MASK) == __SI_TIMER) &&
@@ -625,6 +517,33 @@ void signal_wake_up(struct task_struct *t, int resume)
 		kick_process(t);
 }
 
+/*
+ * Remove signals in mask from the pending set and queue.
+ * Returns 1 if any signals were found.
+ *
+ * All callers must be holding the siglock.
+ *
+ * This version takes a sigset mask and looks at all signals,
+ * not just those in the first mask word.
+ */
+static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
+{
+	struct sigqueue *q, *n;
+	sigset_t m;
+
+	sigandsets(&m, mask, &s->signal);
+	if (sigisemptyset(&m))
+		return 0;
+
+	signandsets(&s->signal, &s->signal, mask);
+	list_for_each_entry_safe(q, n, &s->list, list) {
+		if (sigismember(mask, q->info.si_signo)) {
+			list_del_init(&q->list);
+			__sigqueue_free(q);
+		}
+	}
+	return 1;
+}
+
 /*
  * Remove signals in mask from the pending set and queue.
  * Returns 1 if any signals were found.
@@ -655,14 +574,14 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
 static int check_kill_permission(int sig, struct siginfo *info,
 				 struct task_struct *t)
 {
-	int error = -EINVAL;
 	int user;
+	int error = -EINVAL;
 
 	if (!valid_signal(sig))
 		return error;
 
-	user = (!info || ((unsigned long)info != 1 &&
-			  (unsigned long)info != 2 && SI_FROMUSER(info)));
+	user = ((info == SEND_SIG_NOINFO) ||
+		(!is_si_special(info) && SI_FROMUSER(info)));
 
 	error = -EPERM;
 	if (user && ((sig != SIGCONT) ||
@@ -676,13 +595,14 @@ static int check_kill_permission(int sig, struct siginfo *info,
 	if (user && !vx_check(vx_task_xid(t), VX_ADMIN|VX_IDENT))
 		return error;
 
-	return security_task_kill(t, info, sig);
+	error = security_task_kill(t, info, sig);
+	if (!error)
+		audit_signal_info(sig, t); /* Let audit system see the signal */
+	return error;
 }
 
 /* forward decl */
-static void do_notify_parent_cldstop(struct task_struct *tsk,
-				     struct task_struct *parent,
-				     int why);
+static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
 
 /*
  * Handle magic process-wide effects of stop/continue signals.
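
The check_kill_permission() rewrite above, and much of what follows, replaces the old magic siginfo pointer values 0, 1 and 2 with named sentinels. The companion definitions live in include/linux/sched.h in this series; from memory they look like the sketch below, so treat the exact casts as illustrative:

	#define SEND_SIG_NOINFO	((struct siginfo *) 0)
	#define SEND_SIG_PRIV	((struct siginfo *) 1)
	#define SEND_SIG_FORCED	((struct siginfo *) 2)

	/* true for the three sentinels above, false for a real siginfo */
	static inline int is_si_special(const struct siginfo *info)
	{
		return info <= SEND_SIG_FORCED;
	}

SEND_SIG_NOINFO stands for a plain user-space kill() with no payload, SEND_SIG_PRIV for a kernel-generated signal, and SEND_SIG_FORCED for a signal that must get through even when no sigqueue entry can be allocated.
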
@@ -695,7 +615,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 {
 	struct task_struct *t;
 
-	if (p->flags & SIGNAL_GROUP_EXIT)
+	if (p->signal->flags & SIGNAL_GROUP_EXIT)
 		/*
 		 * The process is in the middle of dying already.
 		 */
@@ -732,14 +652,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 			p->signal->group_stop_count = 0;
 			p->signal->flags = SIGNAL_STOP_CONTINUED;
 			spin_unlock(&p->sighand->siglock);
-			if (p->ptrace & PT_PTRACED)
-				do_notify_parent_cldstop(p, p->parent,
-							 CLD_STOPPED);
-			else
-				do_notify_parent_cldstop(
-					p->group_leader,
-					p->group_leader->real_parent,
-					CLD_STOPPED);
+			do_notify_parent_cldstop(p, CLD_STOPPED);
 			spin_lock(&p->sighand->siglock);
 		}
 		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
@@ -780,14 +693,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 				p->signal->flags = SIGNAL_STOP_CONTINUED;
 				p->signal->group_exit_code = 0;
 				spin_unlock(&p->sighand->siglock);
-				if (p->ptrace & PT_PTRACED)
-					do_notify_parent_cldstop(p, p->parent,
-								 CLD_CONTINUED);
-				else
-					do_notify_parent_cldstop(
-						p->group_leader,
-						p->group_leader->real_parent,
-						CLD_CONTINUED);
+				do_notify_parent_cldstop(p, CLD_CONTINUED);
 				spin_lock(&p->sighand->siglock);
 			} else {
 				/*
@@ -816,7 +722,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 	 * fast-pathed signals for kernel-internal things like SIGSTOP
 	 * or SIGKILL.
 	 */
-	if ((unsigned long)info == 2)
+	if (info == SEND_SIG_FORCED)
 		goto out_set;
 
 	/* Real-time signals must be queued if sent by sigqueue, or
@@ -828,19 +734,19 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 	   pass on the info struct.  */
 
 	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
-					     ((unsigned long) info < 2 ||
+					     (is_si_special(info) ||
 					      info->si_code >= 0)));
 	if (q) {
 		list_add_tail(&q->list, &signals->list);
 		switch ((unsigned long) info) {
-		case 0:
+		case (unsigned long) SEND_SIG_NOINFO:
 			q->info.si_signo = sig;
 			q->info.si_errno = 0;
 			q->info.si_code = SI_USER;
 			q->info.si_pid = current->pid;
 			q->info.si_uid = current->uid;
 			break;
-		case 1:
+		case (unsigned long) SEND_SIG_PRIV:
 			q->info.si_signo = sig;
 			q->info.si_errno = 0;
 			q->info.si_code = SI_KERNEL;
@@ -851,20 +757,13 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 			copy_siginfo(&q->info, info);
 			break;
 		}
-	} else {
-		if (sig >= SIGRTMIN && info && (unsigned long)info != 1
-		   && info->si_code != SI_USER)
+	} else if (!is_si_special(info)) {
+		if (sig >= SIGRTMIN && info->si_code != SI_USER)
 		/*
 		 * Queue overflow, abort.  We may abort if the signal was rt
 		 * and sent by user using something other than kill().
 		 */
 			return -EAGAIN;
-		if (((unsigned long)info > 1) && (info->si_code == SI_TIMER))
-			/*
-			 * Set up a return to indicate that we dropped
-			 * the signal.
-			 */
-			ret = info->si_sys_private;
 	}
 
 out_set:
@@ -875,22 +774,46 @@ out_set:
 #define LEGACY_QUEUE(sigptr, sig) \
 	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
 
+int print_fatal_signals = 0;
+
+static void print_fatal_signal(struct pt_regs *regs, int signr)
+{
+	printk("%s/%d: potentially unexpected fatal signal %d.\n",
+		current->comm, current->pid, signr);
+
+#ifdef __i386__
+	printk("code at %08lx: ", regs->eip);
+	{
+		int i;
+		for (i = 0; i < 16; i++) {
+			unsigned char insn;
+
+			__get_user(insn, (unsigned char *)(regs->eip + i));
+			printk("%02x ", insn);
+		}
+	}
+#endif
+	printk("\n");
+	show_regs(regs);
+}
+
+static int __init setup_print_fatal_signals(char *str)
+{
+	get_option (&str, &print_fatal_signals);
+
+	return 1;
+}
+
+__setup("print-fatal-signals=", setup_print_fatal_signals);
 
 static int
 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
 	int ret = 0;
 
-	if (!irqs_disabled())
-		BUG();
+	BUG_ON(!irqs_disabled());
 	assert_spin_locked(&t->sighand->siglock);
 
-	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
-		/*
-		 * Set up a return to indicate that we dropped the signal.
-		 */
-		ret = info->si_sys_private;
-
 	/* Short-circuit ignored signals. */
 	if (sig_ignored(t, sig))
 		goto out;
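
The print_fatal_signal() machinery added above is gated by a boot parameter rather than being always on. A quick way to see it in action, assuming a kernel built from this tree, is to boot with print-fatal-signals=1 and run a deliberately crashing program such as:

	/* triggers SIGSEGV so the new printk path fires (illustrative only) */
	int main(void)
	{
		return *(volatile int *)0;	/* fault on a NULL dereference */
	}

The resulting log line carries the comm, pid and signal number, plus (on i386) the 16 opcode bytes at the faulting EIP, which is often enough to spot a corrupted code page without attaching a debugger.
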
@@ -920,11 +843,13 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 	int ret;
 
 	spin_lock_irqsave(&t->sighand->siglock, flags);
-	if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
+	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
 		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
+	}
+	if (sigismember(&t->blocked, sig)) {
 		sigdelset(&t->blocked, sig);
-		recalc_sigpending_tsk(t);
 	}
+	recalc_sigpending_tsk(t);
 	ret = specific_send_sig_info(sig, info, t);
 	spin_unlock_irqrestore(&t->sighand->siglock, flags);
 
@@ -934,15 +859,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 void
 force_sig_specific(int sig, struct task_struct *t)
 {
-	unsigned long int flags;
-
-	spin_lock_irqsave(&t->sighand->siglock, flags);
-	if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN)
-		t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
-	sigdelset(&t->blocked, sig);
-	recalc_sigpending_tsk(t);
-	specific_send_sig_info(sig, (void *)2, t);
-	spin_unlock_irqrestore(&t->sighand->siglock, flags);
+	force_sig_info(sig, SEND_SIG_FORCED, t);
 }
 
 /*
@@ -953,34 +870,31 @@ force_sig_specific(int sig, struct task_struct *t)
  * as soon as they're available, so putting the signal on the shared queue
  * will be equivalent to sending it to one such thread.
 */
-#define wants_signal(sig, p, mask)			\
-	(!sigismember(&(p)->blocked, sig)		\
-	 && !((p)->state & mask)			\
-	 && !((p)->flags & PF_EXITING)			\
-	 && (task_curr(p) || !signal_pending(p)))
-
+static inline int wants_signal(int sig, struct task_struct *p)
+{
+	if (sigismember(&p->blocked, sig))
+		return 0;
+	if (p->flags & PF_EXITING)
+		return 0;
+	if (sig == SIGKILL)
+		return 1;
+	if (p->state & (TASK_STOPPED | TASK_TRACED))
+		return 0;
+	return task_curr(p) || !signal_pending(p);
+}
 
 static void
 __group_complete_signal(int sig, struct task_struct *p)
 {
-	unsigned int mask;
 	struct task_struct *t;
 
-	/*
-	 * Don't bother traced and stopped tasks (but
-	 * SIGKILL will punch through that).
-	 */
-	mask = TASK_STOPPED | TASK_TRACED;
-	if (sig == SIGKILL)
-		mask = 0;
-
 	/*
 	 * Now find a thread we can wake up to take the signal off the queue.
 	 *
 	 * If the main thread wants the signal, it gets first crack.
 	 * Probably the least surprising to the average bear.
 	 */
-	if (wants_signal(sig, p, mask))
+	if (wants_signal(sig, p))
 		t = p;
 	else if (thread_group_empty(p))
 		/*
@@ -996,9 +910,8 @@ __group_complete_signal(int sig, struct task_struct *p)
 	if (t == NULL)
 		/* restart balancing at this thread */
 		t = p->signal->curr_target = p;
 
-	BUG_ON(t->tgid != p->tgid);
-	while (!wants_signal(sig, t, mask)) {
+	while (!wants_signal(sig, t)) {
 		t = next_thread(t);
 		if (t == p->signal->curr_target)
 			/*
@@ -1080,12 +993,6 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 	assert_spin_locked(&p->sighand->siglock);
 	handle_stop_signal(sig, p);
 
-	if (((unsigned long)info > 2) && (info->si_code == SI_TIMER))
-		/*
-		 * Set up a return to indicate that we dropped the signal.
-		 */
-		ret = info->si_sys_private;
-
 	/* Short-circuit ignored signals. */
 	if (sig_ignored(p, sig))
 		return ret;
@@ -1138,25 +1045,46 @@ void zap_other_threads(struct task_struct *p)
 		if (t != p->group_leader)
 			t->exit_signal = -1;
 
+		/* SIGKILL will be handled before any pending SIGSTOP */
 		sigaddset(&t->pending.signal, SIGKILL);
-		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
 		signal_wake_up(t, 1);
 	}
 }
 
 /*
- * Must be called with the tasklist_lock held for reading!
+ * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
+struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
+{
+	struct sighand_struct *sighand;
+
+	for (;;) {
+		sighand = rcu_dereference(tsk->sighand);
+		if (unlikely(sighand == NULL))
+			break;
+
+		spin_lock_irqsave(&sighand->siglock, *flags);
+		if (likely(sighand == tsk->sighand))
+			break;
+		spin_unlock_irqrestore(&sighand->siglock, *flags);
+	}
+
+	return sighand;
+}
+
 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
 	unsigned long flags;
 	int ret;
 
 	ret = check_kill_permission(sig, info, p);
-	if (!ret && sig && p->sighand) {
-		spin_lock_irqsave(&p->sighand->siglock, flags);
-		ret = __group_send_sig_info(sig, info, p);
-		spin_unlock_irqrestore(&p->sighand->siglock, flags);
+
+	if (!ret && sig) {
+		ret = -ESRCH;
+		if (lock_task_sighand(p, &flags)) {
+			ret = __group_send_sig_info(sig, info, p);
+			unlock_task_sighand(p, &flags);
+		}
 	}
 
 	return ret;
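
lock_task_sighand() loops because, with the RCU-deferred freeing of sighand_struct introduced elsewhere in this series, tsk->sighand can be swapped out between the rcu_dereference() and the spin_lock_irqsave(); re-checking the pointer under the lock makes the result stable. Its counterpart is a one-line unlock helper, roughly (quoted from memory of include/linux/sched.h in this series):

	static inline void unlock_task_sighand(struct task_struct *tsk,
					       unsigned long *flags)
	{
		spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
	}

so the caller pattern used throughout the rest of the patch is: if lock_task_sighand() returns non-NULL, operate with siglock held and then unlock_task_sighand(); if it returns NULL, treat the task as already dead.
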
@@ -1201,17 +1129,57 @@ int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 {
 	int error;
+	int acquired_tasklist_lock = 0;
 	struct task_struct *p;
 
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
+	if (unlikely(sig_needs_tasklist(sig))) {
+		read_lock(&tasklist_lock);
+		acquired_tasklist_lock = 1;
+	}
 	p = find_task_by_pid(pid);
 	error = -ESRCH;
 	if (p)
 		error = group_send_sig_info(sig, info, p);
-	read_unlock(&tasklist_lock);
+	if (unlikely(acquired_tasklist_lock))
+		read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	return error;
 }
 
+/* like kill_proc_info(), but doesn't use uid/euid of "current" */
+int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
+		      uid_t uid, uid_t euid)
+{
+	int ret = -EINVAL;
+	struct task_struct *p;
+
+	if (!valid_signal(sig))
+		return ret;
+
+	read_lock(&tasklist_lock);
+	p = find_task_by_pid(pid);
+	if (!p) {
+		ret = -ESRCH;
+		goto out_unlock;
+	}
+	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
+	    && (euid != p->suid) && (euid != p->uid)
+	    && (uid != p->suid) && (uid != p->uid)) {
+		ret = -EPERM;
+		goto out_unlock;
+	}
+	if (sig && p->sighand) {
+		unsigned long flags;
+		spin_lock_irqsave(&p->sighand->siglock, flags);
+		ret = __group_send_sig_info(sig, info, p);
+		spin_unlock_irqrestore(&p->sighand->siglock, flags);
+	}
+out_unlock:
+	read_unlock(&tasklist_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kill_proc_info_as_uid);
 
 /*
  * kill_something_info() interprets pid in interesting ways just like kill(2).
@@ -1281,10 +1249,13 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 	return ret;
 }
 
+#define __si_special(priv) \
+	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
+
 int
 send_sig(int sig, struct task_struct *p, int priv)
 {
-	return send_sig_info(sig, (void*)(long)(priv != 0), p);
+	return send_sig_info(sig, __si_special(priv), p);
 }
 
 /*
@@ -1304,7 +1275,7 @@ send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 void
 force_sig(int sig, struct task_struct *p)
 {
-	force_sig_info(sig, (void*)1L, p);
+	force_sig_info(sig, SEND_SIG_PRIV, p);
 }
 
 /*
@@ -1329,13 +1300,13 @@ force_sigsegv(int sig, struct task_struct *p)
 int
 kill_pg(pid_t pgrp, int sig, int priv)
 {
-	return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
+	return kill_pg_info(sig, __si_special(priv), pgrp);
 }
 
 int
 kill_proc(pid_t pid, int sig, int priv)
 {
-	return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
+	return kill_proc_info(sig, __si_special(priv), pid);
 }
 
 /*
@@ -1366,59 +1337,66 @@ void sigqueue_free(struct sigqueue *q)
 	 * pending queue.
 	 */
 	if (unlikely(!list_empty(&q->list))) {
-		read_lock(&tasklist_lock);
-		spin_lock_irqsave(q->lock, flags);
+		spinlock_t *lock = &current->sighand->siglock;
+		read_lock(&tasklist_lock);
+		spin_lock_irqsave(lock, flags);
 		if (!list_empty(&q->list))
 			list_del_init(&q->list);
-		spin_unlock_irqrestore(q->lock, flags);
+		spin_unlock_irqrestore(lock, flags);
 		read_unlock(&tasklist_lock);
 	}
 	q->flags &= ~SIGQUEUE_PREALLOC;
 	__sigqueue_free(q);
 }
 
-int
-send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
+int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 {
 	unsigned long flags;
 	int ret = 0;
 
+	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+
 	/*
-	 * We need the tasklist lock even for the specific
-	 * thread case (when we don't need to follow the group
-	 * lists) in order to avoid races with "p->sighand"
-	 * going away or changing from under us.
+	 * The rcu based delayed sighand destroy makes it possible to
+	 * run this without tasklist lock held. The task struct itself
+	 * cannot go away as create_timer did get_task_struct().
+	 *
+	 * We return -1, when the task is marked exiting, so
+	 * posix_timer_event can redirect it to the group leader
 	 */
-	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-	read_lock(&tasklist_lock);
-	spin_lock_irqsave(&p->sighand->siglock, flags);
-
+	rcu_read_lock();
+
+	if (!likely(lock_task_sighand(p, &flags))) {
+		ret = -1;
+		goto out_err;
+	}
+
 	if (unlikely(!list_empty(&q->list))) {
 		/*
 		 * If an SI_TIMER entry is already queue just increment
 		 * the overrun count.
 		 */
-		if (q->info.si_code != SI_TIMER)
-			BUG();
+		BUG_ON(q->info.si_code != SI_TIMER);
 		q->info.si_overrun++;
 		goto out;
-	} 
+	}
 	/* Short-circuit ignored signals.  */
 	if (sig_ignored(p, sig)) {
 		ret = 1;
 		goto out;
 	}
 
-	q->lock = &p->sighand->siglock;
 	list_add_tail(&q->list, &p->pending.list);
 	sigaddset(&p->pending.signal, sig);
 	if (!sigismember(&p->blocked, sig))
 		signal_wake_up(p, sig == SIGKILL);
 
 out:
-	spin_unlock_irqrestore(&p->sighand->siglock, flags);
-	read_unlock(&tasklist_lock);
-	return(ret);
+	unlock_task_sighand(p, &flags);
out_err:
+	rcu_read_unlock();
+
+	return ret;
 }
 
 int
@@ -1428,7 +1406,9 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 	int ret = 0;
 
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+
 	read_lock(&tasklist_lock);
+	/* Since it_lock is held, p->sighand cannot be NULL. */
 	spin_lock_irqsave(&p->sighand->siglock, flags);
 	handle_stop_signal(sig, p);
@@ -1444,8 +1424,7 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 		 * the overrun count.  Other uses should not try to
 		 * send the signal multiple times.
 		 */
-		if (q->info.si_code != SI_TIMER)
-			BUG();
+		BUG_ON(q->info.si_code != SI_TIMER);
 		q->info.si_overrun++;
 		goto out;
 	}
@@ -1455,7 +1434,6 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 	 * We always use the shared queue for process-wide signals,
 	 * to avoid several races.
 	 */
-	q->lock = &p->sighand->siglock;
 	list_add_tail(&q->list, &p->signal->shared_pending.list);
 	sigaddset(&p->signal->shared_pending.signal, sig);
 
@@ -1463,7 +1441,7 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 out:
 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
 	read_unlock(&tasklist_lock);
-	return(ret);
+	return ret;
 }
 
 /*
@@ -1517,7 +1495,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 
 	psig = tsk->parent->sighand;
 	spin_lock_irqsave(&psig->siglock, flags);
-	if (sig == SIGCHLD &&
+	if (!tsk->ptrace && sig == SIGCHLD &&
 	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
 	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
 		/*
@@ -1545,14 +1523,20 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 	spin_unlock_irqrestore(&psig->siglock, flags);
 }
 
-static void
-do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
-			 int why)
+static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 {
 	struct siginfo info;
 	unsigned long flags;
+	struct task_struct *parent;
 	struct sighand_struct *sighand;
 
+	if (tsk->ptrace & PT_PTRACED)
+		parent = tsk->parent;
+	else {
+		tsk = tsk->group_leader;
+		parent = tsk->real_parent;
+	}
+
 	info.si_signo = SIGCHLD;
 	info.si_errno = 0;
 	info.si_pid = tsk->pid;
@@ -1615,14 +1599,14 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
 	/* Let the debugger run.  */
 	set_current_state(TASK_TRACED);
 	spin_unlock_irq(&current->sighand->siglock);
+	try_to_freeze();
 	read_lock(&tasklist_lock);
 	if (likely(current->ptrace & PT_PTRACED) &&
 	    likely(current->parent != current->real_parent ||
 		   !(current->ptrace & PT_ATTACHED)) &&
 	    (likely(current->parent->signal != current->signal) ||
 	     !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
-		do_notify_parent_cldstop(current, current->parent,
-					 CLD_TRAPPED);
+		do_notify_parent_cldstop(current, CLD_TRAPPED);
 		read_unlock(&tasklist_lock);
 		schedule();
 	} else {
@@ -1676,17 +1660,9 @@ finish_stop(int stop_count)
 	 * a group stop in progress and we are the last to stop,
 	 * report to the parent.  When ptraced, every thread reports itself.
 	 */
-	if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
+	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
 		read_lock(&tasklist_lock);
-		do_notify_parent_cldstop(current, current->parent,
-					 CLD_STOPPED);
-		read_unlock(&tasklist_lock);
-	}
-	else if (stop_count == 0) {
-		read_lock(&tasklist_lock);
-		do_notify_parent_cldstop(current->group_leader,
-					 current->group_leader->real_parent,
-					 CLD_STOPPED);
+		do_notify_parent_cldstop(current, CLD_STOPPED);
 		read_unlock(&tasklist_lock);
 	}
 
@@ -1703,12 +1679,10 @@ finish_stop(int stop_count)
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
-static int
-do_signal_stop(int signr)
+static int do_signal_stop(int signr)
 {
 	struct signal_struct *sig = current->signal;
-	struct sighand_struct *sighand = current->sighand;
-	int stop_count = -1;
+	int stop_count;
 
 	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
 		return 0;
@@ -1718,85 +1692,37 @@ do_signal_stop(int signr)
 		 * There is a group stop in progress.  We don't need to
 		 * start another one.
 		 */
-		signr = sig->group_exit_code;
 		stop_count = --sig->group_stop_count;
-		current->exit_code = signr;
-		set_current_state(TASK_STOPPED);
-		if (stop_count == 0)
-			sig->flags = SIGNAL_STOP_STOPPED;
-		spin_unlock_irq(&sighand->siglock);
-	}
-	else if (thread_group_empty(current)) {
-		/*
-		 * Lock must be held through transition to stopped state.
-		 */
-		current->exit_code = current->signal->group_exit_code = signr;
-		set_current_state(TASK_STOPPED);
-		sig->flags = SIGNAL_STOP_STOPPED;
-		spin_unlock_irq(&sighand->siglock);
-	}
-	else {
+	} else {
 		/*
 		 * There is no group stop already in progress.
-		 * We must initiate one now, but that requires
-		 * dropping siglock to get both the tasklist lock
-		 * and siglock again in the proper order.  Note that
-		 * this allows an intervening SIGCONT to be posted.
-		 * We need to check for that and bail out if necessary.
+		 * We must initiate one now.
 		 */
 		struct task_struct *t;
 
-		spin_unlock_irq(&sighand->siglock);
+		sig->group_exit_code = signr;
 
-		/* signals can be posted during this window */
-
-		read_lock(&tasklist_lock);
-		spin_lock_irq(&sighand->siglock);
-
-		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
+		stop_count = 0;
+		for (t = next_thread(current); t != current; t = next_thread(t))
 			/*
-			 * Another stop or continue happened while we
-			 * didn't have the lock.  We can just swallow this
-			 * signal now.  If we raced with a SIGCONT, that
-			 * should have just cleared it now.  If we raced
-			 * with another processor delivering a stop signal,
-			 * then the SIGCONT that wakes us up should clear it.
+			 * Setting state to TASK_STOPPED for a group
+			 * stop is always done with the siglock held,
+			 * so this check has no races.
 			 */
-			read_unlock(&tasklist_lock);
-			return 0;
-		}
-
-		if (sig->group_stop_count == 0) {
-			sig->group_exit_code = signr;
-			stop_count = 0;
-			for (t = next_thread(current); t != current;
-			     t = next_thread(t))
-				/*
-				 * Setting state to TASK_STOPPED for a group
-				 * stop is always done with the siglock held,
-				 * so this check has no races.
-				 */
-				if (t->state < TASK_STOPPED) {
-					stop_count++;
-					signal_wake_up(t, 0);
-				}
-			sig->group_stop_count = stop_count;
-		}
-		else {
-			/* A race with another thread while unlocked.  */
-			signr = sig->group_exit_code;
-			stop_count = --sig->group_stop_count;
-		}
-
-		current->exit_code = signr;
-		set_current_state(TASK_STOPPED);
-		if (stop_count == 0)
-			sig->flags = SIGNAL_STOP_STOPPED;
-
-		spin_unlock_irq(&sighand->siglock);
-		read_unlock(&tasklist_lock);
+			if (!t->exit_state &&
+			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+				stop_count++;
+				signal_wake_up(t, 0);
+			}
+		sig->group_stop_count = stop_count;
 	}
+
+	if (stop_count == 0)
+		sig->flags = SIGNAL_STOP_STOPPED;
+	current->exit_code = sig->group_exit_code;
+	__set_current_state(TASK_STOPPED);
+
+	spin_unlock_irq(&current->sighand->siglock);
 	finish_stop(stop_count);
 	return 1;
 }
@@ -1807,7 +1733,7 @@ do_signal_stop(int signr)
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
-static inline int handle_group_stop(void)
+static int handle_group_stop(void)
 {
 	int stop_count;
 
@@ -1848,6 +1774,8 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
 	sigset_t *mask = &current->blocked;
 	int signr = 0;
 
+	try_to_freeze();
+
 relock:
 	spin_lock_irq(&current->sighand->siglock);
 	for (;;) {
@@ -1862,6 +1790,11 @@ relock:
 		if (!signr)
 			break; /* will return 0 */
 
+		if ((signr == SIGSEGV) && print_fatal_signals) {
+			spin_unlock_irq(&current->sighand->siglock);
+			print_fatal_signal(regs, signr);
+			spin_lock_irq(&current->sighand->siglock);
+		}
 		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
 			ptrace_signal_deliver(regs, cookie);
 
@@ -1914,7 +1847,7 @@ relock:
 			continue;
 
 		/* Init gets no signals it doesn't want.  */
-		if (current->pid == 1)
+		if (current == child_reaper)
 			continue;
 
 		/* virtual init is protected against user signals */
@@ -1962,6 +1895,8 @@ relock:
 		 * Anything else is fatal, maybe with a core dump.
 		 */
 		current->flags |= PF_SIGNALED;
+		if (print_fatal_signals)
+			print_fatal_signal(regs, signr);
 		if (sig_kernel_coredump(signr)) {
 			/*
 			 * If it was able to dump core, this kills all
@@ -2030,10 +1965,11 @@ long do_no_restart_syscall(struct restart_block *param)
 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
 {
 	int error;
-	sigset_t old_block;
 
 	spin_lock_irq(&current->sighand->siglock);
-	old_block = current->blocked;
+	if (oldset)
+		*oldset = current->blocked;
+
 	error = 0;
 	switch (how) {
 	case SIG_BLOCK:
@@ -2050,8 +1986,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
 	}
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
-	if (oldset)
-		*oldset = old_block;
+
 	return error;
 }
 
@@ -2236,11 +2171,8 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
 			recalc_sigpending();
 			spin_unlock_irq(&current->sighand->siglock);
 
-			current->state = TASK_INTERRUPTIBLE;
-			timeout = schedule_timeout(timeout);
+			timeout = schedule_timeout_interruptible(timeout);
 
-			if (current->flags & PF_FREEZE)
-				refrigerator(PF_FREEZE);
 			spin_lock_irq(&current->sighand->siglock);
 			sig = dequeue_signal(current, &these, &info);
 			current->blocked = current->real_blocked;
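
schedule_timeout_interruptible() is a then-new helper that folds the set-state-then-sleep pair into a single call; it is equivalent to something like this (paraphrasing kernel/timer.c of this era, so treat it as a sketch):

	signed long schedule_timeout_interruptible(signed long timeout)
	{
		__set_current_state(TASK_INTERRUPTIBLE);
		return schedule_timeout(timeout);
	}

The explicit PF_FREEZE/refrigerator() check can be dropped at the same time because the freezer now pokes frozen tasks through TIF_SIGPENDING, via the freezing() test added to recalc_sigpending_tsk() near the top of this patch and the try_to_freeze() calls added elsewhere.
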
@@ -2279,26 +2211,13 @@ sys_kill(int pid, int sig)
 	return kill_something_info(sig, &info, pid);
 }
 
-/**
- *  sys_tgkill - send signal to one specific thread
- *  @tgid: the thread group ID of the thread
- *  @pid: the PID of the thread
- *  @sig: signal to be sent
- *
- *  This syscall also checks the tgid and returns -ESRCH even if the PID
- *  exists but it's not belonging to the target process anymore. This
- *  method solves the problem of threads exiting and PIDs getting reused.
- */
-asmlinkage long sys_tgkill(int tgid, int pid, int sig)
+static int do_tkill(int tgid, int pid, int sig)
 {
-	struct siginfo info;
 	int error;
+	struct siginfo info;
 	struct task_struct *p;
 
-	/* This is only valid for single tasks */
-	if (pid <= 0 || tgid <= 0)
-		return -EINVAL;
-
+	error = -ESRCH;
 	info.si_signo = sig;
 	info.si_errno = 0;
 	info.si_code = SI_TKILL;
@@ -2307,8 +2226,7 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig)
 
 	read_lock(&tasklist_lock);
 	p = find_task_by_pid(pid);
-	error = -ESRCH;
-	if (p && (p->tgid == tgid)) {
+	if (p && (tgid <= 0 || p->tgid == tgid)) {
 		error = check_kill_permission(sig, &info, p);
 		/*
 		 * The null signal is a permissions and process existence
@@ -2322,47 +2240,40 @@ asmlinkage long sys_tgkill(int tgid, int pid, int sig)
 		}
 	}
 	read_unlock(&tasklist_lock);
+
 	return error;
 }
 
+/**
+ *  sys_tgkill - send signal to one specific thread
+ *  @tgid: the thread group ID of the thread
+ *  @pid: the PID of the thread
+ *  @sig: signal to be sent
+ *
+ *  This syscall also checks the tgid and returns -ESRCH even if the PID
+ *  exists but it's not belonging to the target process anymore. This
+ *  method solves the problem of threads exiting and PIDs getting reused.
+ */
+asmlinkage long sys_tgkill(int tgid, int pid, int sig)
+{
+	/* This is only valid for single tasks */
+	if (pid <= 0 || tgid <= 0)
+		return -EINVAL;
+
+	return do_tkill(tgid, pid, sig);
+}
+
 /*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
 asmlinkage long
 sys_tkill(int pid, int sig)
 {
-	struct siginfo info;
-	int error;
-	struct task_struct *p;
-
 	/* This is only valid for single tasks */
 	if (pid <= 0)
 		return -EINVAL;
 
-	info.si_signo = sig;
-	info.si_errno = 0;
-	info.si_code = SI_TKILL;
-	info.si_pid = current->tgid;
-	info.si_uid = current->uid;
-
-	read_lock(&tasklist_lock);
-	p = find_task_by_pid(pid);
-	error = -ESRCH;
-	if (p) {
-		error = check_kill_permission(sig, &info, p);
-		/*
-		 * The null signal is a permissions and process existence
-		 * probe.  No signal is actually delivered.
-		 */
-		if (!error && sig && p->sighand) {
-			spin_lock_irq(&p->sighand->siglock);
-			handle_stop_signal(sig, p);
-			error = specific_send_sig_info(sig, &info, p);
-			spin_unlock_irq(&p->sighand->siglock);
-		}
-	}
-	read_unlock(&tasklist_lock);
-	return error;
+	return do_tkill(0, pid, sig);
 }
 
 asmlinkage long
@@ -2383,10 +2294,10 @@ sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
 	return kill_proc_info(sig, &info, pid);
 }
 
-int
-do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
+int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 {
 	struct k_sigaction *k;
+	sigset_t mask;
 
 	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
 		return -EINVAL;
@@ -2407,6 +2318,9 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
 		*oact = *k;
 
 	if (act) {
+		sigdelsetmask(&act->sa.sa_mask,
+			      sigmask(SIGKILL) | sigmask(SIGSTOP));
+		*k = *act;
 		/*
 		 * POSIX 3.3.1.3:
 		 *  "Setting a signal action to SIG_IGN for a signal that is
@@ -2419,35 +2333,17 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
 		 *   be discarded, whether or not it is blocked"
 		 */
 		if (act->sa.sa_handler == SIG_IGN ||
-		    (act->sa.sa_handler == SIG_DFL &&
-		     sig_kernel_ignore(sig))) {
-			/*
-			 * This is a fairly rare case, so we only take the
-			 * tasklist_lock once we're sure we'll need it.
-			 * Now we must do this little unlock and relock
-			 * dance to maintain the lock hierarchy.
-			 */
+		    (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
 			struct task_struct *t = current;
-			spin_unlock_irq(&t->sighand->siglock);
-			read_lock(&tasklist_lock);
-			spin_lock_irq(&t->sighand->siglock);
-			*k = *act;
-			sigdelsetmask(&k->sa.sa_mask,
-				      sigmask(SIGKILL) | sigmask(SIGSTOP));
-			rm_from_queue(sigmask(sig), &t->signal->shared_pending);
+			sigemptyset(&mask);
+			sigaddset(&mask, sig);
+			rm_from_queue_full(&mask, &t->signal->shared_pending);
 			do {
-				rm_from_queue(sigmask(sig), &t->pending);
+				rm_from_queue_full(&mask, &t->pending);
 				recalc_sigpending_tsk(t);
 				t = next_thread(t);
 			} while (t != current);
-			spin_unlock_irq(&current->sighand->siglock);
-			read_unlock(&tasklist_lock);
-			return 0;
 		}
-
-		*k = *act;
-		sigdelsetmask(&k->sa.sa_mask,
-			      sigmask(SIGKILL) | sigmask(SIGSTOP));
 	}
 
 	spin_unlock_irq(&current->sighand->siglock);
@@ -2653,6 +2549,7 @@ sys_signal(int sig, __sighandler_t handler)
 
 	new_sa.sa.sa_handler = handler;
 	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
+	sigemptyset(&new_sa.sa.sa_mask);
 
 	ret = do_sigaction(sig, &new_sa, &old_sa);
 
@@ -2672,6 +2569,32 @@ sys_pause(void)
 
 #endif
 
+#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
+asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
+{
+	sigset_t newset;
+
+	/* XXX: Don't preclude handling different sized sigset_t's.  */
+	if (sigsetsize != sizeof(sigset_t))
+		return -EINVAL;
+
+	if (copy_from_user(&newset, unewset, sizeof(newset)))
+		return -EFAULT;
+	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
+
+	spin_lock_irq(&current->sighand->siglock);
+	current->saved_sigmask = current->blocked;
+	current->blocked = newset;
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
+
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+	set_thread_flag(TIF_RESTORE_SIGMASK);
+	return -ERESTARTNOHAND;
+}
+#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
+
 void __init signals_init(void)
 {
 	sigqueue_cachep =
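
The new sys_rt_sigsuspend() above depends on the TIF_RESTORE_SIGMASK convention: the old mask is parked in current->saved_sigmask, and the flag tells the architecture's signal-delivery code to restore it at the right moment. The consuming side, in each arch's do_signal(), follows roughly this pattern (a sketch of the arch-side idiom, not code from this patch):

	/* no signal was delivered: put the original mask back */
	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
		clear_thread_flag(TIF_RESTORE_SIGMASK);
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}

If a handler is delivered instead, the saved mask is used when setting up the signal frame, which is what makes sigsuspend()'s unblock-and-wait atomic with respect to the arriving signal.
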