* Copyright (C) 1991, 1992 Linus Torvalds
*/
+#include <linux/config.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/ptrace.h>
-#include <linux/tracehook.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/mempolicy.h>
-#include <linux/taskstats_kern.h>
-#include <linux/delayacct.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
-#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
-#include <linux/futex.h>
-#include <linux/compat.h>
-#include <linux/pipe_fs_i.h>
-#include <linux/audit.h> /* for audit_free() */
-#include <linux/resource.h>
#include <linux/vs_limit.h>
#include <linux/vs_context.h>
#include <linux/vs_network.h>
+#include <linux/vs_cvirt.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
extern void sem_exit (void);
extern struct task_struct *child_reaper;
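+/* getrusage() is used by the wait*() paths below (e.g. wait_noreap_copyout()) */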
+int getrusage(struct task_struct *, int, struct rusage __user *);
+
static void exit_mm(struct task_struct * tsk);
static void __unhash_process(struct task_struct *p)
{
nr_threads--;
detach_pid(p, PIDTYPE_PID);
+ detach_pid(p, PIDTYPE_TGID);
if (thread_group_leader(p)) {
detach_pid(p, PIDTYPE_PGID);
detach_pid(p, PIDTYPE_SID);
-
- list_del_rcu(&p->tasks);
- __get_cpu_var(process_counts)--;
- }
- list_del_rcu(&p->thread_group);
- remove_parent(p);
-}
-
-/*
- * This function expects the tasklist_lock write-locked.
- */
-static void __exit_signal(struct task_struct *tsk)
-{
- struct signal_struct *sig = tsk->signal;
- struct sighand_struct *sighand;
-
- BUG_ON(!sig);
- BUG_ON(!atomic_read(&sig->count));
-
- rcu_read_lock();
- sighand = rcu_dereference(tsk->sighand);
- spin_lock(&sighand->siglock);
-
- posix_cpu_timers_exit(tsk);
- if (atomic_dec_and_test(&sig->count))
- posix_cpu_timers_exit_group(tsk);
- else {
- /*
- * If there is any task waiting for the group exit
- * then notify it:
- */
- if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
- wake_up_process(sig->group_exit_task);
- sig->group_exit_task = NULL;
- }
- if (tsk == sig->curr_target)
- sig->curr_target = next_thread(tsk);
- /*
- * Accumulate here the counters for all threads but the
- * group leader as they die, so they can be added into
- * the process-wide totals when those are taken.
- * The group leader stays around as a zombie as long
- * as there are other threads. When it gets reaped,
- * the exit.c code will add its counts into these totals.
- * We won't ever get here for the group leader, since it
- * will have been the last reference on the signal_struct.
- */
- sig->utime = cputime_add(sig->utime, tsk->utime);
- sig->stime = cputime_add(sig->stime, tsk->stime);
- sig->min_flt += tsk->min_flt;
- sig->maj_flt += tsk->maj_flt;
- sig->nvcsw += tsk->nvcsw;
- sig->nivcsw += tsk->nivcsw;
- sig->sched_time += tsk->sched_time;
- sig = NULL; /* Marker for below. */
- }
-
- __unhash_process(tsk);
-
- tsk->signal = NULL;
- tsk->sighand = NULL;
- spin_unlock(&sighand->siglock);
- rcu_read_unlock();
-
- __cleanup_sighand(sighand);
- clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
- flush_sigqueue(&tsk->pending);
- if (sig) {
- flush_sigqueue(&sig->shared_pending);
- __cleanup_signal(sig);
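+ /* boot-time idle threads (pid 0) were never added to process_counts */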
+ if (p->pid)
+ __get_cpu_var(process_counts)--;
}
-}
-static void delayed_put_task_struct(struct rcu_head *rhp)
-{
- put_task_struct(container_of(rhp, struct task_struct, rcu));
+ REMOVE_LINKS(p);
}
void release_task(struct task_struct * p)
{
- struct task_struct *leader;
int zap_leader;
-repeat:
- tracehook_release_task(p);
+ task_t *leader;
+ struct dentry *proc_dentry;
+
+repeat:
atomic_dec(&p->user->processes);
+ spin_lock(&p->proc_lock);
+ proc_dentry = proc_pid_unhash(p);
write_lock_irq(&tasklist_lock);
- BUG_ON(tracehook_check_released(p));
+ if (unlikely(p->ptrace))
+ __ptrace_unlink(p);
+ BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
__exit_signal(p);
+ /*
+ * Note that the fastpath in sys_times depends on __exit_signal having
+ * updated the counters before a task is removed from the tasklist of
+ * the process by __unhash_process.
+ */
+ __unhash_process(p);
/*
* If we are the last non-leader member of the thread
sched_exit(p);
write_unlock_irq(&tasklist_lock);
- proc_flush_task(p);
+ spin_unlock(&p->proc_lock);
+ proc_pid_flush(proc_dentry);
release_thread(p);
- call_rcu(&p->rcu, delayed_put_task_struct);
+ put_task_struct(p);
p = leader;
if (unlikely(zap_leader))
goto repeat;
}
+/* we are using it only for SMP init */
+
+void unhash_process(struct task_struct *p)
+{
+ struct dentry *proc_dentry;
+
+ spin_lock(&p->proc_lock);
+ proc_dentry = proc_pid_unhash(p);
+ write_lock_irq(&tasklist_lock);
+ __unhash_process(p);
+ write_unlock_irq(&tasklist_lock);
+ spin_unlock(&p->proc_lock);
+ proc_pid_flush(proc_dentry);
+}
+
/*
* This checks not only the pgrp, but falls back on the pid if no
* satisfactory pgrp is found. I dunno - gdb doesn't work correctly
*
* "I ask you, have you ever known what it is to be an orphan?"
*/
-static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task)
+static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
{
struct task_struct *p;
int ret = 1;
do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
if (p == ignored_task
|| p->exit_state
- || p->parent->pid == 1)
+ || p->real_parent->pid == 1)
continue;
- if (process_group(p->parent) != pgrp
- && p->parent->signal->session == p->signal->session) {
+ if (process_group(p->real_parent) != pgrp
+ && p->real_parent->signal->session == p->signal->session) {
ret = 0;
break;
}
if (p->state != TASK_STOPPED)
continue;
+ /* If p is stopped by a debugger on a signal that won't
+ stop it, then don't count p as stopped. This isn't
+ perfect but it's a good approximation. */
+ if (unlikely (p->ptrace)
+ && p->exit_code != SIGSTOP
+ && p->exit_code != SIGTSTP
+ && p->exit_code != SIGTTOU
+ && p->exit_code != SIGTTIN)
+ continue;
+
retval = 1;
break;
} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
{
write_lock_irq(&tasklist_lock);
+ ptrace_unlink(current);
/* Reparent to init */
- remove_parent(current);
+ REMOVE_LINKS(current);
current->parent = child_reaper;
- add_parent(current);
+ current->real_parent = child_reaper;
+ SET_LINKS(current);
/* Set the exit signal to SIGCHLD so we signal init on exit */
current->exit_signal = SIGCHLD;
exit_mm(current);
set_special_pids(1, 1);
- proc_clear_tty(current);
+ down(&tty_sem);
+ current->signal->tty = NULL;
+ up(&tty_sem);
/* Block and flush all signals */
sigfillset(&blocked);
}
i++;
set >>= 1;
- cond_resched();
}
}
}
down_read(&mm->mmap_sem);
}
atomic_inc(&mm->mm_count);
- BUG_ON(mm != tsk->active_mm);
+ if (mm != tsk->active_mm) BUG();
/* more a memory barrier than a real lock */
task_lock(tsk);
tsk->mm = NULL;
mmput(mm);
}
-static inline void
-choose_new_parent(struct task_struct *p, struct task_struct *reaper)
+static inline void choose_new_parent(task_t *p, task_t *reaper)
{
/* check for reaper context */
vxwprintk((p->xid != reaper->xid) && (reaper != child_reaper),
* Make sure we're not reparenting to ourselves and that
* the parent is not a zombie.
*/
- BUG_ON(p == reaper || reaper->exit_state);
- p->parent = reaper;
+ BUG_ON(p == reaper || reaper->exit_state >= EXIT_ZOMBIE);
+ p->real_parent = reaper;
}
-static void
-reparent_thread(struct task_struct *p, struct task_struct *father)
+static void reparent_thread(task_t *p, task_t *father, int traced)
{
/* We don't want people slaying init. */
if (p->exit_signal != -1)
group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
/* Move the child from its dying parent to the new one. */
- list_move_tail(&p->sibling, &p->parent->children);
+ if (unlikely(traced)) {
+ /* Preserve ptrace links if someone else is tracing this child. */
+ list_del_init(&p->ptrace_list);
+ if (p->parent != p->real_parent)
+ list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
+ } else {
+ /* If this child is being traced, then we're the one tracing it
+ * anyway, so let go of it.
+ */
+ p->ptrace = 0;
+ list_del_init(&p->sibling);
+ p->parent = p->real_parent;
+ list_add_tail(&p->sibling, &p->parent->children);
- /* If we'd notified the old parent about this child's death,
- * also notify the new parent.
- */
- if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
- thread_group_empty(p))
- do_notify_parent(p, p->exit_signal);
+ /* If we'd notified the old parent about this child's death,
+ * also notify the new parent.
+ */
+ if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
+ thread_group_empty(p))
+ do_notify_parent(p, p->exit_signal);
+ else if (p->state == TASK_TRACED) {
+ /*
+ * If it was at a trace stop, turn it into
+ * a normal stop since it's no longer being
+ * traced.
+ */
+ ptrace_untrace(p);
+ }
+ }
/*
* process group orphan check
* group, and if no such member exists, give it to
* the global child reaper process (ie "init")
*/
-static void
-forget_original_parent(struct task_struct *father)
+static void forget_original_parent(struct task_struct * father,
+ struct list_head *to_release)
{
struct task_struct *p, *reaper = father;
struct list_head *_p, *_n;
}
} while (reaper->exit_state);
+ /*
+ * There are only two places where our children can be:
+ *
+ * - in our child list
+ * - in our ptraced child list
+ *
+ * Search them and reparent children.
+ */
list_for_each_safe(_p, _n, &father->children) {
- p = list_entry(_p, struct task_struct, sibling);
- choose_new_parent(p, vx_child_reaper(p));
- reparent_thread(p, father);
+ int ptrace;
+ p = list_entry(_p,struct task_struct,sibling);
+
+ ptrace = p->ptrace;
+
+ /* if father isn't the real parent, then ptrace must be enabled */
+ BUG_ON(father != p->real_parent && !ptrace);
+
+ if (father == p->real_parent) {
+ /* reparent to a reaper: we are the real parent */
+ choose_new_parent(p, vx_child_reaper(p));
+ reparent_thread(p, father, 0);
+ } else {
+ /* reparent ptraced task to its real parent */
+ __ptrace_unlink (p);
+ if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
+ thread_group_empty(p))
+ do_notify_parent(p, p->exit_signal);
+ }
+
+ /*
+ * if the ptraced child is a zombie with exit_signal == -1
+ * we must collect it before we exit, or it will remain
+ * a zombie forever, since we prevented it from reaping itself
+ * while it was being traced by us, so that we could see it in wait4.
+ */
+ if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
+ list_add(&p->ptrace_list, to_release);
+ }
+ list_for_each_safe(_p, _n, &father->ptrace_children) {
+ p = list_entry(_p,struct task_struct,ptrace_list);
+
+ choose_new_parent(p, reaper);
+ reparent_thread(p, father, 1);
}
}
{
int state;
struct task_struct *t;
- int noreap;
- void *cookie;
+ struct list_head ptrace_dead, *_p, *_n;
if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
&& !thread_group_empty(tsk)) {
* jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
*/
- forget_original_parent(tsk);
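+ /* detached zombies we were ptracing are queued here so we can reap them below */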
+ INIT_LIST_HEAD(&ptrace_dead);
+ forget_original_parent(tsk, &ptrace_dead);
BUG_ON(!list_empty(&tsk->children));
+ BUG_ON(!list_empty(&tsk->ptrace_children));
/*
* Check to see if any process groups have become orphaned
* is about to become orphaned.
*/
- t = tsk->parent;
+ t = tsk->real_parent;
if ((process_group(t) != process_group(tsk)) &&
(t->signal->session == tsk->signal->session) &&
&& !capable(CAP_KILL))
tsk->exit_signal = SIGCHLD;
- if (!tracehook_notify_death(tsk, &noreap, &cookie)
- && tsk->exit_signal != -1 && thread_group_empty(tsk))
- do_notify_parent(tsk, tsk->exit_signal);
+
+ /* If something other than our normal parent is ptracing us, then
+ * send it a SIGCHLD instead of honoring exit_signal. exit_signal
+ * only has special meaning to our real parent.
+ */
+ if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
+ int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
+ do_notify_parent(tsk, signal);
+ } else if (tsk->ptrace) {
+ do_notify_parent(tsk, SIGCHLD);
+ }
state = EXIT_ZOMBIE;
- if (tsk->exit_signal == -1 && !noreap)
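+ /*
+ * A detached task (exit_signal == -1) can self-reap straight to EXIT_DEAD,
+ * but not while a ptracer may still want to wait for it; a tracer whose
+ * whole group is exiting will never wait, so self-reap then as well.
+ */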
+ if (tsk->exit_signal == -1 &&
+ (likely(tsk->ptrace == 0) ||
+ unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT)))
state = EXIT_DEAD;
tsk->exit_state = state;
write_unlock_irq(&tasklist_lock);
- tracehook_report_death(tsk, state, cookie);
+ list_for_each_safe(_p, _n, &ptrace_dead) {
+ list_del_init(_p);
+ t = list_entry(_p,struct task_struct,ptrace_list);
+ release_task(t);
+ }
/* If the process is dead, release it - nobody will wait for it */
if (state == EXIT_DEAD)
fastcall NORET_TYPE void do_exit(long code)
{
struct task_struct *tsk = current;
- struct taskstats *tidstats;
int group_dead;
- unsigned int mycpu;
profile_task_exit(tsk);
panic("Aiee, killing interrupt handler!");
if (unlikely(!tsk->pid))
panic("Attempted to kill the idle task!");
- if (unlikely(tsk == child_reaper))
+ if (unlikely(tsk->pid == 1))
panic("Attempted to kill init!");
+ if (tsk->io_context)
+ exit_io_context();
- tracehook_report_exit(&code);
+ if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
+ current->ptrace_message = code;
+ ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
+ }
/*
* We're taking recursive faults here in do_exit. Safest is to just
if (unlikely(tsk->flags & PF_EXITING)) {
printk(KERN_ALERT
"Fixing recursive fault but reboot is needed!\n");
- if (tsk->io_context)
- exit_io_context();
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
tsk->flags |= PF_EXITING;
- ptrace_exit(tsk);
-
if (unlikely(in_atomic()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
current->comm, current->pid,
preempt_count());
- taskstats_exit_alloc(&tidstats, &mycpu);
-
acct_update_integrals(tsk);
if (tsk->mm) {
update_hiwater_rss(tsk->mm);
if (group_dead) {
hrtimer_cancel(&tsk->signal->real_timer);
exit_itimers(tsk->signal);
+ acct_process(code);
}
-
- if (current->tux_info) {
-#ifdef CONFIG_TUX_DEBUG
- printk("Possibly unexpected TUX-thread exit(%ld) at %p?\n",
- code, __builtin_return_address(0));
-#endif
- current->tux_exit();
- }
-
- acct_collect(code, group_dead);
- if (unlikely(tsk->robust_list))
- exit_robust_list(tsk);
-#if defined(CONFIG_FUTEX) && defined(CONFIG_COMPAT)
- if (unlikely(tsk->compat_robust_list))
- compat_exit_robust_list(tsk);
-#endif
- if (unlikely(tsk->audit_context))
- audit_free(tsk);
- taskstats_exit_send(tsk, tidstats, group_dead, mycpu);
- taskstats_exit_free(tidstats);
-
exit_mm(tsk);
- if (group_dead)
- acct_process();
exit_sem(tsk);
__exit_files(tsk);
__exit_fs(tsk);
tsk->mempolicy = NULL;
#endif
/*
- * This must happen late, after the PID is not
- * hashed anymore:
- */
- if (unlikely(!list_empty(&tsk->pi_state_list)))
- exit_pi_state_list(tsk);
- if (unlikely(current->pi_state_cache))
- kfree(current->pi_state_cache);
- /*
- * Make sure we are holding no locks:
+ * If DEBUG_MUTEXES is on, make sure we are holding no locks:
*/
- debug_check_no_locks_held(tsk);
-
- if (tsk->io_context)
- exit_io_context();
-
- if (tsk->splice_pipe)
- __free_pipe_info(tsk->splice_pipe);
+ mutex_debug_check_no_locks_held(tsk);
/* needs to stay after exit_notify() */
exit_vx_info(tsk, code);
do_exit((error_code&0xff)<<8);
}
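+/* walk the per-TGID pid list to the next task in the thread group */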
+task_t fastcall *next_thread(const task_t *p)
+{
+ return pid_task(p->pids[PIDTYPE_TGID].pid_list.next, PIDTYPE_TGID);
+}
+
+EXPORT_SYMBOL(next_thread);
+
/*
* Take down every thread in the group. This is called by fatal signals
* as well as by sys_exit_group (below).
else if (!thread_group_empty(current)) {
struct signal_struct *const sig = current->signal;
struct sighand_struct *const sighand = current->sighand;
+ read_lock(&tasklist_lock);
spin_lock_irq(&sighand->siglock);
if (sig->flags & SIGNAL_GROUP_EXIT)
/* Another thread got here before we took the lock. */
zap_other_threads(current);
}
spin_unlock_irq(&sighand->siglock);
+ read_unlock(&tasklist_lock);
}
do_exit(exit_code);
do_group_exit((error_code & 0xff) << 8);
}
-static int eligible_child(pid_t pid, int options, struct task_struct *p)
+static int eligible_child(pid_t pid, int options, task_t *p)
{
if (pid > 0) {
if (p->pid != pid)
}
/*
- * Do not consider detached threads.
+ * Do not consider detached threads that are
+ * not ptraced:
*/
- if (p->exit_signal == -1)
+ if (p->exit_signal == -1 && !p->ptrace)
return 0;
/* Wait for all children (clone and not) if __WALL is set;
* Do not consider thread group leaders that are
* in a non-empty thread group:
*/
- if (delay_group_leader(p))
+ if (current->tgid != p->tgid && delay_group_leader(p))
return 2;
if (security_task_wait(p))
return 1;
}
-static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
+static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
int why, int status,
struct siginfo __user *infop,
struct rusage __user *rusagep)
{
int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;
-
put_task_struct(p);
if (!retval)
retval = put_user(SIGCHLD, &infop->si_signo);
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
-static int wait_task_zombie(struct task_struct *p, int noreap,
+static int wait_task_zombie(task_t *p, int noreap,
struct siginfo __user *infop,
int __user *stat_addr, struct rusage __user *ru)
{
if (unlikely(p->exit_state != EXIT_ZOMBIE))
return 0;
- if (unlikely(p->exit_signal == -1))
+ if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
return 0;
get_task_struct(p);
read_unlock(&tasklist_lock);
BUG_ON(state != EXIT_DEAD);
return 0;
}
- BUG_ON(p->exit_signal == -1);
+ if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
+ /*
+ * This can only happen in a race with a ptraced thread
+ * dying on another processor.
+ */
+ return 0;
+ }
- if (likely(p->signal)) {
+ if (likely(p->real_parent == p->parent) && likely(p->signal)) {
struct signal_struct *psig;
struct signal_struct *sig;
return retval;
}
retval = p->pid;
- release_task(p);
-
+ if (p->real_parent != p->parent) {
+ write_lock_irq(&tasklist_lock);
+ /* Double-check with lock held. */
+ if (p->real_parent != p->parent) {
+ __ptrace_unlink(p);
+ // TODO: is this safe?
+ p->exit_state = EXIT_ZOMBIE;
+ /*
+ * If this is not a detached task, notify the parent.
+ * If it's still not detached after that, don't release
+ * it now.
+ */
+ if (p->exit_signal != -1) {
+ do_notify_parent(p, p->exit_signal);
+ if (p->exit_signal != -1)
+ p = NULL;
+ }
+ }
+ write_unlock_irq(&tasklist_lock);
+ }
+ if (p != NULL)
+ release_task(p);
BUG_ON(!retval);
return retval;
}
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
-static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
- int noreap, struct siginfo __user *infop,
+static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
+ struct siginfo __user *infop,
int __user *stat_addr, struct rusage __user *ru)
{
int retval, exit_code;
if (!p->exit_code)
return 0;
- if (delayed_group_leader &&
+ if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
p->signal && p->signal->group_stop_count > 0)
/*
* A group stop is in progress and this is the group leader.
if (unlikely(noreap)) {
pid_t pid = p->pid;
uid_t uid = p->uid;
+ int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
exit_code = p->exit_code;
if (unlikely(!exit_code) ||
unlikely(p->state & TASK_TRACED))
goto bail_ref;
- return wait_noreap_copyout(p, pid, uid, CLD_STOPPED,
- (exit_code << 8) | 0x7f,
+ return wait_noreap_copyout(p, pid, uid,
+ why, (exit_code << 8) | 0x7f,
infop, ru);
}
/* move to end of parent's list to avoid starvation */
remove_parent(p);
- add_parent(p);
+ add_parent(p, p->parent);
write_unlock_irq(&tasklist_lock);
if (!retval && infop)
retval = put_user(0, &infop->si_errno);
if (!retval && infop)
- retval = put_user((short)CLD_STOPPED, &infop->si_code);
+ retval = put_user((short)((p->ptrace & PT_PTRACED)
+ ? CLD_TRAPPED : CLD_STOPPED),
+ &infop->si_code);
if (!retval && infop)
retval = put_user(exit_code, &infop->si_status);
if (!retval && infop)
* the lock and this task is uninteresting. If we return nonzero, we have
* released the lock and the system call should return.
*/
-static int wait_task_continued(struct task_struct *p, int noreap,
+static int wait_task_continued(task_t *p, int noreap,
struct siginfo __user *infop,
int __user *stat_addr, struct rusage __user *ru)
{
}
+static inline int my_ptrace_child(struct task_struct *p)
+{
+ if (!(p->ptrace & PT_PTRACED))
+ return 0;
+ if (!(p->ptrace & PT_ATTACHED))
+ return 1;
+ /*
+ * This child was PTRACE_ATTACH'd. We should be seeing it only if
+ * we are the attacher. If we are the real parent, this is a race
+ * inside ptrace_attach. It is waiting for the tasklist_lock,
+ * which it needs to switch the parent links, but has already set
+ * the flags in p->ptrace.
+ */
+ return (p->parent != p->real_parent);
+}
+
static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
int __user *stat_addr, struct rusage __user *ru)
{
int ret;
list_for_each(_p,&tsk->children) {
- p = list_entry(_p, struct task_struct, sibling);
+ p = list_entry(_p,struct task_struct,sibling);
ret = eligible_child(pid, options, p);
if (!ret)
switch (p->state) {
case TASK_TRACED:
+ /*
+ * When we hit the race with PTRACE_ATTACH,
+ * we will not report this child. But the
+ * race means it has not yet been moved to
+ * our ptrace_children list, so we need to
+ * set the flag here to avoid a spurious ECHILD
+ * when the race happens with the only child.
+ */
flag = 1;
- continue;
+ if (!my_ptrace_child(p))
+ continue;
+ /*FALLTHROUGH*/
case TASK_STOPPED:
/*
* It's stopped now, so it might later
* continue, exit, or stop again.
*/
flag = 1;
- if (!(options & WUNTRACED))
- continue;
- if (tracehook_inhibit_wait_stopped(p))
+ if (!(options & WUNTRACED) &&
+ !my_ptrace_child(p))
continue;
retval = wait_task_stopped(p, ret == 2,
(options & WNOWAIT),
goto check_continued;
if (!likely(options & WEXITED))
continue;
- if (tracehook_inhibit_wait_zombie(p)) {
- flag = 1;
- continue;
- }
retval = wait_task_zombie(
p, (options & WNOWAIT),
infop, stat_addr, ru);
flag = 1;
if (!unlikely(options & WCONTINUED))
continue;
- if (tracehook_inhibit_wait_continued(p))
- continue;
retval = wait_task_continued(
p, (options & WNOWAIT),
infop, stat_addr, ru);
break;
}
}
-
- retval = ptrace_do_wait(tsk, pid, options,
- infop, stat_addr, ru);
- if (retval != -ECHILD) {
- flag = 1;
- if (retval != 0) /* He released the lock. */
- goto end;
+ if (!flag) {
+ list_for_each(_p, &tsk->ptrace_children) {
+ p = list_entry(_p, struct task_struct,
+ ptrace_list);
+ if (!eligible_child(pid, options, p))
+ continue;
+ flag = 1;
+ break;
+ }
}
-
if (options & __WNOTHREAD)
break;
tsk = next_thread(tsk);
- BUG_ON(tsk->signal != current->signal);
+ if (tsk->signal != current->signal)
+ BUG();
} while (tsk != current);
read_unlock(&tasklist_lock);
remove_wait_queue(&current->signal->wait_chldexit,&wait);
if (infop) {
if (retval > 0)
- retval = 0;
+ retval = 0;
else {
/*
* For a WNOHANG return, clear out all the fields