- /*
- * ptrace always inhibits normal parent reaping.
- * But for a corner case we sometimes see the REAP event instead.
- */
- flags |= UTRACE_ACTION_NOREAP | UTRACE_EVENT(REAP);
-
- state->u.live.stopped = (flags & UTRACE_ACTION_QUIESCE) != 0;
- if (!state->u.live.stopped) {
- if (!state->u.live.have_eventmsg)
- state->u.live.u.siginfo = NULL;
- if (!(target->flags & PF_EXITING))
- target->exit_code = 0;
- }
- utrace_set_flags(target, engine, flags);
-}
-
-/*
- * ptrace_traceme  --  implements PTRACE_TRACEME for the current task.
- *
- * Attaches an exclusive ptrace engine to current so that its parent
- * becomes its tracer.  Returns 0 on success or a negative errno:
- * -EPERM when a ptrace engine is already attached (utrace's -EEXIST is
- * remapped), or the error from the security hook / ptrace_setup.
- */
-static int ptrace_traceme(void)
-{
-	struct utrace_attached_engine *engine;
-	int retval;
-
-	/*
-	 * EXCLUSIVE|MATCH_OPS makes utrace_attach fail with -EEXIST if a
-	 * ptrace engine (same ops) is already attached to this task.
-	 */
-	engine = utrace_attach(current, (UTRACE_ATTACH_CREATE
-					 | UTRACE_ATTACH_EXCLUSIVE
-					 | UTRACE_ATTACH_MATCH_OPS),
-				&ptrace_utrace_ops, 0UL);
-
-	if (IS_ERR(engine)) {
-		retval = PTR_ERR(engine);
-		/* Already traced: report "operation not permitted". */
-		if (retval == -EEXIST)
-			retval = -EPERM;
-	}
-	else {
-		/*
-		 * task_lock keeps current->parent stable while the
-		 * security hook inspects the tracer/tracee pair.
-		 */
-		task_lock(current);
-		retval = security_ptrace(current->parent, current);
-		task_unlock(current);
-		if (!retval)
-			retval = ptrace_setup(current, engine,
-					      current->parent, 0, 0);
-		/* Any failure: undo the attach so no engine is left behind. */
-		if (retval)
-			utrace_detach(current, engine);
-		else
-			ptrace_update(current, engine, 0);
-	}
-
-	return retval;
-}
-
-/*
- * ptrace_attach  --  implements PTRACE_ATTACH on @task.
- *
- * Attaches an exclusive ptrace engine with current as tracer, sends
- * SIGSTOP, and if the target has already stopped performs the initial
- * regset 0 writeback.  Returns 0 on success or a negative errno:
- * -EPERM for init, a thread of our own group, a kernel thread, an
- * already-traced target, or a failed permission check; otherwise the
- * error from utrace_attach / ptrace_setup.
- */
-static int ptrace_attach(struct task_struct *task)
-{
-	struct utrace_attached_engine *engine;
-	int retval;
-
-	retval = -EPERM;
-	if (task->pid <= 1)		/* never attach to init */
-		goto bad;
-	if (task->tgid == current->tgid) /* no tracing our own group */
-		goto bad;
-	if (!task->mm)			/* kernel threads */
-		goto bad;
-
-	/*
-	 * EXCLUSIVE|MATCH_OPS: fail with -EEXIST if another tracer already
-	 * has a ptrace engine on @task.  Pass 0UL for the data argument,
-	 * matching ptrace_traceme (the parameter is unsigned long).
-	 */
-	engine = utrace_attach(task, (UTRACE_ATTACH_CREATE
-				      | UTRACE_ATTACH_EXCLUSIVE
-				      | UTRACE_ATTACH_MATCH_OPS),
-				&ptrace_utrace_ops, 0UL);
-	if (IS_ERR(engine)) {
-		retval = PTR_ERR(engine);
-		if (retval == -EEXIST)
-			retval = -EPERM;
-		goto bad;
-	}
-
-	/* retval stays -EPERM if the permission check refuses us. */
-	if (ptrace_may_attach(task))
-		retval = ptrace_setup(task, engine, current, 0,
-				      capable(CAP_SYS_PTRACE));
-	if (retval)
-		utrace_detach(task, engine);
-	else {
-		int stopped;
-
-		/* Go */
-		ptrace_update(task, engine, 0);
-		force_sig_specific(SIGSTOP, task);
-
-		/*
-		 * NOTE(review): this samples task->state once under the
-		 * siglock; the target may stop just after we look.
-		 * Presumably a later report callback does the writeback in
-		 * that window -- confirm.
-		 */
-		spin_lock_irq(&task->sighand->siglock);
-		stopped = (task->state == TASK_STOPPED);
-		spin_unlock_irq(&task->sighand->siglock);
-
-		if (stopped) {
-			/*
-			 * Do now the regset 0 writeback that we do on every
-			 * stop, since it's never been done.  On register
-			 * window machines, this makes sure the user memory
-			 * backing the register data is up to date.
-			 */
-			const struct utrace_regset *regset;
-			regset = utrace_regset(task, engine,
-					       utrace_native_view(task), 0);
-			if (regset->writeback)
-				(*regset->writeback)(task, regset, 1);
-		}
-	}
-
-bad:
-	return retval;
-}
-
-/*
- * ptrace_detach  --  drop our ptrace engine from @task.
- *
- * Clears the engine's state pointer, detaches the utrace engine, then
- * unlinks and frees the per-tracee state.  Always returns 0.
- */
-static int ptrace_detach(struct task_struct *task,
-			 struct utrace_attached_engine *engine)
-{
-	/* Our per-tracee bookkeeping hangs off engine->data. */
-	struct ptrace_state *state = (struct ptrace_state *) engine->data;
-	/*
-	 * Clearing ->data before detach makes sure an unrelated task
-	 * calling into ptrace_tracer_task won't try to touch stale state.
-	 */
-	rcu_assign_pointer(engine->data, 0UL);
-	utrace_detach(task, engine);
-	/*
-	 * NOTE(review): readers that loaded engine->data before the clear
-	 * above must be done before state is freed; presumably ptrace_done
-	 * defers the free past an RCU grace period -- confirm.
-	 */
-	ptrace_state_unlink(state);
-	ptrace_done(state);
-	return 0;
-}
-
-
-/*
- * This is called when we are exiting. We must stop all our ptracing.
- */
-void
-ptrace_exit(struct task_struct *tsk)
-{
- rcu_read_lock();
- if (unlikely(!list_empty(&tsk->ptracees))) {
- struct ptrace_state *state, *next;
-
- /*
- * First detach the utrace layer from all the tasks.
- * We don't want to hold any locks while calling utrace_detach.
- */
- list_for_each_entry_rcu(state, &tsk->ptracees, entry) {
- rcu_assign_pointer(state->engine->data, 0UL);
- utrace_detach(state->task, state->engine);
- }
-
- /*
- * Now clear out our list and clean up our data structures.
- * The task_lock protects our list structure.
- */
- task_lock(tsk);
- list_for_each_entry_safe(state, next, &tsk->ptracees, entry) {
- list_del_rcu(&state->entry);
- ptrace_done(state);