+struct ptrace_state
+{
+ /*
+ * These elements are always available, even when the struct is
+ * awaiting destruction at the next RCU callback point.
+ */
+ struct utrace_attached_engine *engine;
+ struct task_struct *task; /* Target task. */
+ struct task_struct *parent; /* Whom we report to. */
+ struct list_head entry; /* Entry on parent->ptracees list. */
+
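+ /*
+ * u.live is meaningful while the engine is attached and tracing is
+ * in progress; once the relationship ends, u.dead carries the RCU
+ * callback that ptrace_done() uses to free this struct.
+ */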
+ union {
+ struct rcu_head dead;
+ struct {
+ u8 options; /* PTRACE_SETOPTIONS bits. */
+ unsigned int stopped:1; /* Stopped for report. */
+ unsigned int reported:1; /* wait already reported. */
+ unsigned int syscall:1; /* Reporting for syscall. */
+#ifdef PTRACE_SYSEMU
+ unsigned int sysemu:1; /* PTRACE_SYSEMU in progress. */
+#endif
+ unsigned int have_eventmsg:1; /* u.eventmsg valid. */
+ unsigned int cap_sys_ptrace:1; /* Tracer capable. */
+
+ union {
+ unsigned long eventmsg;
+ siginfo_t *siginfo;
+ } u;
+ } live;
+ } u;
+};
+
+static const struct utrace_engine_ops ptrace_utrace_ops; /* Initialized below. */
+
+
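+/*
+ * ptrace_state_link and ptrace_state_unlink maintain the tracee's
+ * entry on its tracer's ->ptracees list.  The list is modified under
+ * the tracer's task_lock() and uses the RCU list primitives, so it
+ * can be walked under rcu_read_lock().
+ */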
+static void
+ptrace_state_link(struct ptrace_state *state)
+{
+ task_lock(state->parent);
+ list_add_rcu(&state->entry, &state->parent->ptracees);
+ task_unlock(state->parent);
+}
+
+static void
+ptrace_state_unlink(struct ptrace_state *state)
+{
+ task_lock(state->parent);
+ list_del_rcu(&state->entry);
+ task_unlock(state->parent);
+}
+
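+/*
+ * Establish a new tracing relationship: allocate the ptrace_state,
+ * record the tracer and options, link it on the tracer's ->ptracees
+ * list, and stash it in the engine's data field.
+ */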
+static int
+ptrace_setup(struct task_struct *target, struct utrace_attached_engine *engine,
+ struct task_struct *parent, u8 options, int cap_sys_ptrace)
+{
+ struct ptrace_state *state = kzalloc(sizeof *state, GFP_USER);
+ if (unlikely(state == NULL))
+ return -ENOMEM;
+
+ state->engine = engine;
+ state->task = target;
+ state->parent = parent;
+ state->u.live.options = options;
+ state->u.live.cap_sys_ptrace = cap_sys_ptrace;
+ ptrace_state_link(state);
+
+ BUG_ON(engine->data != 0);
+ rcu_assign_pointer(engine->data, (unsigned long) state);
+
+ return 0;
+}
+
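+/*
+ * RCU callback to free the state once a grace period has elapsed.
+ */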
+static void
+ptrace_state_free(struct rcu_head *rhead)
+{
+ struct ptrace_state *state = container_of(rhead,
+ struct ptrace_state, u.dead);
+ kfree(state);
+}
+
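+/*
+ * The tracing relationship is finished.  Free the state via RCU so
+ * that anyone still walking the ->ptracees list under rcu_read_lock()
+ * sees a valid (if dying) entry.
+ */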
+static void
+ptrace_done(struct ptrace_state *state)
+{
+ INIT_RCU_HEAD(&state->u.dead);
+ call_rcu(&state->u.dead, ptrace_state_free);
+}
+
+/*
+ * Update the tracing engine's event mask and action flags to match the
+ * new ptrace state.  The caller passes in the base flags it wants set
+ * (e.g. UTRACE_ACTION_QUIESCE); the events ptrace always needs are
+ * added here before calling utrace_set_flags().
+ */
+static void
+ptrace_update(struct task_struct *target, struct utrace_attached_engine *engine,
+ unsigned long flags)
+{
+ struct ptrace_state *state = (struct ptrace_state *) engine->data;
+
+ /*
+ * These events are always reported.
+ */
+ flags |= (UTRACE_EVENT(DEATH) | UTRACE_EVENT(EXEC)
+ | UTRACE_EVENT_SIGNAL_ALL);
+
+ /*
+ * We always have to examine clone events to check for CLONE_PTRACE.
+ */
+ flags |= UTRACE_EVENT(CLONE);
+
+ /*
+ * PTRACE_SETOPTIONS can request more events.
+ */
+ if (state->u.live.options & PTRACE_O_TRACEEXIT)
+ flags |= UTRACE_EVENT(EXIT);
+ if (state->u.live.options & PTRACE_O_TRACEVFORKDONE)
+ flags |= UTRACE_EVENT(VFORK_DONE);
+
+ /*
+ * ptrace always inhibits normal parent reaping.
+ * But for a corner case we sometimes see the REAP event instead.
+ */
+ flags |= UTRACE_ACTION_NOREAP | UTRACE_EVENT(REAP);
+
+ state->u.live.stopped = (flags & UTRACE_ACTION_QUIESCE) != 0;
+ if (!state->u.live.stopped) {
+ if (!state->u.live.have_eventmsg)
+ state->u.live.u.siginfo = NULL;
+ if (!(target->flags & PF_EXITING))
+ target->exit_code = 0;
+ }
+ utrace_set_flags(target, engine, flags);
+}
+
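+/*
+ * Handle PTRACE_TRACEME: current asks to be traced by its parent.
+ * Attach an exclusive engine to current, check security_ptrace()
+ * against the parent, and set up the initial ptrace state.
+ */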
+static int ptrace_traceme(void)
+{
+ struct utrace_attached_engine *engine;
+ int retval;
+
+ engine = utrace_attach(current, (UTRACE_ATTACH_CREATE
+ | UTRACE_ATTACH_EXCLUSIVE
+ | UTRACE_ATTACH_MATCH_OPS),
+ &ptrace_utrace_ops, 0UL);
+
+ if (IS_ERR(engine)) {
+ retval = PTR_ERR(engine);
+ if (retval == -EEXIST)
+ retval = -EPERM;
+ } else {
+ task_lock(current);
+ retval = security_ptrace(current->parent, current);
+ task_unlock(current);
+ if (!retval)
+ retval = ptrace_setup(current, engine,
+ current->parent, 0, 0);
+ if (retval)
+ utrace_detach(current, engine);
+ else
+ ptrace_update(current, engine, 0);
+ }
+
+ return retval;
+}
+
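+/*
+ * Handle PTRACE_ATTACH: attach an exclusive engine to the target,
+ * set up the ptrace state with current as tracer, and send SIGSTOP
+ * so the new tracee stops.
+ */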
+static int ptrace_attach(struct task_struct *task)
+{
+ struct utrace_attached_engine *engine;
+ int retval;
+
+ retval = -EPERM;
+ if (task->pid <= 1)
+ goto bad;
+ if (task->tgid == current->tgid)
+ goto bad;
+ if (!task->mm) /* kernel threads */
+ goto bad;
+
+ engine = utrace_attach(task, (UTRACE_ATTACH_CREATE
+ | UTRACE_ATTACH_EXCLUSIVE
+ | UTRACE_ATTACH_MATCH_OPS),
+ &ptrace_utrace_ops, 0);
+ if (IS_ERR(engine)) {
+ retval = PTR_ERR(engine);
+ if (retval == -EEXIST)
+ retval = -EPERM;
+ goto bad;
+ }
+
+ if (ptrace_may_attach(task))
+ retval = ptrace_setup(task, engine, current, 0,
+ capable(CAP_SYS_PTRACE));
+ if (retval) {
+ utrace_detach(task, engine);
+ } else {
+ int stopped;
+
+ /* Go: arm the engine, then stop the new tracee with SIGSTOP. */
+ ptrace_update(task, engine, 0);
+ force_sig_specific(SIGSTOP, task);
+
+ spin_lock_irq(&task->sighand->siglock);
+ stopped = (task->state == TASK_STOPPED);
+ spin_unlock_irq(&task->sighand->siglock);
+
+ if (stopped) {
+ /*
+ * Do the regset 0 writeback now, as is normally done on every
+ * stop, since it has never yet been done for this task. On
+ * register window machines, this makes sure the user memory
+ * backing the register data is up to date.
+ */
+ const struct utrace_regset *regset;
+ regset = utrace_regset(task, engine,
+ utrace_native_view(task), 0);
+ if (regset->writeback)
+ (*regset->writeback)(task, regset, 1);