+#endif
+
+
+/*
+ * Detach the zombie being reported for wait.
+ */
+static inline void
+detach_zombie(struct task_struct *tsk,
+ struct task_struct *p, struct ptrace_state *state)
+{
+ int detach_error;
+ struct utrace_attached_engine *engine;
+
+restart:
+ detach_error = 0;
+ rcu_read_lock();
+ if (tsk == current) {
+ engine = state->engine;
+ } else {
+ /*
+ * We've excluded other ptrace_do_wait calls. But the
+ * ptracer itself might have done ptrace_detach while we
+ * did not have rcu_read_lock. So double-check that state
+ * is still valid.
+ */
+ engine = utrace_attach(
+ p, (UTRACE_ATTACH_MATCH_OPS
+ | UTRACE_ATTACH_MATCH_DATA),
+ &ptrace_utrace_ops,
+ (unsigned long) state);
+ if (IS_ERR(engine) || state->parent != tsk)
+ detach_error = -ESRCH;
+ else
+ BUG_ON(state->engine != engine);
+ }
+ rcu_read_unlock();
+ if (likely(!detach_error))
+ detach_error = ptrace_detach(p, engine, state);
+ if (unlikely(detach_error == -EALREADY)) {
+ /*
+ * It's still doing report_death callbacks.
+ * Just wait for it to settle down.
+ */
+ wait_task_inactive(p); /* Might block. */
+ goto restart;
+ }
+ /*
+ * A failure with -ESRCH means that report_reap is
+ * already running and will do the cleanup, or that
+ * we lost a race with ptrace_detach in another
+ * thread or with the automatic detach in
+ * report_death.
+ */
+ if (detach_error)
+ BUG_ON(detach_error != -ESRCH);
+}
+
+/*
+ * We're called with tasklist_lock held for reading.
+ * If we return -ECHILD or zero, next_thread(tsk) must still be valid to use.
+ * If we return another error code, or a successful PID value, we
+ * release tasklist_lock first.
+ */
+int
+ptrace_do_wait(struct task_struct *tsk,
+ pid_t pid, int options, struct siginfo __user *infop,
+ int __user *stat_addr, struct rusage __user *rusagep)
+{
+ struct ptrace_state *state;
+ struct task_struct *p;
+ int err = -ECHILD;
+ int exit_code, why, status;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(state, &tsk->ptracees, entry) {
+ p = state->task;
+
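+ /*
+ * Apply the wait4 pid argument conventions: pid > 0 waits
+ * for one specific child, 0 for any child in our own
+ * process group, -1 for any child at all, and pid < -1
+ * for any child in process group -pid.
+ */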
+ if (pid > 0) {
+ if (p->pid != pid)
+ continue;
+ } else if (!pid) {
+ if (process_group(p) != process_group(current))
+ continue;
+ } else if (pid != -1) {
+ if (process_group(p) != -pid)
+ continue;
+ }
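+ /*
+ * A clone child (exit_signal != SIGCHLD) is eligible only
+ * with __WCLONE, and an ordinary child only without it,
+ * unless __WALL asks for both kinds.
+ */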
+ if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
+ && !(options & __WALL))
+ continue;
+ if (security_task_wait(p))
+ continue;
+
+ /*
+ * This is a matching child. If we don't win now, tell
+ * our caller to block and repeat. From this point we
+ * must ensure that wait_chldexit will get a wakeup for
+ * any tracee stopping, dying, or being detached.
+ * For death, tasklist_lock guarantees this already.
+ */
+ err = 0;
+
+ switch (p->exit_state) {
+ case EXIT_ZOMBIE:
+ if (unlikely(!(options & WEXITED)))
+ continue;
+ if (delay_group_leader(p)) {
+ struct task_struct *next = next_thread(p);
+ pr_debug("%d ptrace_do_wait leaving %d "
+ "zombie code %x "
+ "delay_group_leader (%d/%lu)\n",
+ current->pid, p->pid, p->exit_code,
+ next->pid, next->state);
+ continue;
+ }
+ exit_code = p->exit_code;
+ goto found;
+ case EXIT_DEAD:
+ continue;
+ default:
+ /*
+ * tasklist_lock holds up any transitions to
+ * EXIT_ZOMBIE. After releasing it we are
+ * guaranteed a wakeup on wait_chldexit after
+ * any new deaths.
+ */
+ if (p->flags & PF_EXITING)
+ /*
+ * It's in do_exit and might have set
+ * p->exit_code already, but it's not quite
+ * dead yet. It will get to report_death
+ * and wake us up when it finishes.
+ */
+ continue;
+ break;
+ }
+
+ /*
+ * This xchg atomically ensures that only one do_wait
+ * call can report this thread. Because exit_code is
+ * always set before do_notify wakes us up, after this
+ * check fails we are sure to get a wakeup if it stops.
+ */
+ exit_code = xchg(&p->exit_code, 0);
+ if (exit_code)
+ goto found;
+
+ /* XXX should handle WCONTINUED */
+
+ pr_debug("%d ptrace_do_wait leaving %d state %lu code %x\n",
+ current->pid, p->pid, p->state, p->exit_code);
+ }
+ rcu_read_unlock();
+ if (err == 0)
+ pr_debug("%d ptrace_do_wait blocking\n", current->pid);
+
+ return err;
+
+found:
+ BUG_ON(state->parent != tsk);
+ rcu_read_unlock();
+
+ pr_debug("%d ptrace_do_wait (%d) found %d code %x (%lu/%d)\n",
+ current->pid, tsk->pid, p->pid, exit_code,
+ p->exit_state, p->exit_signal);
+
+ /*
+ * If there was a group exit in progress, all threads report that
+ * status. Most will have SIGKILL in their own exit_code.
+ */
+ if (p->signal->flags & SIGNAL_GROUP_EXIT)
+ exit_code = p->signal->group_exit_code;
+
+ if (p->exit_state) {
+ if (unlikely(p->parent == tsk && p->exit_signal != -1))
+ /*
+ * This is our natural child we were ptracing.
+ * When it dies it detaches (see ptrace_report_death).
+ * So we're seeing it here in a race. When it
+ * finishes detaching it will become reapable in
+ * the normal wait_task_zombie path instead.
+ */
+ return 0;
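+ /*
+ * Decode exit_code by the wait status conventions: zero
+ * low 7 bits mean a normal exit with the status in the
+ * next 8 bits; otherwise they are the termination signal,
+ * with 0x80 set when a core dump was written.
+ */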
+ if ((exit_code & 0x7f) == 0) {
+ why = CLD_EXITED;
+ status = exit_code >> 8;
+ } else {
+ why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
+ status = exit_code & 0x7f;
+ }
+ } else {
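+ /*
+ * Not dead: this is a ptrace stop. Encode a "stopped by
+ * signal" wait status, (signal << 8) | 0x7f.
+ */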
+ why = CLD_TRAPPED;
+ status = exit_code;
+ exit_code = (status << 8) | 0x7f;
+ }
+
+ /*
+ * At this point we are committed to a successful return
+ * or a user error return. Release the tasklist_lock.
+ */
+ get_task_struct(p);
+ read_unlock(&tasklist_lock);
+
+ if (rusagep)
+ err = getrusage(p, RUSAGE_BOTH, rusagep);
+ if (infop) {
+ if (!err)
+ err = put_user(SIGCHLD, &infop->si_signo);
+ if (!err)
+ err = put_user(0, &infop->si_errno);
+ if (!err)
+ err = put_user((short)why, &infop->si_code);
+ if (!err)
+ err = put_user(p->pid, &infop->si_pid);
+ if (!err)
+ err = put_user(p->uid, &infop->si_uid);
+ if (!err)
+ err = put_user(status, &infop->si_status);
+ }
+ if (!err && stat_addr)
+ err = put_user(exit_code, stat_addr);
+
+ if (!err) {
+ if (why != CLD_TRAPPED)
+ /*
+ * This was a death report. The ptracer's wait
+ * does an implicit detach, so the zombie reports
+ * to its real parent now.
+ */
+ detach_zombie(tsk, p, state);
+ err = p->pid;
+ }
+
+ put_task_struct(p);
+
+ return err;
+}
+
+
+/*
+ * All the report callbacks (except death and reap) are subject to a race
+ * with ptrace_exit doing a quick detach and ptrace_done. It can do this
+ * even when the target is not quiescent, so a callback may already be in
+ * progress when it does ptrace_done. Callbacks use this function to fetch
+ * the struct ptrace_state while ensuring it doesn't disappear until
+ * put_ptrace_state is called. This just uses RCU, since state and
+ * anything we try to do to state->parent is safe under rcu_read_lock.
+ */
+static struct ptrace_state *
+get_ptrace_state(struct utrace_attached_engine *engine,
+ struct task_struct *tsk)
+{
+ struct ptrace_state *state;
+
+ rcu_read_lock();
+ state = rcu_dereference((struct ptrace_state *) engine->data);
+ if (likely(state != NULL))
+ return state;
+
+ rcu_read_unlock();
+ return NULL;
+}
+
+static inline void
+put_ptrace_state(struct ptrace_state *state)
+{
+ rcu_read_unlock();
+}
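+
+/*
+ * Typical use of this pair in a report callback looks like:
+ *
+ *	struct ptrace_state *state = get_ptrace_state(engine, tsk);
+ *	if (unlikely(state == NULL))
+ *		return UTRACE_ACTION_RESUME;
+ *	... use state and state->parent under rcu_read_lock ...
+ *	put_ptrace_state(state);
+ */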
+
+
+static void
+do_notify(struct task_struct *tsk, struct task_struct *parent, int why)
+{
+ struct siginfo info;
+ unsigned long flags;
+ struct sighand_struct *sighand;
+ int sa_mask;
+
+ info.si_signo = SIGCHLD;
+ info.si_errno = 0;
+ info.si_pid = tsk->pid;
+ info.si_uid = tsk->uid;
+
+ /* FIXME: find out whether or not this is supposed to be c*time. */
+ info.si_utime = cputime_to_jiffies(tsk->utime);
+ info.si_stime = cputime_to_jiffies(tsk->stime);
+
+ sa_mask = SA_NOCLDSTOP;
+ info.si_code = why;
+ info.si_status = tsk->exit_code & 0x7f;
+ if (why == CLD_CONTINUED)
+ info.si_status = SIGCONT;
+ else if (why == CLD_STOPPED)
+ info.si_status = tsk->signal->group_exit_code & 0x7f;
+ else if (why == CLD_EXITED) {
+ sa_mask = SA_NOCLDWAIT;
+ if (tsk->exit_code & 0x80)
+ info.si_code = CLD_DUMPED;
+ else if (tsk->exit_code & 0x7f)
+ info.si_code = CLD_KILLED;
+ else {
+ info.si_code = CLD_EXITED;
+ info.si_status = tsk->exit_code >> 8;
+ }
+ }
+
+ read_lock(&tasklist_lock);
+ if (unlikely(parent->signal == NULL))
+ goto out;
+
+ sighand = parent->sighand;
+ spin_lock_irqsave(&sighand->siglock, flags);
+ if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
+ !(sighand->action[SIGCHLD-1].sa.sa_flags & sa_mask))
+ __group_send_sig_info(SIGCHLD, &info, parent);
+ /*
+ * Even if SIGCHLD is not generated, we must wake up wait4 calls.
+ */
+ wake_up_interruptible_sync(&parent->signal->wait_chldexit);
+ spin_unlock_irqrestore(&sighand->siglock, flags);
+
+out:
+ read_unlock(&tasklist_lock);
+}
+
+static u32
+ptrace_report(struct utrace_attached_engine *engine,
+ struct task_struct *tsk,
+ struct ptrace_state *state,
+ int code)
+{
+ const struct utrace_regset *regset;
+
+ pr_debug("%d ptrace_report %d engine %p"
+ " state %p code %x parent %d (%p)\n",
+ current->pid, tsk->pid, engine, state, code,
+ state->parent->pid, state->parent);
+ if (!state->have_eventmsg && state->u.siginfo) {
+ const siginfo_t *si = state->u.siginfo;
+ pr_debug(" si %d code %x errno %d addr %p\n",
+ si->si_signo, si->si_code, si->si_errno,
+ si->si_addr);
+ }
+
+ /*
+ * Set our QUIESCE flag right now, before notifying the tracer.
+ * We do this before setting tsk->exit_code rather than
+ * by using UTRACE_ACTION_NEWSTATE in our return value, to
+ * ensure that the tracer can't get the notification and then
+ * try to resume us with PTRACE_CONT before we set the flag.
+ */
+ utrace_set_flags(tsk, engine, engine->flags | UTRACE_ACTION_QUIESCE);
+
+ /*
+ * If regset 0 has a writeback call, do it now. On register window
+ * machines, this makes sure the user memory backing the register
+ * data is up to date by the time wait_task_inactive returns to
+ * ptrace_start in our tracer doing a PTRACE_PEEKDATA or the like.
+ */
+ regset = utrace_regset(tsk, engine, utrace_native_view(tsk), 0);
+ if (regset->writeback)
+ (*regset->writeback)(tsk, regset, 0);
+
+ BUG_ON(code == 0);
+ tsk->exit_code = code;
+ do_notify(tsk, state->parent, CLD_TRAPPED);
+
+ pr_debug("%d ptrace_report quiescing exit_code %x\n",
+ current->pid, current->exit_code);
+
+ put_ptrace_state(state);
+
+ return UTRACE_ACTION_RESUME;
+}
+
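+/*
+ * Stop for a PTRACE_EVENT_* report. The code passed to ptrace_report
+ * is (event << 8) | SIGTRAP, so after the (code << 8) | 0x7f wait
+ * status encoding the tracer recovers the event as status >> 16.
+ */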
+static inline u32
+ptrace_event(struct utrace_attached_engine *engine,
+ struct task_struct *tsk,
+ struct ptrace_state *state,
+ int event)
+{
+ state->syscall = 0;
+ return ptrace_report(engine, tsk, state, (event << 8) | SIGTRAP);
+}
+
+/*
+ * Unlike other report callbacks, this can't be called while ptrace_exit
+ * is doing ptrace_done in parallel, so we don't need get_ptrace_state.
+ */
+static u32
+ptrace_report_death(struct utrace_attached_engine *engine,
+ struct task_struct *tsk)
+{
+ struct ptrace_state *state = (struct ptrace_state *) engine->data;
+
+ if (tsk->exit_code == 0 && unlikely(tsk->flags & PF_SIGNALED))
+ /*
+ * This can only mean that tsk->exit_code was clobbered
+ * by ptrace_update or ptrace_do_wait in a race with
+ * an asynchronous wakeup and exit for SIGKILL.
+ */
+ tsk->exit_code = SIGKILL;
+
+ if (tsk->parent == state->parent && tsk->exit_signal != -1) {
+ /*
+ * This is a natural child (excluding clone siblings of a
+ * child group_leader), so we detach and let the normal
+ * reporting happen once our NOREAP action is gone. But
+ * first, generate a SIGCHLD for those cases where normal
+ * behavior won't. A ptrace'd child always generates SIGCHLD.
+ */
+ pr_debug("ptrace %d death natural parent %d exit_code %x\n",
+ tsk->pid, state->parent->pid, tsk->exit_code);
+ if (!thread_group_empty(tsk))
+ do_notify(tsk, state->parent, CLD_EXITED);
+ ptrace_state_unlink(state);
+ rcu_assign_pointer(engine->data, 0UL);
+ ptrace_done(state);
+ return UTRACE_ACTION_DETACH;
+ }
+
+ /*
+ * This might be a second report_death callback for a group leader
+ * that was delayed when its original report_death callback was made.
+ * Repeating do_notify is exactly what we need for that case too.
+ * After the wakeup, ptrace_do_wait will see delay_group_leader false.
+ */
+
+ pr_debug("ptrace %d death notify %d exit_code %x: ",
+ tsk->pid, state->parent->pid, tsk->exit_code);
+ do_notify(tsk, state->parent, CLD_EXITED);
+ pr_debug("%d notified %d\n", tsk->pid, state->parent->pid);
+ return UTRACE_ACTION_RESUME;
+}
+
+/*
+ * We get this only in the case where our UTRACE_ACTION_NOREAP was ignored.
+ * That happens solely when a non-leader exec reaps the old leader.
+ */
+static void
+ptrace_report_reap(struct utrace_attached_engine *engine,
+ struct task_struct *tsk)
+{
+ struct ptrace_state *state = get_ptrace_state(engine, tsk);
+ if (state != NULL) {
+ ptrace_state_unlink(state);
+ rcu_assign_pointer(engine->data, 0UL);
+ ptrace_done(state);
+ put_ptrace_state(state);
+ }
+}
+
+/*
+ * Start tracing the child. This has to do put_ptrace_state before it can
+ * do allocation that might block.
+ */
+static void
+ptrace_clone_setup(struct utrace_attached_engine *engine,
+ struct task_struct *parent,
+ struct ptrace_state *state,
+ struct task_struct *child)
+{
+ struct task_struct *tracer;
+ struct utrace_attached_engine *child_engine;
+ struct ptrace_state *child_state;
+ int ret;
+ u8 options;
+ int cap_sys_ptrace;
+
+ tracer = state->parent;
+ options = state->options;
+ cap_sys_ptrace = state->cap_sys_ptrace;
+ get_task_struct(tracer);
+ put_ptrace_state(state);
+
+ child_engine = utrace_attach(child, (UTRACE_ATTACH_CREATE
+ | UTRACE_ATTACH_EXCLUSIVE
+ | UTRACE_ATTACH_MATCH_OPS),
+ &ptrace_utrace_ops, 0UL);
+ if (unlikely(IS_ERR(child_engine))) {
+ BUG_ON(PTR_ERR(child_engine) != -ENOMEM);
+ put_task_struct(tracer);
+ goto nomem;
+ }
+
+ child_state = ptrace_setup(child, child_engine,
+ tracer, options, cap_sys_ptrace, NULL);
+
+ put_task_struct(tracer);
+
+ if (unlikely(IS_ERR(child_state))) {
+ (void) utrace_detach(child, child_engine);
+
+ if (PTR_ERR(child_state) == -ENOMEM)
+ goto nomem;
+
+ /*
+ * Our tracer has started exiting. It's
+ * too late to set it up to trace the child.
+ */
+ BUG_ON(PTR_ERR(child_state) != -EALREADY);
+ } else {
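+ /*
+ * Queue the initial SIGSTOP so the child stops for its new
+ * tracer before running anything, as it would after a
+ * PTRACE_ATTACH.
+ */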
+ sigaddset(&child->pending.signal, SIGSTOP);
+ set_tsk_thread_flag(child, TIF_SIGPENDING);
+ ret = ptrace_update(child, child_engine, 0, 0);
+
+ /*
+ * The child hasn't run yet, so it can't have died already.
+ */
+ BUG_ON(ret);
+ }
+
+ return;
+
+nomem:
+ printk(KERN_ERR "ptrace out of memory, lost child %d of %d\n",
+ child->pid, parent->pid);
+}
+
+static u32
+ptrace_report_clone(struct utrace_attached_engine *engine,
+ struct task_struct *parent,
+ unsigned long clone_flags, struct task_struct *child)
+{
+ int event, option;
+ struct ptrace_state *state = get_ptrace_state(engine, parent);
+ if (unlikely(state == NULL))
+ return UTRACE_ACTION_RESUME;
+
+ pr_debug("%d (%p) engine %p"
+ " ptrace_report_clone child %d (%p) fl %lx\n",
+ parent->pid, parent, engine, child->pid, child, clone_flags);
+
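+ /*
+ * Pick the ptrace event and the option bit that enables it:
+ * vfork and non-SIGCHLD clones get their own events, and
+ * everything else is reported as a fork.
+ */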
+ event = PTRACE_EVENT_FORK;
+ option = PTRACE_O_TRACEFORK;
+ if (clone_flags & CLONE_VFORK) {
+ event = PTRACE_EVENT_VFORK;
+ option = PTRACE_O_TRACEVFORK;
+ }
+ else if ((clone_flags & CSIGNAL) != SIGCHLD) {
+ event = PTRACE_EVENT_CLONE;
+ option = PTRACE_O_TRACECLONE;
+ }
+
+ if (state->options & option) {
+ state->have_eventmsg = 1;
+ state->u.eventmsg = child->pid;
+ } else
+ event = 0;
+
+ if (!(clone_flags & CLONE_UNTRACED)
+ && (event || (clone_flags & CLONE_PTRACE))) {
+ /*
+ * Have our tracer start following the child too.
+ */
+ ptrace_clone_setup(engine, parent, state, child);
+
+ /*
+ * That did put_ptrace_state, so we have to check
+ * again in case our tracer just started exiting.
+ */
+ state = get_ptrace_state(engine, parent);
+ if (unlikely(state == NULL))
+ return UTRACE_ACTION_RESUME;
+ }
+
+ if (event)
+ return ptrace_event(engine, parent, state, event);
+
+ put_ptrace_state(state);
+
+ return UTRACE_ACTION_RESUME;
+}
+
+
+static u32
+ptrace_report_vfork_done(struct utrace_attached_engine *engine,
+ struct task_struct *parent, pid_t child_pid)
+{
+ struct ptrace_state *state = get_ptrace_state(engine, parent);
+ if (unlikely(state == NULL))
+ return UTRACE_ACTION_RESUME;
+
+ state->have_eventmsg = 1;
+ state->u.eventmsg = child_pid;
+ return ptrace_event(engine, parent, state, PTRACE_EVENT_VFORK_DONE);
+}
+
+
+static u32
+ptrace_report_signal(struct utrace_attached_engine *engine,
+ struct task_struct *tsk, struct pt_regs *regs,
+ u32 action, siginfo_t *info,
+ const struct k_sigaction *orig_ka,
+ struct k_sigaction *return_ka)
+{
+ int signo = info == NULL ? SIGTRAP : info->si_signo;
+ struct ptrace_state *state = get_ptrace_state(engine, tsk);
+ if (unlikely(state == NULL))
+ return UTRACE_ACTION_RESUME;
+
+ state->syscall = 0;
+ state->have_eventmsg = 0;
+ state->u.siginfo = info;
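+ /*
+ * Stop to report the signal. UTRACE_SIGNAL_IGN keeps it from
+ * being delivered now; the tracer chooses what to inject when
+ * it resumes us.
+ */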
+ return ptrace_report(engine, tsk, state, signo) | UTRACE_SIGNAL_IGN;
+}
+
+static u32
+ptrace_report_jctl(struct utrace_attached_engine *engine,
+ struct task_struct *tsk, int type)
+{
+ struct ptrace_state *state = get_ptrace_state(engine, tsk);
+ if (unlikely(state == NULL))
+ return UTRACE_ACTION_RESUME;
+
+ pr_debug("ptrace %d jctl notify %d type %x exit_code %x\n",
+ tsk->pid, state->parent->pid, type, tsk->exit_code);
+
+ do_notify(tsk, state->parent, type);
+ put_ptrace_state(state);
+
+ return UTRACE_JCTL_NOSIGCHLD;
+}
+
+static u32
+ptrace_report_exec(struct utrace_attached_engine *engine,
+ struct task_struct *tsk,
+ const struct linux_binprm *bprm,
+ struct pt_regs *regs)
+{
+ struct ptrace_state *state = get_ptrace_state(engine, tsk);
+ if (unlikely(state == NULL))
+ return UTRACE_ACTION_RESUME;
+
+ return ptrace_event(engine, tsk, state,
+ (state->options & PTRACE_O_TRACEEXEC)
+ ? PTRACE_EVENT_EXEC : 0);
+}
+
+static u32
+ptrace_report_syscall(struct utrace_attached_engine *engine,
+ struct task_struct *tsk, struct pt_regs *regs,
+ int entry)
+{
+ struct ptrace_state *state = get_ptrace_state(engine, tsk);
+ if (unlikely(state == NULL))
+ return UTRACE_ACTION_RESUME;
+
+#ifdef PTRACE_SYSEMU
+ if (entry && state->sysemu)
+ tracehook_abort_syscall(regs);
+#endif
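+ /*
+ * With PTRACE_O_TRACESYSGOOD we report SIGTRAP | 0x80 so the
+ * tracer can tell syscall stops from genuine SIGTRAPs.
+ */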
+ state->syscall = 1;
+ return ptrace_report(engine, tsk, state,
+ ((state->options & PTRACE_O_TRACESYSGOOD)
+ ? 0x80 : 0) | SIGTRAP);
+}
+
+static u32
+ptrace_report_syscall_entry(struct utrace_attached_engine *engine,
+ struct task_struct *tsk, struct pt_regs *regs)
+{
+ return ptrace_report_syscall(engine, tsk, regs, 1);
+}
+
+static u32
+ptrace_report_syscall_exit(struct utrace_attached_engine *engine,
+ struct task_struct *tsk, struct pt_regs *regs)
+{
+ return ptrace_report_syscall(engine, tsk, regs, 0);
+}
+
+static u32
+ptrace_report_exit(struct utrace_attached_engine *engine,
+ struct task_struct *tsk, long orig_code, long *code)
+{
+ struct ptrace_state *state = get_ptrace_state(engine, tsk);
+ if (unlikely(state == NULL))
+ return UTRACE_ACTION_RESUME;
+
+ state->have_eventmsg = 1;
+ state->u.eventmsg = *code;
+ return ptrace_event(engine, tsk, state, PTRACE_EVENT_EXIT);
+}
+
+static int
+ptrace_unsafe_exec(struct utrace_attached_engine *engine,
+ struct task_struct *tsk)
+{
+ int unsafe = LSM_UNSAFE_PTRACE;
+ struct ptrace_state *state = get_ptrace_state(engine, tsk);
+ /*
+ * get_ptrace_state already dropped rcu_read_lock when it
+ * returned NULL, so call put_ptrace_state only when we
+ * actually got a state.
+ */
+ if (likely(state != NULL)) {
+ if (state->cap_sys_ptrace)
+ unsafe = LSM_UNSAFE_PTRACE_CAP;
+ put_ptrace_state(state);
+ }
+ return unsafe;
+}
+
+static struct task_struct *
+ptrace_tracer_task(struct utrace_attached_engine *engine,
+ struct task_struct *target)
+{
+ struct task_struct *parent = NULL;
+ struct ptrace_state *state = get_ptrace_state(engine, target);
+ if (likely(state != NULL)) {
+ parent = state->parent;
+ put_ptrace_state(state);
+ }
+ return parent;
+}
+
+static int
+ptrace_allow_access_process_vm(struct utrace_attached_engine *engine,
+ struct task_struct *target,
+ struct task_struct *caller)
+{
+ struct ptrace_state *state;
+ int ours = 0;
+
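+ /*
+ * Access via ptrace is allowed only to the tracer itself, and
+ * only while the target is quiescent for us or stopped.
+ */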
+ state = get_ptrace_state(engine, target);
+ if (likely(state != NULL)) {
+ ours = (((engine->flags & UTRACE_ACTION_QUIESCE)
+ || target->state == TASK_STOPPED)
+ && state->parent == caller);
+ put_ptrace_state(state);
+ }
+
+ return ours && security_ptrace(caller, target) == 0;
+}
+
+
+static const struct utrace_engine_ops ptrace_utrace_ops =
+{
+ .report_syscall_entry = ptrace_report_syscall_entry,
+ .report_syscall_exit = ptrace_report_syscall_exit,
+ .report_exec = ptrace_report_exec,
+ .report_jctl = ptrace_report_jctl,
+ .report_signal = ptrace_report_signal,
+ .report_vfork_done = ptrace_report_vfork_done,
+ .report_clone = ptrace_report_clone,
+ .report_exit = ptrace_report_exit,
+ .report_death = ptrace_report_death,
+ .report_reap = ptrace_report_reap,
+ .unsafe_exec = ptrace_unsafe_exec,
+ .tracer_task = ptrace_tracer_task,
+ .allow_access_process_vm = ptrace_allow_access_process_vm,
+};