+/*
+ * Called with utrace locked, unlocks it on return. Unconditionally
+ * recompute the flags after report_death is finished. This may notice
+ * that there are no engines left and free the utrace struct.
+ */
+static void
+finish_report_death(struct task_struct *tsk, struct utrace *utrace)
+{
+	/*
+	 * After we unlock (possibly inside utrace_reap for callbacks) with
+	 * EXIT_FLAG_DEATH clear, competing utrace_detach/utrace_set_flags
+	 * calls know that we've finished our callbacks and any detach
+	 * bookkeeping.
+	 */
+	utrace->u.exit.flags &= EXIT_FLAG_REAP;
+
+	if (utrace->u.exit.flags & EXIT_FLAG_REAP)
+		/*
+		 * utrace_release_task was already called in parallel.
+		 * We must complete its work now.
+		 */
+		utrace_reap(tsk, utrace);
+	else
+		/*
+		 * Clear out any detached engines and in the process
+		 * recompute the flags. Mask off event bits we can't
+		 * see any more. This tells utrace_release_task we
+		 * have already finished, if it comes along later.
+		 * Note this all happens on the already-locked utrace,
+		 * which might already be removed from the task.
+		 */
+		remove_detached(tsk, utrace, 0, DEAD_FLAGS_MASK);
+}
+
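+/*
+ * Context for the EXIT_FLAG_REAP handshake above (a sketch of the
+ * assumed caller, not code from this patch): utrace_release_task,
+ * racing with the report_death path, is expected to record its
+ * arrival under the same lock, roughly:
+ *
+ *	spin_lock(&utrace->lock);
+ *	if (utrace->u.exit.flags & EXIT_FLAG_DEATH) {
+ *		utrace->u.exit.flags |= EXIT_FLAG_REAP;
+ *		spin_unlock(&utrace->lock);
+ *	} else
+ *		utrace_reap(tsk, utrace);
+ *
+ * While report_death callbacks are still running (EXIT_FLAG_DEATH
+ * set), it only sets EXIT_FLAG_REAP and lets finish_report_death do
+ * the reaping; once the flags have been masked down it can reap
+ * directly. The real utrace_release_task does more work; only the
+ * flag protocol matters here.
+ */
+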
+/*
+ * Called with utrace locked, unlocks it on return.
+ * EXIT_FLAG_DELAYED_GROUP_LEADER is set.
+ * Do second report_death callbacks for engines using NOREAP.
+ */
+static void
+report_delayed_group_leader(struct task_struct *tsk, struct utrace *utrace)
+{
+	struct list_head *pos, *next;
+	struct utrace_attached_engine *engine;
+	u32 action;
+
+	utrace->u.exit.flags |= EXIT_FLAG_DEATH;
+	spin_unlock(&utrace->lock);
+
+	/* XXX must change for sharing */
+	list_for_each_safe_rcu(pos, next, &utrace->engines) {
+		engine = list_entry(pos, struct utrace_attached_engine, entry);
+#define NOREAP_DEATH (UTRACE_EVENT(DEATH) | UTRACE_ACTION_NOREAP)
+		if ((engine->flags & NOREAP_DEATH) == NOREAP_DEATH)
+			REPORT(report_death);
+	}
+
+	spin_lock(&utrace->lock);
+	finish_report_death(tsk, utrace);
+}
+
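+/*
+ * For reference (an assumption about surrounding code, not part of
+ * this hunk): REPORT is taken to be this file's helper macro that
+ * invokes one engine callback under RCU and folds the callback's
+ * return value into `action', along the lines of:
+ *
+ *	#define REPORT(callback, ...) do { \
+ *		u32 ret = (*rcu_dereference(engine->ops)->callback) \
+ *			(engine, tsk, ##__VA_ARGS__); \
+ *		action = update_action(tsk, utrace, engine, ret); \
+ *	} while (0)
+ *
+ * That would explain why report_delayed_group_leader declares
+ * `action' without reading it directly; update_action is assumed to
+ * apply the returned UTRACE_ACTION_* bits to the engine.
+ */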