1 #include <linux/utrace.h>
2 #include <linux/tracehook.h>
4 #include <linux/sched.h>
5 #include <linux/module.h>
6 #include <linux/init.h>
7 #include <linux/slab.h>
8 #include <asm/tracehook.h>
/*
 * Slab caches for struct utrace (per-traced-task state) and struct
 * utrace_attached_engine (per-attached-tracer state), created at boot.
 * SLAB_PANIC: cache creation failure is fatal at boot.
 * NOTE(review): this listing is elided (original line numbers jump) --
 * the utrace_init() function header and the "utrace_cachep =" assignment
 * are not visible here; confirm against the complete source.
 */
11 static kmem_cache_t *utrace_cachep;
12 static kmem_cache_t *utrace_engine_cachep;
18 kmem_cache_create("utrace_cache",
19 sizeof(struct utrace), 0,
20 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
21 utrace_engine_cachep =
22 kmem_cache_create("utrace_engine_cache",
23 sizeof(struct utrace_attached_engine), 0,
24 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
/* Run after core subsystems are up, before device init. */
27 subsys_initcall(utrace_init);
/*
 * utrace_first_engine: allocate target->utrace for the first attaching
 * engine; on success the new utrace is returned locked with @engine
 * already on its engine list.  Returns ERR_PTR on error, or NULL to tell
 * the caller to retry (startup race with the creating parent, or another
 * engine attached first).
 * NOTE(review): listing is elided -- braces, the task_lock/task_unlock
 * pairing implied by the comment below, and several return paths are not
 * visible; verify against the complete source.
 */
31 * Make sure target->utrace is allocated, and return with it locked on
32 * success. This function mediates startup races. The creating parent
33 * task has priority, and other callers will delay here to let its call
34 * succeed and take the new utrace lock first.
36 static struct utrace *
37 utrace_first_engine(struct task_struct *target,
38 struct utrace_attached_engine *engine)
40 struct utrace *utrace, *ret;
43 * If this is a newborn thread and we are not the creator,
44 * we have to wait for it. The creator gets the first chance
45 * to attach. The PF_STARTING flag is cleared after its
46 * report_clone hook has had a chance to run.
/* Newborn and we're not its creator: back off (NULL => caller retries). */
48 if ((target->flags & PF_STARTING)
49 && (current->utrace == NULL
50 || current->utrace->u.live.cloning != target)) {
52 return (signal_pending(current)
53 ? ERR_PTR(-ERESTARTNOINTR) : NULL);
/* SLAB_KERNEL == GFP_KERNEL on this kernel generation. */
56 utrace = kmem_cache_alloc(utrace_cachep, SLAB_KERNEL);
57 if (unlikely(utrace == NULL))
58 return ERR_PTR(-ENOMEM);
60 utrace->u.live.cloning = NULL;
61 utrace->u.live.signal = NULL;
62 INIT_LIST_HEAD(&utrace->engines);
63 list_add(&engine->entry, &utrace->engines);
64 spin_lock_init(&utrace->lock);
/* Publish the new struct only if nobody beat us to it. */
69 if (likely(target->utrace == NULL)) {
70 rcu_assign_pointer(target->utrace, utrace);
72 * The task_lock protects us against another thread doing
73 * the same thing. We might still be racing against
74 * tracehook_release_task. It's called with ->exit_state
75 * set to EXIT_DEAD and then checks ->utrace with an
76 * smp_mb() in between. If EXIT_DEAD is set, then
77 * release_task might have checked ->utrace already and saw
78 * it NULL; we can't attach. If we see EXIT_DEAD not yet
79 * set after our barrier, then we know release_task will
80 * see our target->utrace pointer.
83 if (target->exit_state == EXIT_DEAD) {
85 * The target has already been through release_task.
/* Undo the publication; attach must fail. */
87 target->utrace = NULL;
93 * If the thread is already dead when we attach, then its
94 * parent was notified already and we shouldn't repeat the
95 * notification later after a detach or NOREAP flag change.
97 if (target->exit_state)
98 utrace->u.exit.notified = 1;
102 * Another engine attached first, so there is a struct already.
103 * A null return says to restart looking for the existing one.
/* Lost the race: discard our allocation. */
108 utrace_unlock(utrace);
109 kmem_cache_free(utrace_cachep, utrace);
/*
 * RCU callback (scheduled by rcu_utrace_free): free the struct utrace
 * containing the rcu_head after the grace period has elapsed.
 */
116 utrace_free(struct rcu_head *rhead)
118 struct utrace *utrace = container_of(rhead, struct utrace, u.dead);
119 kmem_cache_free(utrace_cachep, utrace);
123 * Called with utrace locked. Clean it up and free it via RCU.
/*
 * Unlock, then defer the actual free to utrace_free after an RCU grace
 * period, so concurrent rcu_dereference(task->utrace) readers stay safe.
 */
126 rcu_utrace_free(struct utrace *utrace)
128 utrace_unlock(utrace);
129 INIT_RCU_HEAD(&utrace->u.dead);
130 call_rcu(&utrace->u.dead, utrace_free);
/*
 * RCU callback: free a detached engine once no RCU list walker can
 * still be looking at it.
 */
134 utrace_engine_free(struct rcu_head *rhead)
136 struct utrace_attached_engine *engine =
137 container_of(rhead, struct utrace_attached_engine, rhead);
138 kmem_cache_free(utrace_engine_cachep, engine);
142 * Remove the utrace pointer from the task, unless there is a pending
143 * forced signal (or it's quiescent in utrace_get_signal).
/*
 * Detach tsk->utrace; keep only the NOREAP bit in utrace_flags.
 * Skipped entirely while u.live.signal is pending (struct must persist).
 */
146 utrace_clear_tsk(struct task_struct *tsk, struct utrace *utrace)
148 if (utrace->u.live.signal == NULL) {
150 if (likely(tsk->utrace != NULL)) {
151 rcu_assign_pointer(tsk->utrace, NULL);
/* &= (not &=~): everything except NOREAP is cleared. */
152 tsk->utrace_flags &= UTRACE_ACTION_NOREAP;
159 * Called with utrace locked and the target quiescent (maybe current).
160 * If this was the last engine and there is no parting forced signal
161 * pending, utrace is left locked and not freed, but is removed from the task.
/*
 * Unlink @engine from the RCU list and schedule its RCU free; clear the
 * task's utrace pointer if the list is now empty.
 */
164 remove_engine(struct utrace_attached_engine *engine,
165 struct task_struct *tsk, struct utrace *utrace)
167 list_del_rcu(&engine->entry);
168 if (list_empty(&utrace->engines))
169 utrace_clear_tsk(tsk, utrace);
170 call_rcu(&engine->rhead, utrace_engine_free);
175 * Called with utrace locked, after remove_engine may have run.
176 * Passed the flags from all remaining engines, i.e. zero if none left.
177 * Install the flags in tsk->utrace_flags and return with utrace unlocked.
178 * If no engines are left and there is no parting forced signal pending,
179 * utrace is freed and we return NULL.
/*
 * NOTE(review): listing is elided -- the flags parameter declaration,
 * several braces, an exit_state initialization, and the EXIT_DEAD branch's
 * release_task-style call (line 250 of the original) are not visible here;
 * verify against the complete source before changing anything.
 */
181 static struct utrace *
182 check_dead_utrace(struct task_struct *tsk, struct utrace *utrace,
187 if (utrace->u.live.signal != NULL)
189 * There is a pending forced signal. It may have been
190 * left by an engine now detached. The empty utrace
191 * remains attached until it can be processed.
193 flags |= UTRACE_ACTION_QUIESCE;
196 * If tracing was preventing a SIGCHLD or self-reaping
197 * and is no longer, we'll do that report or reaping now.
/* NOREAP was set before and will not be set after this update. */
199 if (((tsk->utrace_flags &~ flags) & UTRACE_ACTION_NOREAP)
200 && tsk->exit_state && !utrace->u.exit.notified) {
201 BUG_ON(tsk->exit_state != EXIT_ZOMBIE);
203 * While holding the utrace lock, mark that it's been done.
204 * For self-reaping, we need to change tsk->exit_state
205 * before clearing tsk->utrace_flags, so that the real
206 * parent can't see it in EXIT_ZOMBIE momentarily and reap it.
208 utrace->u.exit.notified = 1;
/* exit_signal == -1: self-reaping task, no parent notification. */
209 if (tsk->exit_signal == -1) {
210 exit_state = xchg(&tsk->exit_state, EXIT_DEAD);
211 BUG_ON(exit_state != EXIT_ZOMBIE);
212 exit_state = EXIT_DEAD;
215 * Now that we've changed its state to DEAD,
216 * it's safe to install the new tsk->utrace_flags
217 * value without the UTRACE_ACTION_NOREAP bit set.
220 else if (thread_group_empty(tsk)) {
222 * We need to prevent the real parent from reaping
223 * until after we've called do_notify_parent, below.
224 * It can get into wait_task_zombie any time after
225 * the UTRACE_ACTION_NOREAP bit is cleared. It's
226 * safe for that to do everything it does until its
227 * release_task call starts tearing things down.
228 * Holding tasklist_lock for reading prevents
229 * release_task from proceeding until we've done
230 * everything we need to do.
232 exit_state = EXIT_ZOMBIE;
233 read_lock(&tasklist_lock);
/* Install the recomputed union of engine flags. */
237 tsk->utrace_flags = flags;
239 utrace_unlock(utrace);
/* No engines and no pending signal: free the struct via RCU. */
241 rcu_utrace_free(utrace);
246 * Now we're finished updating the utrace state.
247 * Do a pending self-reaping or parent notification.
249 if (exit_state == EXIT_DEAD)
251 * Note this can wind up in utrace_reap and do more callbacks.
252 * Our callers must be in places where that is OK.
255 else if (exit_state == EXIT_ZOMBIE) {
256 do_notify_parent(tsk, tsk->exit_signal);
257 read_unlock(&tasklist_lock); /* See comment above. */
266 * Get the target thread to quiesce. Return nonzero if it's already quiescent.
267 * Return zero if it will report a QUIESCE event soon.
268 * If interrupt is nonzero, wake it like a signal would so it quiesces ASAP.
269 * If interrupt is zero, just make sure it quiesces before going to user mode.
/*
 * NOTE(review): listing is elided -- the return type, early-return for the
 * lock-free quiescent check, and the interrupt/!interrupt branching around
 * signal_wake_up vs TIF_SIGPENDING+kick_process are not fully visible here.
 */
272 quiesce(struct task_struct *target, int interrupt)
276 target->utrace_flags |= UTRACE_ACTION_QUIESCE;
277 read_barrier_depends();
/* Cheap unlocked check first: dead or already stopped/traced. */
279 quiescent = (target->exit_state
280 || target->state & (TASK_TRACED | TASK_STOPPED));
/* Re-check under siglock before poking the target. */
283 spin_lock_irq(&target->sighand->siglock);
284 quiescent = (unlikely(target->exit_state)
285 || unlikely(target->state
286 & (TASK_TRACED | TASK_STOPPED)));
289 signal_wake_up(target, 0);
291 set_tsk_thread_flag(target, TIF_SIGPENDING);
292 kick_process(target);
295 spin_unlock_irq(&target->sighand->siglock);
/*
 * Find an attached engine matching @ops and/or @data per the
 * UTRACE_ATTACH_MATCH_* bits in @flags; runs under RCU list iteration.
 * With UTRACE_ATTACH_EXCLUSIVE a match yields ERR_PTR(-EEXIST) instead.
 * NOTE(review): the `continue`s between the match tests and the final
 * return paths are elided from this listing.
 */
302 static struct utrace_attached_engine *
303 matching_engine(struct utrace *utrace, int flags,
304 const struct utrace_engine_ops *ops, unsigned long data)
306 struct utrace_attached_engine *engine;
307 list_for_each_entry_rcu(engine, &utrace->engines, entry) {
308 if ((flags & UTRACE_ATTACH_MATCH_OPS)
309 && engine->ops != ops)
311 if ((flags & UTRACE_ATTACH_MATCH_DATA)
312 && engine->data != data)
314 if (flags & UTRACE_ATTACH_EXCLUSIVE)
315 engine = ERR_PTR(-EEXIST);
/*
 * utrace_attach: public entry point (exported below) to attach a tracing
 * engine to @target, or to look up an existing one.
 *   UTRACE_ATTACH_CREATE     - allocate a new engine (else lookup only)
 *   UTRACE_ATTACH_EXCLUSIVE  - fail with -EEXIST if a match already exists
 *   UTRACE_ATTACH_MATCH_*    - match criteria for lookup/exclusivity
 * Errors: -ENOENT (no utrace, lookup only), -ENOMEM, -ESRCH (reaped),
 * -EEXIST.  NOTE(review): listing is elided -- rcu_read_lock/unlock
 * pairing, restart gotos, and engine field initialization (ops/data)
 * are not visible here; verify against the complete source.
 */
323 option to match existing on ops, ops+data, return it; nocreate:lookup only
325 struct utrace_attached_engine *
326 utrace_attach(struct task_struct *target, int flags,
327 const struct utrace_engine_ops *ops, unsigned long data)
329 struct utrace *utrace;
330 struct utrace_attached_engine *engine;
334 utrace = rcu_dereference(target->utrace);
/* First engine ever for this task. */
336 if (utrace == NULL) {
339 if (!(flags & UTRACE_ATTACH_CREATE)) {
340 return ERR_PTR(-ENOENT);
343 engine = kmem_cache_alloc(utrace_engine_cachep, SLAB_KERNEL);
344 if (unlikely(engine == NULL))
345 return ERR_PTR(-ENOMEM);
/* Returns the new utrace locked, an ERR_PTR, or NULL to retry. */
349 utrace = utrace_first_engine(target, engine);
350 if (IS_ERR(utrace)) {
351 kmem_cache_free(utrace_engine_cachep, engine);
352 return ERR_PTR(PTR_ERR(utrace));
354 if (unlikely(utrace == NULL)) /* Race condition. */
357 else if (unlikely(target->exit_state == EXIT_DEAD)) {
359 * The target has already been reaped.
362 return ERR_PTR(-ESRCH);
365 if (!(flags & UTRACE_ATTACH_CREATE)) {
366 engine = matching_engine(utrace, flags, ops, data);
372 engine = kmem_cache_alloc(utrace_engine_cachep, SLAB_KERNEL);
373 if (unlikely(engine == NULL))
374 return ERR_PTR(-ENOMEM);
/* Start interested only in REAP if the engine has a report_reap hook. */
375 engine->flags = ops->report_reap ? UTRACE_EVENT(REAP) : 0;
378 utrace = rcu_dereference(target->utrace);
379 if (unlikely(utrace == NULL)) { /* Race with detach. */
385 if (flags & UTRACE_ATTACH_EXCLUSIVE) {
386 struct utrace_attached_engine *old;
387 old = matching_engine(utrace, flags, ops, data);
389 utrace_unlock(utrace);
391 kmem_cache_free(utrace_engine_cachep, engine);
392 return ERR_PTR(-EEXIST);
/* The utrace we locked is no longer the task's: restart. */
396 if (unlikely(rcu_dereference(target->utrace) != utrace)) {
398 * We lost a race with other CPUs doing a sequence
399 * of detach and attach before we got in.
401 utrace_unlock(utrace);
403 kmem_cache_free(utrace_engine_cachep, engine);
408 list_add_tail_rcu(&engine->entry, &utrace->engines);
414 utrace_unlock(utrace);
418 EXPORT_SYMBOL_GPL(utrace_attach);
421 * When an engine is detached, the target thread may still see it and make
422 * callbacks until it quiesces. We reset its event flags to just QUIESCE
423 * and install a special ops vector whose callback is dead_engine_delete.
424 * When the target thread quiesces, it can safely free the engine itself.
/* Tombstone callback: tells the quiesce path to detach/free the engine. */
427 dead_engine_delete(struct utrace_attached_engine *engine,
428 struct task_struct *tsk)
430 return UTRACE_ACTION_DETACH;
/* Sentinel ops vector installed on detached engines; compared by address. */
433 static const struct utrace_engine_ops dead_engine_ops =
435 .report_quiesce = &dead_engine_delete
440 * We may have been the one keeping the target thread quiescent.
441 * Check if it should wake up now.
442 * Called with utrace locked, and unlocks it on return.
443 * If we were keeping it stopped, resume it.
444 * If we were keeping its zombie from reporting/self-reap, do it now.
/*
 * NOTE(review): listing is elided -- the `flags = 0` initialization, early
 * returns between the dead/live branches, and some braces are not visible;
 * the two identical flag-union loops below belong to separate branches in
 * the full source.
 */
447 wake_quiescent(unsigned long old_flags,
448 struct utrace *utrace, struct task_struct *target)
451 struct utrace_attached_engine *engine;
/* Dead target: just recompute flags and let check_dead_utrace notify. */
453 if (target->exit_state) {
455 * Update the set of events of interest from the union
456 * of the interests of the remaining tracing engines.
459 list_for_each_entry(engine, &utrace->engines, entry)
460 flags |= engine->flags | UTRACE_EVENT(REAP);
461 utrace = check_dead_utrace(target, utrace, flags);
466 * Update the set of events of interest from the union
467 * of the interests of the remaining tracing engines.
470 list_for_each_entry(engine, &utrace->engines, entry)
471 flags |= engine->flags | UTRACE_EVENT(REAP);
472 utrace = check_dead_utrace(target, utrace, flags);
/* Someone else still wants it quiescent: nothing to wake. */
474 if (flags & UTRACE_ACTION_QUIESCE)
477 read_lock(&tasklist_lock);
478 if (!target->exit_state) {
480 * The target is not dead and should not be in tracing stop
481 * any more. Wake it unless it's in job control stop.
483 spin_lock_irq(&target->sighand->siglock);
/* In job control stop: leave it TASK_STOPPED, maybe report CLD_STOPPED. */
484 if (target->signal->flags & SIGNAL_STOP_STOPPED) {
485 int stop_count = target->signal->group_stop_count;
486 target->state = TASK_STOPPED;
487 spin_unlock_irq(&target->sighand->siglock);
490 * If tracing was preventing a CLD_STOPPED report
491 * and is no longer, do that report right now.
495 /*&& (events &~ interest) & UTRACE_INHIBIT_CLDSTOP*/
497 do_notify_parent_cldstop(target, CLD_STOPPED);
503 recalc_sigpending_tsk(target);
504 wake_up_state(target, TASK_STOPPED | TASK_TRACED);
505 spin_unlock_irq(&target->sighand->siglock);
508 read_unlock(&tasklist_lock);
/*
 * utrace_detach (exported below): detach @engine from @target.
 * Marks the engine dead (dead_engine_ops) so the target's own quiesce
 * path frees it; if the target is already quiescent we remove and wake
 * immediately.  NOTE(review): listing is elided -- the return type, the
 * locking around the EXIT_DEAD early-out, and the local declarations
 * (flags) are not fully visible here.
 */
512 utrace_detach(struct task_struct *target,
513 struct utrace_attached_engine *engine)
515 struct utrace *utrace;
519 utrace = rcu_dereference(target->utrace);
521 if (unlikely(target->exit_state == EXIT_DEAD)) {
523 * Called after utrace_release_task might have started.
524 * A call to this engine's report_reap callback might
525 * already be in progress or engine might even have been
534 flags = engine->flags;
/* From now on the target only sees the tombstone ops + QUIESCE. */
535 engine->flags = UTRACE_EVENT(QUIESCE) | UTRACE_ACTION_QUIESCE;
536 rcu_assign_pointer(engine->ops, &dead_engine_ops);
/* Already quiescent: we can finish the detach ourselves. */
538 if (quiesce(target, 1)) {
539 remove_engine(engine, target, utrace);
540 wake_quiescent(flags, utrace, target);
543 utrace_unlock(utrace);
545 EXPORT_SYMBOL_GPL(utrace_detach);
549 * Called with utrace->lock held.
550 * Notify and clean up all engines, then free utrace.
/*
 * Final teardown at reap time: deliver report_reap to interested live
 * engines (dropping the lock around the callback), RCU-free every engine,
 * then RCU-free the utrace itself.  NOTE(review): the relock after the
 * callback and some braces are elided from this listing.
 */
553 utrace_reap(struct task_struct *target, struct utrace *utrace)
555 struct utrace_attached_engine *engine, *next;
556 const struct utrace_engine_ops *ops;
559 list_for_each_entry_safe(engine, next, &utrace->engines, entry) {
560 list_del_rcu(&engine->entry);
563 * Now nothing else refers to this engine.
565 if (engine->flags & UTRACE_EVENT(REAP)) {
566 ops = rcu_dereference(engine->ops);
/* Skip tombstoned (already-detached) engines. */
567 if (ops != &dead_engine_ops) {
568 utrace_unlock(utrace);
569 (*ops->report_reap)(engine, target);
570 call_rcu(&engine->rhead, utrace_engine_free);
575 call_rcu(&engine->rhead, utrace_engine_free);
578 rcu_utrace_free(utrace);
582 * Called by release_task. After this, target->utrace must be cleared.
/*
 * Sever the task<->utrace link at release_task time.  If the dying task
 * is still running its own death reports (DEATH/QUIESCE bits still set
 * and not yet notified), defer the reap to utrace_report_death via the
 * u.exit.reap flag instead of tearing down here.
 * NOTE(review): the utrace_lock call between the NULL check and the
 * notified test is elided from this listing.
 */
585 utrace_release_task(struct task_struct *target)
587 struct utrace *utrace;
590 utrace = target->utrace;
591 rcu_assign_pointer(target->utrace, NULL);
594 if (unlikely(utrace == NULL))
599 if (!utrace->u.exit.notified
600 && (target->utrace_flags & (UTRACE_EVENT(DEATH)
601 | UTRACE_EVENT(QUIESCE)))) {
603 * The target will do some final callbacks but hasn't
604 * finished them yet. We know because it clears these
605 * event bits after it's done. Instead of cleaning up here
606 * and requiring utrace_report_death to cope with it, we
607 * delay the REAP report and the teardown until after the
608 * target finishes its death reports.
610 utrace->u.exit.reap = 1;
611 utrace_unlock(utrace);
614 utrace_reap(target, utrace); /* Unlocks and frees. */
/*
 * utrace_set_flags (exported below): change @engine's event/action flags
 * on @target.  Handles QUIESCE transitions (asking the target to stop, or
 * waking it), and forces a pass through utrace_quiescent when step or
 * syscall tracing bits are newly set.  NOTE(review): listing is elided --
 * the `unsigned long flags` parameter, `int report` local, #endif lines,
 * the EXIT_DEAD error return, and the flag-recomputation path when flags
 * are being cleared are not visible here.
 */
619 utrace_set_flags(struct task_struct *target,
620 struct utrace_attached_engine *engine,
623 struct utrace *utrace;
625 unsigned long old_flags, old_utrace_flags;
627 #ifdef ARCH_HAS_SINGLE_STEP
/* Sanity: reject step bits on arches without hardware support. */
628 if (! ARCH_HAS_SINGLE_STEP)
630 WARN_ON(flags & UTRACE_ACTION_SINGLESTEP);
631 #ifdef ARCH_HAS_BLOCK_STEP
632 if (! ARCH_HAS_BLOCK_STEP)
634 WARN_ON(flags & UTRACE_ACTION_BLOCKSTEP);
637 utrace = rcu_dereference(target->utrace);
639 if (unlikely(target->exit_state == EXIT_DEAD)) {
650 old_utrace_flags = target->utrace_flags;
651 old_flags = engine->flags;
652 engine->flags = flags;
/* Task flags are the union over engines; |= only grows the set here. */
653 target->utrace_flags |= flags;
655 if ((old_flags ^ flags) & UTRACE_ACTION_QUIESCE) {
656 if (flags & UTRACE_ACTION_QUIESCE) {
657 report = (quiesce(target, 1)
658 && (flags & UTRACE_EVENT(QUIESCE)));
659 utrace_unlock(utrace);
662 wake_quiescent(old_flags, utrace, target);
666 * If we're asking for single-stepping or syscall tracing,
667 * we need to pass through utrace_quiescent before resuming
668 * in user mode to get those effects, even if the target is
669 * not going to be quiescent right now.
671 if (!(target->utrace_flags & UTRACE_ACTION_QUIESCE)
672 && ((flags &~ old_utrace_flags)
673 & (UTRACE_ACTION_SINGLESTEP | UTRACE_ACTION_BLOCKSTEP
674 | UTRACE_EVENT_SYSCALL)))
676 utrace_unlock(utrace);
/* Target was already quiescent, so it won't generate the report itself. */
679 if (report) /* Already quiescent, won't report itself. */
680 (*engine->ops->report_quiesce)(engine, target);
682 EXPORT_SYMBOL_GPL(utrace_set_flags);
685 * While running an engine callback, no locks are held.
686 * If a callback updates its engine's action state, then
687 * we need to take the utrace lock to install the flags update.
/*
 * Fold an engine callback's return value @ret into the engine's and the
 * task's state.  DETACH installs the tombstone ops; NEWSTATE installs the
 * callback's requested UTRACE_ACTION_STATE_MASK bits under the utrace
 * lock.  NOTE(review): the `u32 ret` parameter line, utrace_lock call,
 * #endif lines, and the final return are elided from this listing.
 */
690 update_action(struct task_struct *tsk, struct utrace *utrace,
691 struct utrace_attached_engine *engine,
694 if (ret & UTRACE_ACTION_DETACH)
695 rcu_assign_pointer(engine->ops, &dead_engine_ops);
696 else if ((ret & UTRACE_ACTION_NEWSTATE)
697 && ((ret ^ engine->flags) & UTRACE_ACTION_STATE_MASK)) {
698 #ifdef ARCH_HAS_SINGLE_STEP
699 if (! ARCH_HAS_SINGLE_STEP)
701 WARN_ON(ret & UTRACE_ACTION_SINGLESTEP);
702 #ifdef ARCH_HAS_BLOCK_STEP
703 if (! ARCH_HAS_BLOCK_STEP)
705 WARN_ON(ret & UTRACE_ACTION_BLOCKSTEP);
708 * If we're changing something other than just QUIESCE,
709 * make sure we pass through utrace_quiescent before
710 * resuming even if we aren't going to stay quiescent.
711 * That's where we get the correct union of all engines'
712 * flags after they've finished changing, and apply changes.
714 if (((ret ^ engine->flags) & (UTRACE_ACTION_STATE_MASK
715 & ~UTRACE_ACTION_QUIESCE)))
716 tsk->utrace_flags |= UTRACE_ACTION_QUIESCE;
/* Replace the engine's state bits with the requested ones. */
717 engine->flags &= ~UTRACE_ACTION_STATE_MASK;
718 engine->flags |= ret & UTRACE_ACTION_STATE_MASK;
719 tsk->utrace_flags |= engine->flags;
720 utrace_unlock(utrace);
723 ret |= engine->flags & UTRACE_ACTION_STATE_MASK;
/*
 * Invoke one engine callback and merge its returned action bits into the
 * enclosing function's `action` via update_action.  Relies on `engine`,
 * `tsk`, `utrace`, and `action` being in scope at the expansion site.
 * NOTE(review): the closing "} while (0)" line is elided in this listing.
 */
727 #define REPORT(callback, ...) do { \
728 u32 ret = (*rcu_dereference(engine->ops)->callback) \
729 (engine, tsk, ##__VA_ARGS__); \
730 action = update_action(tsk, utrace, engine, ret); \
735 * Called with utrace->lock held, returns with it released.
/*
 * After a report loop: remove every tombstoned engine, recompute the
 * union of remaining engine flags via check_dead_utrace (which unlocks
 * and may free utrace), and fold the surviving state bits back into the
 * caller's action word.  NOTE(review): the *utracep store and the
 * else-branch structure in the loop are elided from this listing.
 */
738 remove_detached(struct task_struct *tsk, struct utrace *utrace,
739 struct utrace **utracep, u32 action)
741 struct utrace_attached_engine *engine, *next;
742 unsigned long flags = 0;
744 list_for_each_entry_safe(engine, next, &utrace->engines, entry) {
745 if (engine->ops == &dead_engine_ops)
746 remove_engine(engine, tsk, utrace);
748 flags |= engine->flags | UTRACE_EVENT(REAP);
750 utrace = check_dead_utrace(tsk, utrace, flags);
754 flags &= UTRACE_ACTION_STATE_MASK;
755 return flags | (action & UTRACE_ACTION_OP_MASK);
759 * Called after an event report loop. Remove any engines marked for detach.
/* Only takes the lock when some callback actually requested DETACH. */
762 check_detach(struct task_struct *tsk, u32 action)
764 if (action & UTRACE_ACTION_DETACH) {
765 utrace_lock(tsk->utrace);
766 action = remove_detached(tsk, tsk->utrace, NULL, action);
/*
 * After a report loop: enter utrace_quiescent if any state bits
 * (quiesce/step) are set.  Returns its nonzero-on-SIGKILL result.
 */
772 check_quiescent(struct task_struct *tsk, u32 action)
774 if (action & UTRACE_ACTION_STATE_MASK)
775 return utrace_quiescent(tsk, NULL);
780 * Called iff UTRACE_EVENT(CLONE) flag is set.
781 * This notification call blocks the wake_up_new_task call on the child.
782 * So we must not quiesce here. tracehook_report_clone_complete will do
783 * a quiescence check momentarily.
/*
 * Report a clone to interested engines.  u.live.cloning marks the parent
 * as the child's creator so utrace_first_engine gives it attach priority.
 */
786 utrace_report_clone(unsigned long clone_flags, struct task_struct *child)
788 struct task_struct *tsk = current;
789 struct utrace *utrace = tsk->utrace;
790 struct list_head *pos, *next;
791 struct utrace_attached_engine *engine;
792 unsigned long action;
794 utrace->u.live.cloning = child;
796 /* XXX must change for sharing */
797 action = UTRACE_ACTION_RESUME;
798 list_for_each_safe_rcu(pos, next, &utrace->engines) {
799 engine = list_entry(pos, struct utrace_attached_engine, entry);
800 if (engine->flags & UTRACE_EVENT(CLONE))
801 REPORT(report_clone, clone_flags, child);
/* HIDE stops reporting to engines later in the list. */
802 if (action & UTRACE_ACTION_HIDE)
806 utrace->u.live.cloning = NULL;
808 check_detach(tsk, action);
/*
 * Deliver report_quiesce to interested engines and accumulate every
 * engine's state bits into @action; finish with a detach sweep.
 */
812 report_quiescent(struct task_struct *tsk, struct utrace *utrace, u32 action)
814 struct list_head *pos, *next;
815 struct utrace_attached_engine *engine;
817 list_for_each_safe_rcu(pos, next, &utrace->engines) {
818 engine = list_entry(pos, struct utrace_attached_engine, entry);
819 if (engine->flags & UTRACE_EVENT(QUIESCE))
820 REPORT(report_quiesce);
821 action |= engine->flags & UTRACE_ACTION_STATE_MASK;
824 return check_detach(tsk, action);
828 * Called iff UTRACE_EVENT(JCTL) flag is set.
/*
 * Report a job-control event (@what, e.g. CLD_STOPPED) and then report
 * quiescence inline, since the task is stopping anyway.  Returns nonzero
 * (UTRACE_JCTL_NOSIGCHLD bit) if SIGCHLD should be suppressed.
 */
831 utrace_report_jctl(int what)
833 struct task_struct *tsk = current;
834 struct utrace *utrace = tsk->utrace;
835 struct list_head *pos, *next;
836 struct utrace_attached_engine *engine;
837 unsigned long action;
839 /* XXX must change for sharing */
840 action = UTRACE_ACTION_RESUME;
841 list_for_each_safe_rcu(pos, next, &utrace->engines) {
842 engine = list_entry(pos, struct utrace_attached_engine, entry);
843 if (engine->flags & UTRACE_EVENT(JCTL))
844 REPORT(report_jctl, what);
845 if (action & UTRACE_ACTION_HIDE)
850 * We are becoming quiescent, so report it now.
851 * We don't block in utrace_quiescent because we are stopping anyway.
852 * We know that upon resuming we'll go through tracehook_induce_signal,
853 * which will keep us quiescent or set us up to resume with tracing.
855 action = report_quiescent(tsk, utrace, action);
857 if (what == CLD_STOPPED && tsk->state != TASK_STOPPED) {
859 * The event report hooks could have blocked, though
860 * it should have been briefly. Make sure we're in
861 * TASK_STOPPED state again to block properly, unless
862 * we've just come back out of job control stop.
864 spin_lock_irq(&tsk->sighand->siglock);
865 if (tsk->signal->flags & SIGNAL_STOP_STOPPED)
866 set_current_state(TASK_STOPPED);
867 spin_unlock_irq(&tsk->sighand->siglock);
870 return action & UTRACE_JCTL_NOSIGCHLD;
875 * Return nonzero if there is a SIGKILL that should be waking us up.
876 * Called with the siglock held.
/* SIGKILL pending (private or shared queue) and not blocked. */
879 sigkill_pending(struct task_struct *tsk)
881 return ((sigismember(&tsk->pending.signal, SIGKILL)
882 || sigismember(&tsk->signal->shared_pending.signal, SIGKILL))
883 && !unlikely(sigismember(&tsk->blocked, SIGKILL)));
887 * Called if UTRACE_EVENT(QUIESCE) or UTRACE_ACTION_QUIESCE flag is set.
888 * Also called after other event reports.
889 * It is a good time to block.
890 * Returns nonzero if we woke up prematurely due to SIGKILL.
892 * The signal pointer is nonzero when called from utrace_get_signal,
893 * where a pending forced signal can be processed right away. Otherwise,
894 * we keep UTRACE_ACTION_QUIESCE set after resuming so that utrace_get_signal
895 * will be entered before user mode.
/*
 * NOTE(review): listing is elided -- the `int killed` local, the
 * schedule() call after setting TASK_TRACED, the loop/goto that rechecks
 * after waking, and several braces/#endif lines are not visible here;
 * verify control flow against the complete source.
 */
898 utrace_quiescent(struct task_struct *tsk, struct utrace_signal *signal)
900 struct utrace *utrace = tsk->utrace;
901 unsigned long action;
904 /* XXX must change for sharing */
906 action = report_quiescent(tsk, utrace, UTRACE_ACTION_RESUME);
909 * If some engines want us quiescent, we block here.
911 if (action & UTRACE_ACTION_QUIESCE) {
/* Publish the forced-signal state so utrace_inject_signal can see it. */
914 if (signal != NULL) {
915 BUG_ON(utrace->u.live.signal != NULL);
916 utrace->u.live.signal = signal;
919 spin_lock_irq(&tsk->sighand->siglock);
921 * If wake_quiescent is trying to wake us up now, it will
922 * have cleared the QUIESCE flag before trying to take the
923 * siglock. Now we have the siglock, so either it has
924 * already cleared the flag, or it will wake us up after we
925 * release the siglock it's waiting for.
926 * Never stop when there is a SIGKILL bringing us down.
928 killed = sigkill_pending(tsk);
929 if (!killed && (tsk->utrace_flags & UTRACE_ACTION_QUIESCE)) {
930 set_current_state(TASK_TRACED);
932 * If there is a group stop in progress,
933 * we must participate in the bookkeeping.
935 if (tsk->signal->group_stop_count > 0)
936 --tsk->signal->group_stop_count;
937 spin_unlock_irq(&tsk->sighand->siglock);
941 spin_unlock_irq(&tsk->sighand->siglock);
943 if (signal != NULL) {
945 * We know the struct stays in place when its
946 * u.live.signal is set, see check_dead_utrace.
947 * This makes it safe to clear its pointer here.
949 BUG_ON(tsk->utrace != utrace);
950 BUG_ON(utrace->u.live.signal != signal);
951 utrace->u.live.signal = NULL;
954 if (killed) /* Game over, man! */
958 * We've woken up. One engine could be waking us up while
959 * another has asked us to quiesce. So check afresh. We
960 * could have been detached while quiescent. Now we are no
961 * longer quiescent, so don't need to do any RCU locking.
962 * But we do need to check our utrace pointer anew.
964 utrace = tsk->utrace;
965 if (tsk->utrace_flags
966 & (UTRACE_EVENT(QUIESCE) | UTRACE_ACTION_STATE_MASK))
969 else if (tsk->utrace_flags & UTRACE_ACTION_QUIESCE) {
971 * Our flags are out of date.
972 * Update the set of events of interest from the union
973 * of the interests of the remaining tracing engines.
974 * This may notice that there are no engines left
975 * and clean up the struct utrace. It's left in place
976 * and the QUIESCE flag set as long as utrace_get_signal
977 * still needs to process a pending forced signal.
979 struct utrace_attached_engine *engine;
980 unsigned long flags = 0;
981 utrace = rcu_dereference(tsk->utrace);
983 list_for_each_entry(engine, &utrace->engines, entry)
984 flags |= engine->flags | UTRACE_EVENT(REAP);
986 utrace_clear_tsk(tsk, utrace);
987 utrace = check_dead_utrace(tsk, utrace, flags);
991 * We're resuming. Update the machine layer tracing state and then go.
993 #ifdef ARCH_HAS_SINGLE_STEP
994 if (action & UTRACE_ACTION_SINGLESTEP)
995 tracehook_enable_single_step(tsk);
997 tracehook_disable_single_step(tsk);
999 #ifdef ARCH_HAS_BLOCK_STEP
/* Single-step takes precedence over block-step when both are set. */
1000 if ((action & (UTRACE_ACTION_BLOCKSTEP|UTRACE_ACTION_SINGLESTEP))
1001 == UTRACE_ACTION_BLOCKSTEP)
1002 tracehook_enable_block_step(tsk);
1004 tracehook_disable_block_step(tsk);
1006 if (tsk->utrace_flags & UTRACE_EVENT_SYSCALL)
1007 tracehook_enable_syscall_trace(tsk);
1009 tracehook_disable_syscall_trace(tsk);
1016 * Called iff UTRACE_EVENT(EXIT) flag is set.
/*
 * Report exit to interested engines; callbacks may rewrite *exit_code
 * (the original value is passed separately as orig_code).
 */
1019 utrace_report_exit(long *exit_code)
1021 struct task_struct *tsk = current;
1022 struct utrace *utrace = tsk->utrace;
1023 struct list_head *pos, *next;
1024 struct utrace_attached_engine *engine;
1025 unsigned long action;
1026 long orig_code = *exit_code;
1028 /* XXX must change for sharing */
1029 action = UTRACE_ACTION_RESUME;
1030 list_for_each_safe_rcu(pos, next, &utrace->engines) {
1031 engine = list_entry(pos, struct utrace_attached_engine, entry);
1032 if (engine->flags & UTRACE_EVENT(EXIT))
1033 REPORT(report_exit, orig_code, exit_code);
1035 action = check_detach(tsk, action);
1036 check_quiescent(tsk, action);
1040 * Called iff UTRACE_EVENT(DEATH) or UTRACE_ACTION_QUIESCE flag is set.
1042 * It is always possible that we are racing with utrace_release_task here,
1043 * if UTRACE_ACTION_NOREAP is not set, or in the case of non-leader exec
1044 * where the old leader will get released regardless of NOREAP. For this
1045 * reason, utrace_release_task checks for the event bits that get us here,
1046 * and delays its cleanup for us to do.
/*
 * NOTE(review): listing is elided -- returns between the u.exit.reap
 * branches and the second-reap path after relocking are not fully
 * visible; verify the race handling against the complete source.
 */
1049 utrace_report_death(struct task_struct *tsk, struct utrace *utrace)
1051 struct list_head *pos, *next;
1052 struct utrace_attached_engine *engine;
1053 u32 action, oaction;
1055 BUG_ON(!tsk->exit_state);
1057 oaction = tsk->utrace_flags;
1059 /* XXX must change for sharing */
1060 action = UTRACE_ACTION_RESUME;
1061 list_for_each_safe_rcu(pos, next, &utrace->engines) {
1062 engine = list_entry(pos, struct utrace_attached_engine, entry);
1063 if (engine->flags & UTRACE_EVENT(DEATH))
1064 REPORT(report_death);
1065 if (engine->flags & UTRACE_EVENT(QUIESCE))
1066 REPORT(report_quiesce);
1070 * Unconditionally lock and recompute the flags.
1071 * This may notice that there are no engines left and
1072 * free the utrace struct.
1074 utrace_lock(utrace);
1075 if (utrace->u.exit.reap) {
1077 * utrace_release_task was already called in parallel.
1078 * We must complete its work now.
1081 utrace_reap(tsk, utrace);
1084 action = remove_detached(tsk, utrace, &utrace, action);
1086 if (utrace != NULL) {
1087 utrace_lock(utrace);
1088 if (utrace->u.exit.reap)
1092 * Clear event bits we can't see any more. This
1093 * tells utrace_release_task we have already
1094 * finished, if it comes along later.
1096 tsk->utrace_flags &= (UTRACE_EVENT(REAP)
1097 | UTRACE_ACTION_NOREAP);
1099 utrace_unlock(utrace);
1105 * Called iff UTRACE_EVENT(VFORK_DONE) flag is set.
/* Report that a vfork child (@child_pid) has released the parent. */
1108 utrace_report_vfork_done(pid_t child_pid)
1110 struct task_struct *tsk = current;
1111 struct utrace *utrace = tsk->utrace;
1112 struct list_head *pos, *next;
1113 struct utrace_attached_engine *engine;
1114 unsigned long action;
1116 /* XXX must change for sharing */
1117 action = UTRACE_ACTION_RESUME;
1118 list_for_each_safe_rcu(pos, next, &utrace->engines) {
1119 engine = list_entry(pos, struct utrace_attached_engine, entry);
1120 if (engine->flags & UTRACE_EVENT(VFORK_DONE))
1121 REPORT(report_vfork_done, child_pid);
1122 if (action & UTRACE_ACTION_HIDE)
1125 action = check_detach(tsk, action);
1126 check_quiescent(tsk, action);
1130 * Called iff UTRACE_EVENT(EXEC) flag is set.
/* Report a successful exec to interested engines. */
1133 utrace_report_exec(struct linux_binprm *bprm, struct pt_regs *regs)
1135 struct task_struct *tsk = current;
1136 struct utrace *utrace = tsk->utrace;
1137 struct list_head *pos, *next;
1138 struct utrace_attached_engine *engine;
1139 unsigned long action;
1141 /* XXX must change for sharing */
1142 action = UTRACE_ACTION_RESUME;
1143 list_for_each_safe_rcu(pos, next, &utrace->engines) {
1144 engine = list_entry(pos, struct utrace_attached_engine, entry);
1145 if (engine->flags & UTRACE_EVENT(EXEC))
1146 REPORT(report_exec, bprm, regs);
1147 if (action & UTRACE_ACTION_HIDE)
1150 action = check_detach(tsk, action);
1151 check_quiescent(tsk, action);
1155 * Called iff UTRACE_EVENT(SYSCALL_{ENTRY,EXIT}) flag is set.
/*
 * Report syscall entry or exit (selected by @is_exit).  If the quiesce
 * check is cut short by SIGKILL on entry, abort the syscall so it never
 * actually runs.
 */
1158 utrace_report_syscall(struct pt_regs *regs, int is_exit)
1160 struct task_struct *tsk = current;
1161 struct utrace *utrace = tsk->utrace;
1162 struct list_head *pos, *next;
1163 struct utrace_attached_engine *engine;
1164 unsigned long action, ev;
1167 XXX pass syscall # to engine hook directly, let it return inhibit-action
1169 long syscall = tracehook_syscall_number(regs, is_exit);
1172 ev = is_exit ? UTRACE_EVENT(SYSCALL_EXIT) : UTRACE_EVENT(SYSCALL_ENTRY);
1174 /* XXX must change for sharing */
1175 action = UTRACE_ACTION_RESUME;
1176 list_for_each_safe_rcu(pos, next, &utrace->engines) {
1177 engine = list_entry(pos, struct utrace_attached_engine, entry);
1178 if (engine->flags & ev) {
1180 REPORT(report_syscall_exit, regs);
1182 REPORT(report_syscall_entry, regs);
1184 if (action & UTRACE_ACTION_HIDE)
1187 action = check_detach(tsk, action);
1188 if (unlikely(check_quiescent(tsk, action)) && !is_exit)
1190 * We are continuing despite QUIESCE because of a SIGKILL.
1191 * Don't let the system call actually proceed.
1193 tracehook_abort_syscall(regs);
1198 * This is pointed to by the utrace struct, but it's really a private
1199 * structure between utrace_get_signal and utrace_inject_signal.
/*
 * Transient signal-interception state; lives on utrace_get_signal's
 * stack (see u.live.signal handling in utrace_quiescent).
 * NOTE(review): a signr member is referenced later in the file but its
 * declaration line is elided from this listing.
 */
1201 struct utrace_signal
1203 siginfo_t *const info;
1204 struct k_sigaction *return_ka;
1209 // XXX copied from signal.c
/*
 * Private duplicates of kernel/signal.c's signal classification macros:
 * bitmask M() over the non-RT signal numbers, plus predicates for
 * kernel-only, stop, coredump, and default-ignore signals.
 * NOTE(review): the #else/#endif lines of the SIGRTMIN > BITS_PER_LONG
 * conditional and the M_SIGEMT fallback definition are elided here.
 */
1211 #define M_SIGEMT M(SIGEMT)
1216 #if SIGRTMIN > BITS_PER_LONG
1217 #define M(sig) (1ULL << ((sig)-1))
1219 #define M(sig) (1UL << ((sig)-1))
1221 #define T(sig, mask) (M(sig) & (mask))
1223 #define SIG_KERNEL_ONLY_MASK (\
1224 M(SIGKILL) | M(SIGSTOP) )
1226 #define SIG_KERNEL_STOP_MASK (\
1227 M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )
1229 #define SIG_KERNEL_COREDUMP_MASK (\
1230 M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \
1231 M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \
1232 M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )
1234 #define SIG_KERNEL_IGNORE_MASK (\
1235 M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )
1237 #define sig_kernel_only(sig) \
1238 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
1239 #define sig_kernel_coredump(sig) \
1240 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
1241 #define sig_kernel_ignore(sig) \
1242 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
1243 #define sig_kernel_stop(sig) \
1244 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
1248  * Call each interested tracing engine's report_signal callback.
/*
 * An engine is "interested" only when its flags intersect both flags1 and
 * flags2 (callers pass event==event for plain delivery, or event-mask plus
 * step-mask for the handler-singlestep path).  The current disposition is
 * carved out of action as disp for the callback; the callback's return is
 * folded back in via the REPORT macro (defined elsewhere in this file --
 * not visible here).  NOTE(review): the loop tail and return statement are
 * not visible in this snapshot; presumably it returns the merged action.
 */
1251 report_signal(struct task_struct *tsk, struct pt_regs *regs,
1252 struct utrace *utrace, u32 action,
1253 unsigned long flags1, unsigned long flags2, siginfo_t *info,
1254 const struct k_sigaction *ka, struct k_sigaction *return_ka)
1256 struct list_head *pos, *next;
1257 struct utrace_attached_engine *engine;
1259 /* XXX must change for sharing */
1260 list_for_each_safe_rcu(pos, next, &utrace->engines) {
1261 engine = list_entry(pos, struct utrace_attached_engine, entry);
1262 if ((engine->flags & flags1) && (engine->flags & flags2)) {
1263 u32 disp = action & UTRACE_ACTION_OP_MASK;	/* current disposition */
1264 action &= ~UTRACE_ACTION_OP_MASK;
1265 REPORT(report_signal, regs, disp, info, ka, return_ka);
1266 if ((action & UTRACE_ACTION_OP_MASK) == 0)
1268 if (action & UTRACE_ACTION_HIDE)
/*
 * Report UTRACE_SIGNAL_HANDLER to every engine with any signal event bit
 * set AND either single-step or block-step requested, i.e. engines that
 * asked to be told when a signal handler is about to run while stepping.
 * Afterwards act on detach requests and quiescence like the other hooks.
 * NOTE(review): return type and trailing NULL-ish report_signal arguments
 * are on lines not visible in this snapshot.
 */
1277 utrace_signal_handler_singlestep(struct task_struct *tsk, struct pt_regs *regs)
1280 action = report_signal(tsk, regs, tsk->utrace, UTRACE_SIGNAL_HANDLER,
1281 UTRACE_EVENT_SIGNAL_ALL,
1282 UTRACE_ACTION_SINGLESTEP|UTRACE_ACTION_BLOCKSTEP,
1284 action = check_detach(tsk, action);
1285 check_quiescent(tsk, action);
1290  * This is the hook from the signals code, called with the siglock held.
1291  * Here is the ideal place to quiesce. We also dequeue and intercept signals.
/*
 * Returns the signal number to deliver (with *info and *return_ka filled
 * in), or a sentinel telling the caller to retry -- the exact sentinel
 * values are on lines not visible in this snapshot.  The siglock is
 * dropped and retaken around quiescence and around engine callbacks.
 */
1294 utrace_get_signal(struct task_struct *tsk, struct pt_regs *regs,
1295 siginfo_t *info, struct k_sigaction *return_ka)
1297 struct utrace *utrace = tsk->utrace;
1298 struct utrace_signal signal = { info, return_ka, 0 };
1299 struct k_sigaction *ka;
1300 unsigned long action, event;
1303 * If a signal was injected previously, it could not use our
1304 * stack space directly. It had to allocate a data structure,
1305 * which we can now copy out of and free.
1307 * We don't have to lock access to u.live.signal because it's only
1308 * touched by utrace_inject_signal when we're quiescent.
1310 if (utrace->u.live.signal != NULL) {
1311 signal.signr = utrace->u.live.signal->signr;
1312 copy_siginfo(info, utrace->u.live.signal->info);
1313 if (utrace->u.live.signal->return_ka)
1314 *return_ka = *utrace->u.live.signal->return_ka;
1316 signal.return_ka = NULL;
1317 kfree(utrace->u.live.signal);
1318 utrace->u.live.signal = NULL;
1322 * If we should quiesce, now is the time.
1323 * First stash a pointer to the state on our stack,
1324 * so that utrace_inject_signal can tell us what to do.
1326 if (tsk->utrace_flags & UTRACE_ACTION_QUIESCE) {
1327 int killed = sigkill_pending(tsk);
/* Quiescing may sleep; cannot hold the siglock across it. */
1329 spin_unlock_irq(&tsk->sighand->siglock);
1331 killed = utrace_quiescent(tsk, &signal);
1334 * Noone wants us quiescent any more, we can take
1335 * signals. Unless we have a forced signal to take,
1336 * back out to the signal code to resynchronize after
1337 * releasing the siglock.
1339 if (signal.signr == 0 && !killed)
1341 * This return value says to reacquire the
1342 * siglock and check again. This will check
1343 * for a pending group stop and process it
1344 * before coming back here.
1348 spin_lock_irq(&tsk->sighand->siglock);
1352 * The only reason we woke up now was because of a
1353 * SIGKILL. Don't do normal dequeuing in case it
1354 * might get a signal other than SIGKILL. That would
1355 * perturb the death state so it might differ from
1356 * what the debugger would have allowed to happen.
1357 * Instead, pluck out just the SIGKILL to be sure
1358 * we'll die immediately with nothing else different
1359 * from the quiescent state the debugger wanted us in.
1361 sigset_t sigkill_only;
1362 sigfillset(&sigkill_only);
1363 sigdelset(&sigkill_only, SIGKILL);
1364 killed = dequeue_signal(tsk, &sigkill_only, info);
1365 BUG_ON(killed != SIGKILL);
1371 * If a signal was injected, everything is in place now. Go do it.
/*
 * signal.return_ka == NULL means the injector supplied its own sigaction
 * copy (or wants the installed one); do the SA_ONESHOT reset here since
 * the normal delivery path won't see the real sigaction.
 */
1373 if (signal.signr != 0) {
1374 if (signal.return_ka == NULL) {
1375 ka = &tsk->sighand->action[signal.signr - 1];
1376 if (ka->sa.sa_flags & SA_ONESHOT)
1377 ka->sa.sa_handler = SIG_DFL;
1381 BUG_ON(signal.return_ka != return_ka);
1382 return signal.signr;
1386 * If noone is interested in intercepting signals, let the caller
1387 * just dequeue them normally.
1389 if ((tsk->utrace_flags & UTRACE_EVENT_SIGNAL_ALL) == 0)
1393 * Steal the next signal so we can let tracing engines examine it.
1394 * From the signal number and sigaction, determine what normal
1395 * delivery would do. If no engine perturbs it, we'll do that
1396 * by returning the signal number after setting *return_ka.
1398 signal.signr = dequeue_signal(tsk, &tsk->blocked, info);
1399 if (signal.signr == 0)
1402 BUG_ON(signal.signr != info->si_signo);
1404 ka = &tsk->sighand->action[signal.signr - 1];
1408 * We are never allowed to interfere with SIGKILL,
1409 * just punt after filling in *return_ka for our caller.
1411 if (signal.signr == SIGKILL)
1412 return signal.signr;
/*
 * Classify what normal delivery would do: pick the utrace event class
 * engines subscribed to, and the default UTRACE_SIGNAL_* disposition.
 * Order matters: installed handler beats kernel default disposition.
 */
1414 if (ka->sa.sa_handler == SIG_IGN) {
1415 event = UTRACE_EVENT(SIGNAL_IGN);
1416 action = UTRACE_SIGNAL_IGN;
1418 else if (ka->sa.sa_handler != SIG_DFL) {
1419 event = UTRACE_EVENT(SIGNAL);
1420 action = UTRACE_ACTION_RESUME;
1422 else if (sig_kernel_coredump(signal.signr)) {
1423 event = UTRACE_EVENT(SIGNAL_CORE);
1424 action = UTRACE_SIGNAL_CORE;
1426 else if (sig_kernel_ignore(signal.signr)) {
1427 event = UTRACE_EVENT(SIGNAL_IGN);
1428 action = UTRACE_SIGNAL_IGN;
1430 else if (sig_kernel_stop(signal.signr)) {
1431 event = UTRACE_EVENT(SIGNAL_STOP);
1432 action = (signal.signr == SIGSTOP
1433 ? UTRACE_SIGNAL_STOP : UTRACE_SIGNAL_TSTP);
1436 event = UTRACE_EVENT(SIGNAL_TERM);
1437 action = UTRACE_SIGNAL_TERM;
1440 if (tsk->utrace_flags & event) {
1442 * We have some interested engines, so tell them about the
1443 * signal and let them change its disposition.
1446 spin_unlock_irq(&tsk->sighand->siglock);
1448 action = report_signal(tsk, regs, utrace, action, event, event,
1449 info, ka, return_ka);
1450 action &= UTRACE_ACTION_OP_MASK;
/*
 * UTRACE_SIGNAL_HOLD: an engine asked that the (already dequeued)
 * signal be pushed back on the pending queue for later.  With no
 * sigqueue we still set the pending bit, losing the queued siginfo.
 * NOTE(review): the if/else structure retaking the siglock spans
 * lines not visible in this snapshot.
 */
1452 if (action & UTRACE_SIGNAL_HOLD) {
1453 struct sigqueue *q = sigqueue_alloc();
1454 if (likely(q != NULL)) {
1456 copy_siginfo(&q->info, info);
1458 action &= ~UTRACE_SIGNAL_HOLD;
1459 spin_lock_irq(&tsk->sighand->siglock);
1460 sigaddset(&tsk->pending.signal, info->si_signo);
1461 if (likely(q != NULL))
1462 list_add(&q->list, &tsk->pending.list);
1465 spin_lock_irq(&tsk->sighand->siglock);
1467 recalc_sigpending_tsk(tsk);
1471 * We express the chosen action to the signals code in terms
1472 * of a representative signal whose default action does it.
1475 case UTRACE_SIGNAL_IGN:
1477 * We've eaten the signal. That's all we do.
1478 * Tell the caller to restart.
1480 spin_unlock_irq(&tsk->sighand->siglock);
1483 case UTRACE_ACTION_RESUME:
1484 case UTRACE_SIGNAL_DELIVER:
1486 * The handler will run. We do the SA_ONESHOT work here
1487 * since the normal path will only touch *return_ka now.
1489 if (return_ka->sa.sa_flags & SA_ONESHOT)
1490 ka->sa.sa_handler = SIG_DFL;
1493 case UTRACE_SIGNAL_TSTP:
1494 signal.signr = SIGTSTP;
1495 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
1496 return_ka->sa.sa_handler = SIG_DFL;
1499 case UTRACE_SIGNAL_STOP:
1500 signal.signr = SIGSTOP;
1501 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
1502 return_ka->sa.sa_handler = SIG_DFL;
1505 case UTRACE_SIGNAL_TERM:
1506 signal.signr = SIGTERM;
1507 return_ka->sa.sa_handler = SIG_DFL;
1510 case UTRACE_SIGNAL_CORE:
1511 signal.signr = SIGQUIT;
1512 return_ka->sa.sa_handler = SIG_DFL;
1519 return signal.signr;
1524  * Cause a specified signal delivery in the target thread,
1525  * which must be quiescent. The action has UTRACE_SIGNAL_* bits
1526  * as returned from a report_signal callback. If ka is non-null,
1527  * it gives the sigaction to follow for UTRACE_SIGNAL_DELIVER;
1528  * otherwise, the installed sigaction at the time of delivery is used.
/*
 * Writes into the utrace_signal block the quiescent target parked on its
 * stack in utrace_get_signal (reachable as utrace->u.live.signal under
 * the utrace lock).  Returns 0 or a negative errno: -EINVAL for a bad
 * signo, and error paths for a dead target, a non-quiescent target
 * (signal == NULL), or an injection already pending (signr != 0) --
 * the exact goto/return lines are not visible in this snapshot.
 */
1531 utrace_inject_signal(struct task_struct *target,
1532 struct utrace_attached_engine *engine,
1533 u32 action, siginfo_t *info,
1534 const struct k_sigaction *ka)
1536 struct utrace *utrace;
1537 struct utrace_signal *signal;
/* Reject signal 0 and out-of-range signal numbers up front. */
1540 if (info->si_signo == 0 || !valid_signal(info->si_signo))
1544 utrace = rcu_dereference(target->utrace);
1545 if (utrace == NULL) {
1549 utrace_lock(utrace);
1553 signal = utrace->u.live.signal;
1554 if (unlikely(target->exit_state))
1556 else if (signal == NULL) {
1557 ret = -ENOSYS; /* XXX */
1559 else if (signal->signr != 0)
/* Copy the caller's siginfo unless it already is the target buffer. */
1562 if (info != signal->info)
1563 copy_siginfo(signal->info, info);
/* Dispatch on the UTRACE_SIGNAL_* disposition (switch header not
 * visible in this snapshot). */
1570 case UTRACE_SIGNAL_IGN:
1573 case UTRACE_ACTION_RESUME:
1574 case UTRACE_SIGNAL_DELIVER:
1576 * The handler will run. We do the SA_ONESHOT work
1577 * here since the normal path will not touch the
1578 * real sigaction when using an injected signal.
1581 signal->return_ka = NULL;
1582 else if (ka != signal->return_ka)
1583 *signal->return_ka = *ka;
1584 if (ka && ka->sa.sa_flags & SA_ONESHOT) {
1585 struct k_sigaction *a;
1586 a = &target->sighand->action[info->si_signo-1];
1587 spin_lock_irq(&target->sighand->siglock);
1588 a->sa.sa_handler = SIG_DFL;
1589 spin_unlock_irq(&target->sighand->siglock);
1591 signal->signr = info->si_signo;
/* Stop/terminate/core cases mirror utrace_get_signal's switch: pick a
 * representative signal whose default action matches the disposition. */
1594 case UTRACE_SIGNAL_TSTP:
1595 signal->signr = SIGTSTP;
1596 spin_lock_irq(&target->sighand->siglock);
1597 target->signal->flags |= SIGNAL_STOP_DEQUEUED;
1598 spin_unlock_irq(&target->sighand->siglock);
1599 signal->return_ka->sa.sa_handler = SIG_DFL;
1602 case UTRACE_SIGNAL_STOP:
1603 signal->signr = SIGSTOP;
1604 spin_lock_irq(&target->sighand->siglock);
1605 target->signal->flags |= SIGNAL_STOP_DEQUEUED;
1606 spin_unlock_irq(&target->sighand->siglock);
1607 signal->return_ka->sa.sa_handler = SIG_DFL;
1610 case UTRACE_SIGNAL_TERM:
1611 signal->signr = SIGTERM;
1612 signal->return_ka->sa.sa_handler = SIG_DFL;
1615 case UTRACE_SIGNAL_CORE:
1616 signal->signr = SIGQUIT;
1617 signal->return_ka->sa.sa_handler = SIG_DFL;
1622 utrace_unlock(utrace);
1626 EXPORT_SYMBOL_GPL(utrace_inject_signal);
/*
 * Return the regset description at index `which` in `view`.  Rejects an
 * out-of-range index (the rejection branch's body is on a line not visible
 * in this snapshot; presumably it returns NULL).  A target other than the
 * current task is waited for with wait_task_inactive so its register state
 * is stable before the caller pokes at it.
 */
1629 const struct utrace_regset *
1630 utrace_regset(struct task_struct *target,
1631 struct utrace_attached_engine *engine,
1632 const struct utrace_regset_view *view, int which)
1634 if (unlikely((unsigned) which >= view->n))
1637 if (target != current)
1638 wait_task_inactive(target);
1640 return &view->regsets[which];
1642 EXPORT_SYMBOL_GPL(utrace_regset);
1646  * Return the task_struct for the task using ptrace on this one, or NULL.
1647  * Must be called with rcu_read_lock held to keep the returned struct alive.
1649  * At exec time, this may be called with task_lock(p) still held from when
1650  * tracehook_unsafe_exec was just called. In that case it must give
1651  * results consistent with those unsafe_exec results, i.e. non-NULL if
1652  * any LSM_UNSAFE_PTRACE_* bits were set.
1654  * The value is also used to display after "TracerPid:" in /proc/PID/status,
1655  * where it is called with only rcu_read_lock held.
/*
 * Polls each attached engine's optional tracer_task op under RCU; the
 * loop's termination once a tracer is found is on lines not visible in
 * this snapshot (presumably a break on non-NULL).
 */
1657 struct task_struct *
1658 utrace_tracer_task(struct task_struct *target)
1660 struct utrace *utrace;
1661 struct task_struct *tracer = NULL;
1663 utrace = rcu_dereference(target->utrace);
1664 if (utrace != NULL) {
1665 struct list_head *pos, *next;
1666 struct utrace_attached_engine *engine;
1667 const struct utrace_engine_ops *ops;
1668 list_for_each_safe_rcu(pos, next, &utrace->engines) {
1669 engine = list_entry(pos, struct utrace_attached_engine,
1671 ops = rcu_dereference(engine->ops);
1672 if (ops->tracer_task) {
1673 tracer = (*ops->tracer_task)(engine, target);
/*
 * Ask attached engines whether access_process_vm on target should be
 * permitted.  Each engine providing an allow_access_process_vm op is
 * consulted under RCU; ret's initialization, the op's trailing arguments,
 * and the return are on lines not visible in this snapshot.
 */
1684 utrace_allow_access_process_vm(struct task_struct *target)
1686 struct utrace *utrace;
1690 utrace = rcu_dereference(target->utrace);
1691 if (utrace != NULL) {
1692 struct list_head *pos, *next;
1693 struct utrace_attached_engine *engine;
1694 const struct utrace_engine_ops *ops;
1695 list_for_each_safe_rcu(pos, next, &utrace->engines) {
1696 engine = list_entry(pos, struct utrace_attached_engine,
1698 ops = rcu_dereference(engine->ops);
1699 if (ops->allow_access_process_vm) {
1700 ret = (*ops->allow_access_process_vm)(engine,
1714 * Called on the current task to return LSM_UNSAFE_* bits implied by tracing.
1715 * Called with task_lock held.
1718 utrace_unsafe_exec(struct task_struct *tsk)
1720 struct utrace *utrace = tsk->utrace;
1721 struct list_head *pos, *next;
1722 struct utrace_attached_engine *engine;
1723 const struct utrace_engine_ops *ops;
1726 /* XXX must change for sharing */
1727 list_for_each_safe_rcu(pos, next, &utrace->engines) {
1728 engine = list_entry(pos, struct utrace_attached_engine, entry);
1729 ops = rcu_dereference(engine->ops);
1730 if (ops->unsafe_exec)
1731 unsafe |= (*ops->unsafe_exec)(engine, tsk);