#include <linux/utrace.h>
#include <linux/tracehook.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/tracehook.h>

static kmem_cache_t *utrace_cachep;
static kmem_cache_t *utrace_engine_cachep;

static int __init
utrace_init(void)
{
	utrace_cachep =
		kmem_cache_create("utrace_cache",
				  sizeof(struct utrace), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	utrace_engine_cachep =
		kmem_cache_create("utrace_engine_cache",
				  sizeof(struct utrace_attached_engine), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	return 0;
}
subsys_initcall(utrace_init);

/*
 * Make sure target->utrace is allocated, and return with it locked on
 * success.  This function mediates startup races.  The creating parent
 * task has priority, and other callers will delay here to let its call
 * succeed and take the new utrace lock first.
 */
static struct utrace *
utrace_first_engine(struct task_struct *target,
		    struct utrace_attached_engine *engine)
{
	struct utrace *utrace, *ret;

	/*
	 * If this is a newborn thread and we are not the creator,
	 * we have to wait for it.  The creator gets the first chance
	 * to attach.  The PF_STARTING flag is cleared after its
	 * report_clone hook has had a chance to run.
	 */
	if ((target->flags & PF_STARTING)
	    && (current->utrace == NULL
		|| current->utrace->u.live.cloning != target)) {
		yield();
		return (signal_pending(current)
			? ERR_PTR(-ERESTARTNOINTR) : NULL);
	}

	utrace = kmem_cache_alloc(utrace_cachep, SLAB_KERNEL);
	if (unlikely(utrace == NULL))
		return ERR_PTR(-ENOMEM);

	utrace->u.live.cloning = NULL;
	utrace->u.live.signal = NULL;
	INIT_LIST_HEAD(&utrace->engines);
	list_add(&engine->entry, &utrace->engines);
	spin_lock_init(&utrace->lock);

	ret = utrace;
	utrace_lock(utrace);
	task_lock(target);
	if (likely(target->utrace == NULL)) {
		rcu_assign_pointer(target->utrace, utrace);
		/*
		 * The task_lock protects us against another thread doing
		 * the same thing.  We might still be racing against
		 * tracehook_release_task.  It's called with ->exit_state
		 * set to EXIT_DEAD and then checks ->utrace with an
		 * smp_mb() in between.  If EXIT_DEAD is set, then
		 * release_task might have checked ->utrace already and saw
		 * it NULL; we can't attach.  If we see EXIT_DEAD not yet
		 * set after our barrier, then we know release_task will
		 * see our target->utrace pointer.
		 */
		smp_mb();
		if (target->exit_state == EXIT_DEAD) {
			/*
			 * The target has already been through release_task.
			 */
			target->utrace = NULL;
			goto cannot_attach;
		}
		task_unlock(target);

		/*
		 * If the thread is already dead when we attach, then its
		 * parent was notified already and we shouldn't repeat the
		 * notification later after a detach or NOREAP flag change.
		 */
		if (target->exit_state)
			utrace->u.exit.notified = 1;
	}
	else {
		/*
		 * Another engine attached first, so there is a struct already.
		 * A null return says to restart looking for the existing one.
		 */
	cannot_attach:
		ret = NULL;
		task_unlock(target);
		utrace_unlock(utrace);
		kmem_cache_free(utrace_cachep, utrace);
	}

	return ret;
}

static void
utrace_free(struct rcu_head *rhead)
{
	struct utrace *utrace = container_of(rhead, struct utrace, u.dead);
	kmem_cache_free(utrace_cachep, utrace);
}

static void
rcu_utrace_free(struct utrace *utrace)
{
	INIT_RCU_HEAD(&utrace->u.dead);
	call_rcu(&utrace->u.dead, utrace_free);
}

static void
utrace_engine_free(struct rcu_head *rhead)
{
	struct utrace_attached_engine *engine =
		container_of(rhead, struct utrace_attached_engine, rhead);
	kmem_cache_free(utrace_engine_cachep, engine);
}

/*
 * Called with utrace locked and the target quiescent (maybe current).
 * If this was the last engine, utrace is left locked and not freed,
 * but is removed from the task.
 */
static void
remove_engine(struct utrace_attached_engine *engine,
	      struct task_struct *tsk, struct utrace *utrace)
{
	list_del_rcu(&engine->entry);
	if (list_empty(&utrace->engines)) {
		task_lock(tsk);
		if (likely(tsk->utrace != NULL)) {
			rcu_assign_pointer(tsk->utrace, NULL);
			tsk->utrace_flags = 0;
		}
		task_unlock(tsk);
	}
	call_rcu(&engine->rhead, utrace_engine_free);
}

/*
 * This is pointed to by the utrace struct, but it's really a private
 * structure between utrace_get_signal and utrace_inject_signal.
 */
struct utrace_signal
{
	siginfo_t *const info;
	struct k_sigaction *return_ka;
	int signr;
};

/*
 * Called with utrace locked, after remove_engine may have run.
 * Passed the flags from all remaining engines, i.e. zero if none left.
 * Install the flags in tsk->utrace_flags and return with utrace unlocked.
 * If no engines are left, utrace is freed and we return NULL.
 */
static struct utrace *
check_dead_utrace(struct task_struct *tsk, struct utrace *utrace,
		  unsigned long flags)
{
	if (flags) {
		tsk->utrace_flags = flags;
		utrace_unlock(utrace);
		return utrace;
	}

	/*
	 * An injected signal is still pending delivery; utrace_get_signal
	 * still needs this struct, so it cannot be freed yet.
	 */
	if (utrace->u.live.signal && utrace->u.live.signal->signr != 0) {
		utrace_unlock(utrace);
		return utrace;
	}

	utrace_unlock(utrace);
	rcu_utrace_free(utrace);
	return NULL;
}

/*
 * Get the target thread to quiesce.  Return nonzero if it's already quiescent.
 * Return zero if it will report a QUIESCE event soon.
 * If interrupt is nonzero, wake it like a signal would so it quiesces ASAP.
 * If interrupt is zero, just make sure it quiesces before going to user mode.
 */
static int
quiesce(struct task_struct *target, int interrupt)
{
	int quiescent;

	target->utrace_flags |= UTRACE_ACTION_QUIESCE;
	read_barrier_depends();

	quiescent = (target->exit_state
		     || target->state & (TASK_TRACED | TASK_STOPPED));

	if (!quiescent) {
		spin_lock_irq(&target->sighand->siglock);
		quiescent = (unlikely(target->exit_state)
			     || unlikely(target->state
					 & (TASK_TRACED | TASK_STOPPED)));
		if (!quiescent) {
			if (interrupt)
				signal_wake_up(target, 0);
			else {
				set_tsk_thread_flag(target, TIF_SIGPENDING);
				kick_process(target);
			}
		}
		spin_unlock_irq(&target->sighand->siglock);
	}

	return quiescent;
}

static struct utrace_attached_engine *
matching_engine(struct utrace *utrace, int flags,
		const struct utrace_engine_ops *ops, unsigned long data)
{
	struct utrace_attached_engine *engine;
	list_for_each_entry_rcu(engine, &utrace->engines, entry) {
		if ((flags & UTRACE_ATTACH_MATCH_OPS)
		    && engine->ops != ops)
			continue;
		if ((flags & UTRACE_ATTACH_MATCH_DATA)
		    && engine->data != data)
			continue;
		if (flags & UTRACE_ATTACH_EXCLUSIVE)
			engine = ERR_PTR(-EEXIST);
		return engine;
	}
	return ERR_PTR(-ENOENT);
}

/*
 * Attach a new tracing engine to a thread, or look up attached engines.
 * UTRACE_ATTACH_CREATE is required to attach a new engine; the
 * UTRACE_ATTACH_MATCH_* options match an existing engine on ops or
 * ops+data and return it (without UTRACE_ATTACH_CREATE: lookup only).
 */
struct utrace_attached_engine *
utrace_attach(struct task_struct *target, int flags,
	      const struct utrace_engine_ops *ops, unsigned long data)
{
	struct utrace *utrace;
	struct utrace_attached_engine *engine;

restart:
	rcu_read_lock();
	utrace = rcu_dereference(target->utrace);
	smp_rmb();
	if (utrace == NULL) {
		rcu_read_unlock();

		if (!(flags & UTRACE_ATTACH_CREATE)) {
			return ERR_PTR(-ENOENT);
		}

		engine = kmem_cache_alloc(utrace_engine_cachep, SLAB_KERNEL);
		if (unlikely(engine == NULL))
			return ERR_PTR(-ENOMEM);
		engine->flags = ops->report_reap ? UTRACE_EVENT(REAP) : 0;
		engine->ops = ops;
		engine->data = data;

		utrace = utrace_first_engine(target, engine);
		if (IS_ERR(utrace)) {
			kmem_cache_free(utrace_engine_cachep, engine);
			return ERR_PTR(PTR_ERR(utrace));
		}
		if (unlikely(utrace == NULL)) { /* Race condition. */
			kmem_cache_free(utrace_engine_cachep, engine);
			goto restart;
		}
		goto attached;
	}
	else if (unlikely(target->exit_state == EXIT_DEAD)) {
		/*
		 * The target has already been reaped.
		 */
		rcu_read_unlock();
		return ERR_PTR(-ESRCH);
	}

	if (!(flags & UTRACE_ATTACH_CREATE)) {
		engine = matching_engine(utrace, flags, ops, data);
		rcu_read_unlock();
		return engine;
	}
	rcu_read_unlock();

	engine = kmem_cache_alloc(utrace_engine_cachep, SLAB_KERNEL);
	if (unlikely(engine == NULL))
		return ERR_PTR(-ENOMEM);
	engine->flags = ops->report_reap ? UTRACE_EVENT(REAP) : 0;

	rcu_read_lock();
	utrace = rcu_dereference(target->utrace);
	if (unlikely(utrace == NULL)) { /* Race with detach. */
		rcu_read_unlock();
		kmem_cache_free(utrace_engine_cachep, engine);
		goto restart;
	}
	utrace_lock(utrace);

	if (flags & UTRACE_ATTACH_EXCLUSIVE) {
		struct utrace_attached_engine *old;
		old = matching_engine(utrace, flags, ops, data);
		if (!IS_ERR(old)) {
			utrace_unlock(utrace);
			rcu_read_unlock();
			kmem_cache_free(utrace_engine_cachep, engine);
			return ERR_PTR(-EEXIST);
		}
	}

	if (unlikely(rcu_dereference(target->utrace) != utrace)) {
		/*
		 * We lost a race with other CPUs doing a sequence
		 * of detach and attach before we got in.
		 */
		utrace_unlock(utrace);
		rcu_read_unlock();
		kmem_cache_free(utrace_engine_cachep, engine);
		goto restart;
	}
	rcu_read_unlock();

	list_add_tail_rcu(&engine->entry, &utrace->engines);

	engine->ops = ops;
	engine->data = data;

attached:
	utrace_unlock(utrace);

	return engine;
}
EXPORT_SYMBOL_GPL(utrace_attach);

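/*
 * Example usage (illustrative sketch only, kept out of the build): a
 * minimal engine that attaches to a task and receives report_exit
 * callbacks.  utrace_attach, utrace_set_flags, and the ops layout are
 * the real API above; the example_* names are hypothetical.
 */
#if 0
static u32
example_report_exit(struct utrace_attached_engine *engine,
		    struct task_struct *tsk,
		    long orig_code, long *code)
{
	printk(KERN_DEBUG "pid %d exiting with code %ld\n",
	       tsk->pid, *code);
	return UTRACE_ACTION_RESUME;
}

static const struct utrace_engine_ops example_ops = {
	.report_exit = example_report_exit,
};

static int
example_attach(struct task_struct *target)
{
	struct utrace_attached_engine *engine;
	engine = utrace_attach(target, UTRACE_ATTACH_CREATE,
			       &example_ops, 0);
	if (IS_ERR(engine))
		return PTR_ERR(engine);
	/* Ask for EXIT reports from now on. */
	utrace_set_flags(target, engine, UTRACE_EVENT(EXIT));
	return 0;
}
#endif
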
/*
 * When an engine is detached, the target thread may still see it and make
 * callbacks until it quiesces.  We reset its event flags to just QUIESCE
 * and install a special ops vector whose callback is dead_engine_delete.
 * When the target thread quiesces, it can safely free the engine itself.
 */
static u32
dead_engine_delete(struct utrace_attached_engine *engine,
		   struct task_struct *tsk)
{
	return UTRACE_ACTION_DETACH;
}

static const struct utrace_engine_ops dead_engine_ops =
{
	.report_quiesce = &dead_engine_delete
};

/*
 * If tracing was preventing a SIGCHLD or self-reaping
 * and is no longer, do that report or reaping right now.
 */
static void
check_noreap(struct task_struct *target, struct utrace *utrace,
	     u32 old_action, u32 action)
{
	if ((action | ~old_action) & UTRACE_ACTION_NOREAP)
		return;

	if (utrace && xchg(&utrace->u.exit.notified, 1))
		return;

	if (target->exit_signal == -1)
		release_task(target);
	else if (thread_group_empty(target)) {
		read_lock(&tasklist_lock);
		do_notify_parent(target, target->exit_signal);
		read_unlock(&tasklist_lock);
	}
}

/*
 * We may have been the one keeping the target thread quiescent.
 * Check if it should wake up now.
 * Called with utrace locked, and unlocks it on return.
 * If we were keeping it stopped, resume it.
 * If we were keeping its zombie from reporting/self-reap, do it now.
 */
static void
wake_quiescent(unsigned long old_flags,
	       struct utrace *utrace, struct task_struct *target)
{
	unsigned long flags = 0;
	struct utrace_attached_engine *engine;

	if (target->exit_state) {
		/*
		 * Update the set of events of interest from the union
		 * of the interests of the remaining tracing engines.
		 */
		list_for_each_entry(engine, &utrace->engines, entry)
			flags |= engine->flags | UTRACE_EVENT(REAP);
		utrace = check_dead_utrace(target, utrace, flags);

		check_noreap(target, utrace, old_flags, flags);
		return;
	}

	/*
	 * Update the set of events of interest from the union
	 * of the interests of the remaining tracing engines.
	 */
	list_for_each_entry(engine, &utrace->engines, entry)
		flags |= engine->flags | UTRACE_EVENT(REAP);
	utrace = check_dead_utrace(target, utrace, flags);

	if (flags & UTRACE_ACTION_QUIESCE)
		return;

	read_lock(&tasklist_lock);
	if (!target->exit_state) {
		/*
		 * The target is not dead and should not be in tracing stop
		 * any more.  Wake it unless it's in job control stop.
		 */
		spin_lock_irq(&target->sighand->siglock);
		if (target->signal->flags & SIGNAL_STOP_STOPPED) {
			int stop_count = target->signal->group_stop_count;
			target->state = TASK_STOPPED;
			spin_unlock_irq(&target->sighand->siglock);

			/*
			 * If tracing was preventing a CLD_STOPPED report
			 * and is no longer, do that report right now.
			 */
			if (stop_count == 0
			    /*&& (events &~ interest) & UTRACE_INHIBIT_CLDSTOP*/)
				do_notify_parent_cldstop(target, CLD_STOPPED);
		}
		else {
			/*
			 * Wake the task up.
			 */
			recalc_sigpending_tsk(target);
			wake_up_state(target, TASK_STOPPED | TASK_TRACED);
			spin_unlock_irq(&target->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
}

void
utrace_detach(struct task_struct *target,
	      struct utrace_attached_engine *engine)
{
	struct utrace *utrace;
	unsigned long flags;

	rcu_read_lock();
	utrace = rcu_dereference(target->utrace);
	smp_rmb();
	if (unlikely(target->exit_state == EXIT_DEAD)) {
		/*
		 * Called after utrace_release_task might have started.
		 * A call to this engine's report_reap callback might
		 * already be in progress or engine might even have been
		 * freed already.
		 */
		rcu_read_unlock();
		return;
	}
	utrace_lock(utrace);
	rcu_read_unlock();

	flags = engine->flags;
	engine->flags = UTRACE_EVENT(QUIESCE) | UTRACE_ACTION_QUIESCE;
	rcu_assign_pointer(engine->ops, &dead_engine_ops);

	if (quiesce(target, 1)) {
		remove_engine(engine, target, utrace);
		wake_quiescent(flags, utrace, target);
	}
	else
		utrace_unlock(utrace);
}
EXPORT_SYMBOL_GPL(utrace_detach);

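/*
 * Example (illustrative sketch only, kept out of the build): looking up
 * a previously attached engine by matching on its ops and detaching it.
 * A lookup without UTRACE_ATTACH_CREATE goes through matching_engine
 * above; example_ops is the hypothetical ops vector from the attach
 * sketch earlier.
 */
#if 0
static void
example_detach(struct task_struct *target)
{
	struct utrace_attached_engine *engine;
	engine = utrace_attach(target, UTRACE_ATTACH_MATCH_OPS,
			       &example_ops, 0);
	if (!IS_ERR(engine))
		utrace_detach(target, engine);
}
#endif
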
/*
 * Called with utrace->lock held.
 * Notify and clean up all engines, then free utrace.
 */
static void
utrace_reap(struct task_struct *target, struct utrace *utrace)
{
	struct utrace_attached_engine *engine, *next;
	const struct utrace_engine_ops *ops;

	list_for_each_entry_safe(engine, next, &utrace->engines, entry) {
		list_del_rcu(&engine->entry);

		/*
		 * Now nothing else refers to this engine.
		 */
		if (engine->flags & UTRACE_EVENT(REAP)) {
			ops = rcu_dereference(engine->ops);
			if (ops != &dead_engine_ops) {
				utrace_unlock(utrace);
				(*ops->report_reap)(engine, target);
				call_rcu(&engine->rhead, utrace_engine_free);
				utrace_lock(utrace);
				continue;
			}
		}
		call_rcu(&engine->rhead, utrace_engine_free);
	}
	utrace_unlock(utrace);

	rcu_utrace_free(utrace);
}

/*
 * Called by release_task.  After this, target->utrace must be cleared.
 */
void
utrace_release_task(struct task_struct *target)
{
	struct utrace *utrace;

	task_lock(target);
	utrace = target->utrace;
	rcu_assign_pointer(target->utrace, NULL);
	task_unlock(target);

	if (unlikely(utrace == NULL))
		return;

	utrace_lock(utrace);

	if (!utrace->u.exit.notified
	    && (target->utrace_flags & (UTRACE_EVENT(DEATH)
					| UTRACE_EVENT(QUIESCE)))) {
		/*
		 * The target will do some final callbacks but hasn't
		 * finished them yet.  We know because it clears these
		 * event bits after it's done.  Instead of cleaning up here
		 * and requiring utrace_report_death to cope with it, we
		 * delay the REAP report and the teardown until after the
		 * target finishes its death reports.
		 */
		utrace->u.exit.reap = 1;
		utrace_unlock(utrace);
	}
	else
		utrace_reap(target, utrace); /* Unlocks and frees. */
}

void
utrace_set_flags(struct task_struct *target,
		 struct utrace_attached_engine *engine,
		 unsigned long flags)
{
	struct utrace *utrace;
	int report = 0;
	unsigned long old_flags, old_utrace_flags;

#ifdef ARCH_HAS_SINGLE_STEP
	if (! ARCH_HAS_SINGLE_STEP)
#endif
		WARN_ON(flags & UTRACE_ACTION_SINGLESTEP);
#ifdef ARCH_HAS_BLOCK_STEP
	if (! ARCH_HAS_BLOCK_STEP)
#endif
		WARN_ON(flags & UTRACE_ACTION_BLOCKSTEP);

	rcu_read_lock();
	utrace = rcu_dereference(target->utrace);
	smp_rmb();
	if (unlikely(target->exit_state == EXIT_DEAD)) {
		/*
		 * Race with reaping; there is nothing to update any more.
		 */
		rcu_read_unlock();
		return;
	}
	utrace_lock(utrace);
	rcu_read_unlock();

	old_utrace_flags = target->utrace_flags;
	old_flags = engine->flags;
	engine->flags = flags;
	target->utrace_flags |= flags;

	if ((old_flags ^ flags) & UTRACE_ACTION_QUIESCE) {
		if (flags & UTRACE_ACTION_QUIESCE) {
			report = (quiesce(target, 1)
				  && (flags & UTRACE_EVENT(QUIESCE)));
			utrace_unlock(utrace);
		}
		else
			wake_quiescent(old_flags, utrace, target);
	}
	else {
		/*
		 * If we're asking for single-stepping or syscall tracing,
		 * we need to pass through utrace_quiescent before resuming
		 * in user mode to get those effects, even if the target is
		 * not going to be quiescent right now.
		 */
		if (!(target->utrace_flags & UTRACE_ACTION_QUIESCE)
		    && ((flags &~ old_utrace_flags)
			& (UTRACE_ACTION_SINGLESTEP | UTRACE_ACTION_BLOCKSTEP
			   | UTRACE_EVENT_SYSCALL)))
			quiesce(target, 0);
		utrace_unlock(utrace);
	}

	if (report) /* Already quiescent, won't report itself. */
		(*engine->ops->report_quiesce)(engine, target);
}
EXPORT_SYMBOL_GPL(utrace_set_flags);

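/*
 * Example (illustrative sketch only, kept out of the build): asking a
 * traced task to quiesce, then resuming it with single-stepping.
 * Setting UTRACE_ACTION_QUIESCE triggers a report_quiesce callback once
 * the target is quiescent; UTRACE_ACTION_SINGLESTEP makes it pass
 * through utrace_quiescent to enable the machine stepping state before
 * returning to user mode.  example_step is a hypothetical caller.
 */
#if 0
static void
example_step(struct task_struct *target,
	     struct utrace_attached_engine *engine)
{
	/* Stop the target and get a report_quiesce callback. */
	utrace_set_flags(target, engine,
			 UTRACE_EVENT(QUIESCE) | UTRACE_ACTION_QUIESCE);
	/* ... later, e.g. from report_quiesce, resume with stepping: */
	utrace_set_flags(target, engine,
			 UTRACE_EVENT(QUIESCE) | UTRACE_ACTION_SINGLESTEP);
}
#endif
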
/*
 * While running an engine callback, no locks are held.
 * If a callback updates its engine's action state, then
 * we need to take the utrace lock to install the flags update.
 */
static u32
update_action(struct task_struct *tsk, struct utrace *utrace,
	      struct utrace_attached_engine *engine,
	      u32 ret)
{
	if (ret & UTRACE_ACTION_DETACH)
		rcu_assign_pointer(engine->ops, &dead_engine_ops);
	else if ((ret & UTRACE_ACTION_NEWSTATE)
		 && ((ret ^ engine->flags) & UTRACE_ACTION_STATE_MASK)) {
#ifdef ARCH_HAS_SINGLE_STEP
		if (! ARCH_HAS_SINGLE_STEP)
#endif
			WARN_ON(ret & UTRACE_ACTION_SINGLESTEP);
#ifdef ARCH_HAS_BLOCK_STEP
		if (! ARCH_HAS_BLOCK_STEP)
#endif
			WARN_ON(ret & UTRACE_ACTION_BLOCKSTEP);
		utrace_lock(utrace);
		/*
		 * If we're changing something other than just QUIESCE,
		 * make sure we pass through utrace_quiescent before
		 * resuming even if we aren't going to stay quiescent.
		 * That's where we get the correct union of all engines'
		 * flags after they've finished changing, and apply changes.
		 */
		if (((ret ^ engine->flags) & (UTRACE_ACTION_STATE_MASK
					      & ~UTRACE_ACTION_QUIESCE)))
			tsk->utrace_flags |= UTRACE_ACTION_QUIESCE;
		engine->flags &= ~UTRACE_ACTION_STATE_MASK;
		engine->flags |= ret & UTRACE_ACTION_STATE_MASK;
		tsk->utrace_flags |= engine->flags;
		utrace_unlock(utrace);
	}
	else
		ret |= engine->flags & UTRACE_ACTION_STATE_MASK;
	return ret;
}

#define REPORT(callback, ...) do { \
	u32 ret = (*rcu_dereference(engine->ops)->callback) \
		(engine, tsk, ##__VA_ARGS__); \
	action = update_action(tsk, utrace, engine, ret); \
	} while (0)

/*
 * Called with utrace->lock held, returns with it released.
 */
static u32
remove_detached(struct task_struct *tsk, struct utrace *utrace,
		struct utrace **utracep, u32 action)
{
	struct utrace_attached_engine *engine, *next;
	unsigned long flags = 0;

	list_for_each_entry_safe(engine, next, &utrace->engines, entry) {
		if (engine->ops == &dead_engine_ops)
			remove_engine(engine, tsk, utrace);
		else
			flags |= engine->flags | UTRACE_EVENT(REAP);
	}
	utrace = check_dead_utrace(tsk, utrace, flags);
	if (utracep)
		*utracep = utrace;

	flags &= UTRACE_ACTION_STATE_MASK;
	return flags | (action & UTRACE_ACTION_OP_MASK);
}

/*
 * Called after an event report loop.  Remove any engines marked for detach.
 */
static u32
check_detach(struct task_struct *tsk, u32 action)
{
	if (action & UTRACE_ACTION_DETACH) {
		utrace_lock(tsk->utrace);
		action = remove_detached(tsk, tsk->utrace, NULL, action);
	}
	return action;
}

static void
check_quiescent(struct task_struct *tsk, u32 action)
{
	if (action & UTRACE_ACTION_STATE_MASK)
		utrace_quiescent(tsk);
}

/*
 * Called iff UTRACE_EVENT(CLONE) flag is set.
 * This notification call blocks the wake_up_new_task call on the child.
 * So we must not quiesce here.  tracehook_report_clone_complete will do
 * a quiescence check momentarily.
 */
void
utrace_report_clone(unsigned long clone_flags, struct task_struct *child)
{
	struct task_struct *tsk = current;
	struct utrace *utrace = tsk->utrace;
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;
	unsigned long action;

	utrace->u.live.cloning = child;

	/* XXX must change for sharing */
	action = UTRACE_ACTION_RESUME;
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if (engine->flags & UTRACE_EVENT(CLONE))
			REPORT(report_clone, clone_flags, child);
		if (action & UTRACE_ACTION_HIDE)
			break;
	}

	utrace->u.live.cloning = NULL;

	check_detach(tsk, action);
}

static u32
report_quiescent(struct task_struct *tsk, struct utrace *utrace, u32 action)
{
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;

	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if (engine->flags & UTRACE_EVENT(QUIESCE))
			REPORT(report_quiesce);
		action |= engine->flags & UTRACE_ACTION_STATE_MASK;
	}

	return check_detach(tsk, action);
}

/*
 * Called iff UTRACE_EVENT(JCTL) flag is set.
 */
int
utrace_report_jctl(int what)
{
	struct task_struct *tsk = current;
	struct utrace *utrace = tsk->utrace;
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;
	unsigned long action;

	/* XXX must change for sharing */
	action = UTRACE_ACTION_RESUME;
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if (engine->flags & UTRACE_EVENT(JCTL))
			REPORT(report_jctl, what);
		if (action & UTRACE_ACTION_HIDE)
			break;
	}

	/*
	 * We are becoming quiescent, so report it now.
	 * We don't block in utrace_quiescent because we are stopping anyway.
	 * We know that upon resuming we'll go through tracehook_induce_signal,
	 * which will keep us quiescent or set us up to resume with tracing.
	 */
	action = report_quiescent(tsk, utrace, action);

	if (what == CLD_STOPPED && tsk->state != TASK_STOPPED) {
		/*
		 * The event report hooks could have blocked, though
		 * it should have been briefly.  Make sure we're in
		 * TASK_STOPPED state again to block properly, unless
		 * we've just come back out of job control stop.
		 */
		spin_lock_irq(&tsk->sighand->siglock);
		if (tsk->signal->flags & SIGNAL_STOP_STOPPED)
			set_current_state(TASK_STOPPED);
		spin_unlock_irq(&tsk->sighand->siglock);
	}

	return action & UTRACE_JCTL_NOSIGCHLD;
}

/*
 * Called if UTRACE_EVENT(QUIESCE) or UTRACE_ACTION_QUIESCE flag is set.
 * Also called after other event reports.
 * It is a good time to block.
 */
void
utrace_quiescent(struct task_struct *tsk)
{
	struct utrace *utrace = tsk->utrace;
	unsigned long action;

restart:
	/* XXX must change for sharing */

	action = report_quiescent(tsk, utrace, UTRACE_ACTION_RESUME);

	/*
	 * If some engines want us quiescent, we block here.
	 */
	if (action & UTRACE_ACTION_QUIESCE) {
		spin_lock_irq(&tsk->sighand->siglock);
		/*
		 * If wake_quiescent is trying to wake us up now, it will
		 * have cleared the QUIESCE flag before trying to take the
		 * siglock.  Now we have the siglock, so either it has
		 * already cleared the flag, or it will wake us up after we
		 * release the siglock it's waiting for.
		 * Never stop when there is a SIGKILL bringing us down.
		 */
		if ((tsk->utrace_flags & UTRACE_ACTION_QUIESCE)
		    /*&& !(tsk->signal->flags & SIGNAL_GROUP_SIGKILL)*/) {
			set_current_state(TASK_TRACED);
			/*
			 * If there is a group stop in progress,
			 * we must participate in the bookkeeping.
			 */
			if (tsk->signal->group_stop_count > 0)
				--tsk->signal->group_stop_count;
			spin_unlock_irq(&tsk->sighand->siglock);
			schedule();
		}
		else
			spin_unlock_irq(&tsk->sighand->siglock);

		/*
		 * We've woken up.  One engine could be waking us up while
		 * another has asked us to quiesce.  So check afresh.  We
		 * could have been detached while quiescent.  Now we are no
		 * longer quiescent, so don't need to do any RCU locking.
		 * But we do need to check our utrace pointer anew.
		 */
		utrace = tsk->utrace;
		if (tsk->utrace_flags
		    & (UTRACE_EVENT(QUIESCE) | UTRACE_ACTION_STATE_MASK))
			goto restart;
		else if (tsk->utrace_flags & UTRACE_ACTION_QUIESCE) {
			/*
			 * Our flags are out of date.
			 * Update the set of events of interest from the union
			 * of the interests of the remaining tracing engines.
			 */
			struct utrace_attached_engine *engine;
			unsigned long flags = 0;
			utrace = rcu_dereference(tsk->utrace);
			utrace_lock(utrace);
			list_for_each_entry(engine, &utrace->engines, entry)
				flags |= engine->flags | UTRACE_EVENT(REAP);
			tsk->utrace_flags = flags;
			utrace_unlock(utrace);
		}
	}

	/*
	 * We're resuming.  Update the machine layer tracing state and then go.
	 */
#ifdef ARCH_HAS_SINGLE_STEP
	if (action & UTRACE_ACTION_SINGLESTEP)
		tracehook_enable_single_step(tsk);
	else
		tracehook_disable_single_step(tsk);
#endif
#ifdef ARCH_HAS_BLOCK_STEP
	if ((action & (UTRACE_ACTION_BLOCKSTEP|UTRACE_ACTION_SINGLESTEP))
	    == UTRACE_ACTION_BLOCKSTEP)
		tracehook_enable_block_step(tsk);
	else
		tracehook_disable_block_step(tsk);
#endif
	if (tsk->utrace_flags & UTRACE_EVENT_SYSCALL)
		tracehook_enable_syscall_trace(tsk);
	else
		tracehook_disable_syscall_trace(tsk);
}

/*
 * Called iff UTRACE_EVENT(EXIT) flag is set.
 */
void
utrace_report_exit(long *exit_code)
{
	struct task_struct *tsk = current;
	struct utrace *utrace = tsk->utrace;
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;
	unsigned long action;
	long orig_code = *exit_code;

	/* XXX must change for sharing */
	action = UTRACE_ACTION_RESUME;
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if (engine->flags & UTRACE_EVENT(EXIT))
			REPORT(report_exit, orig_code, exit_code);
	}
	action = check_detach(tsk, action);
	check_quiescent(tsk, action);
}

/*
 * Called iff UTRACE_EVENT(DEATH) flag is set.
 *
 * It is always possible that we are racing with utrace_release_task here,
 * if UTRACE_ACTION_NOREAP is not set, or in the case of non-leader exec
 * where the old leader will get released regardless of NOREAP.  For this
 * reason, utrace_release_task checks for the event bits that get us here,
 * and delays its cleanup for us to do.
 */
void
utrace_report_death(struct task_struct *tsk, struct utrace *utrace)
{
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;
	u32 action, oaction;

	BUG_ON(!tsk->exit_state);

	oaction = tsk->utrace_flags;

	/* XXX must change for sharing */
	action = UTRACE_ACTION_RESUME;
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if (engine->flags & UTRACE_EVENT(DEATH))
			REPORT(report_death);
		if (engine->flags & UTRACE_EVENT(QUIESCE))
			REPORT(report_quiesce);
	}

	/*
	 * Unconditionally lock and recompute the flags.
	 * This may notice that there are no engines left and
	 * free the utrace struct.
	 */
	utrace_lock(utrace);
	if (utrace->u.exit.reap) {
		/*
		 * utrace_release_task was already called in parallel.
		 * We must complete its work now.
		 */
	reap:
		utrace_reap(tsk, utrace);
		return;
	}
	action = remove_detached(tsk, utrace, &utrace, action);

	if (utrace != NULL) {
		utrace_lock(utrace);
		if (utrace->u.exit.reap)
			goto reap;

		/*
		 * Clear event bits we can't see any more.  This
		 * tells utrace_release_task we have already
		 * finished, if it comes along later.
		 */
		tsk->utrace_flags &= (UTRACE_EVENT(REAP)
				      | UTRACE_ACTION_NOREAP);

		utrace_unlock(utrace);
	}

	check_noreap(tsk, utrace, oaction, action);
}

/*
 * Called iff UTRACE_EVENT(VFORK_DONE) flag is set.
 */
void
utrace_report_vfork_done(pid_t child_pid)
{
	struct task_struct *tsk = current;
	struct utrace *utrace = tsk->utrace;
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;
	unsigned long action;

	/* XXX must change for sharing */
	action = UTRACE_ACTION_RESUME;
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if (engine->flags & UTRACE_EVENT(VFORK_DONE))
			REPORT(report_vfork_done, child_pid);
		if (action & UTRACE_ACTION_HIDE)
			break;
	}
	action = check_detach(tsk, action);
	check_quiescent(tsk, action);
}

/*
 * Called iff UTRACE_EVENT(EXEC) flag is set.
 */
void
utrace_report_exec(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct utrace *utrace = tsk->utrace;
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;
	unsigned long action;

	/* XXX must change for sharing */
	action = UTRACE_ACTION_RESUME;
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if (engine->flags & UTRACE_EVENT(EXEC))
			REPORT(report_exec, bprm, regs);
		if (action & UTRACE_ACTION_HIDE)
			break;
	}
	action = check_detach(tsk, action);
	check_quiescent(tsk, action);
}

/*
 * Called iff UTRACE_EVENT(SYSCALL_{ENTRY,EXIT}) flag is set.
 */
void
utrace_report_syscall(struct pt_regs *regs, int is_exit)
{
	struct task_struct *tsk = current;
	struct utrace *utrace = tsk->utrace;
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;
	unsigned long action, ev;

	/*
	 * XXX pass syscall # to engine hook directly, let it return
	 * inhibit-action:
	 *	long syscall = tracehook_syscall_number(regs, is_exit);
	 */

	ev = is_exit ? UTRACE_EVENT(SYSCALL_EXIT) : UTRACE_EVENT(SYSCALL_ENTRY);

	/* XXX must change for sharing */
	action = UTRACE_ACTION_RESUME;
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if (engine->flags & ev) {
			if (is_exit)
				REPORT(report_syscall_exit, regs);
			else
				REPORT(report_syscall_entry, regs);
		}
		if (action & UTRACE_ACTION_HIDE)
			break;
	}
	action = check_detach(tsk, action);
	check_quiescent(tsk, action);
}

// XXX copied from signal.c
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL) | M(SIGSTOP) )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \
	M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \
	M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) )

#define sig_kernel_only(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
	(((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))

/*
 * Call each interested tracing engine's report_signal callback.
 */
static u32
report_signal(struct task_struct *tsk, struct pt_regs *regs,
	      struct utrace *utrace, u32 action,
	      unsigned long flags1, unsigned long flags2, siginfo_t *info,
	      const struct k_sigaction *ka, struct k_sigaction *return_ka)
{
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;

	/* XXX must change for sharing */
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if ((engine->flags & flags1) && (engine->flags & flags2)) {
			u32 disp = action & UTRACE_ACTION_OP_MASK;
			action &= ~UTRACE_ACTION_OP_MASK;
			REPORT(report_signal, regs, disp, info, ka, return_ka);
			if ((action & UTRACE_ACTION_OP_MASK) == 0)
				action |= disp;
			if (action & UTRACE_ACTION_HIDE)
				break;
		}
	}

	return action;
}

void
utrace_signal_handler_singlestep(struct task_struct *tsk, struct pt_regs *regs)
{
	u32 action;
	action = report_signal(tsk, regs, tsk->utrace, UTRACE_SIGNAL_HANDLER,
			       UTRACE_EVENT_SIGNAL_ALL,
			       UTRACE_ACTION_SINGLESTEP|UTRACE_ACTION_BLOCKSTEP,
			       NULL, NULL, NULL);
	action = check_detach(tsk, action);
	check_quiescent(tsk, action);
}

/*
 * This is the hook from the signals code, called with the siglock held.
 * Here is the ideal place to quiesce.  We also dequeue and intercept signals.
 */
int
utrace_get_signal(struct task_struct *tsk, struct pt_regs *regs,
		  siginfo_t *info, struct k_sigaction *return_ka)
{
	struct utrace *utrace = tsk->utrace;
	struct utrace_signal signal = { info, return_ka, 0 };
	struct k_sigaction *ka;
	unsigned long action, event;

	if (tsk->signal->flags & SIGNAL_GROUP_SIGKILL)
		return -1;

	/*
	 * If we should quiesce, now is the time.
	 * First stash a pointer to the state on our stack,
	 * so that utrace_inject_signal can tell us what to do.
	 */
	if (utrace->u.live.signal == NULL)
		utrace->u.live.signal = &signal;

	if (tsk->utrace_flags & UTRACE_ACTION_QUIESCE) {
		spin_unlock_irq(&tsk->sighand->siglock);
		utrace_quiescent(tsk);
		if (signal.signr == 0)
			/*
			 * This return value says to reacquire the siglock
			 * and check again.  This will check for a pending
			 * group stop and process it before coming back here.
			 */
			return -1;
		spin_lock_irq(&tsk->sighand->siglock);
	}

	/*
	 * If a signal was injected previously, it could not use our
	 * stack space directly.  It had to allocate a data structure,
	 * which we can now copy out of and free.
	 */
	if (utrace->u.live.signal != &signal) {
		signal.signr = utrace->u.live.signal->signr;
		copy_siginfo(info, utrace->u.live.signal->info);
		if (utrace->u.live.signal->return_ka)
			*return_ka = *utrace->u.live.signal->return_ka;
		else
			signal.return_ka = NULL;
		kfree(utrace->u.live.signal);
	}
	utrace->u.live.signal = NULL;

	/*
	 * If a signal was injected, everything is in place now.  Go do it.
	 */
	if (signal.signr != 0) {
		if (signal.return_ka == NULL) {
			ka = &tsk->sighand->action[signal.signr - 1];
			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;
			*return_ka = *ka;
		}
		else
			BUG_ON(signal.return_ka != return_ka);
		return signal.signr;
	}

	/*
	 * If noone is interested in intercepting signals, let the caller
	 * just dequeue them normally.
	 */
	if ((tsk->utrace_flags & UTRACE_EVENT_SIGNAL_ALL) == 0)
		return -1;

	/*
	 * Steal the next signal so we can let tracing engines examine it.
	 * From the signal number and sigaction, determine what normal
	 * delivery would do.  If no engine perturbs it, we'll do that
	 * by returning the signal number after setting *return_ka.
	 */
	signal.signr = dequeue_signal(tsk, &tsk->blocked, info);
	if (signal.signr == 0)
		return 0;

	BUG_ON(signal.signr != info->si_signo);

	ka = &tsk->sighand->action[signal.signr - 1];
	*return_ka = *ka;

	if (signal.signr == SIGKILL)
		return signal.signr;

	if (ka->sa.sa_handler == SIG_IGN) {
		event = UTRACE_EVENT(SIGNAL_IGN);
		action = UTRACE_SIGNAL_IGN;
	}
	else if (ka->sa.sa_handler != SIG_DFL) {
		event = UTRACE_EVENT(SIGNAL);
		action = UTRACE_ACTION_RESUME;
	}
	else if (sig_kernel_coredump(signal.signr)) {
		event = UTRACE_EVENT(SIGNAL_CORE);
		action = UTRACE_SIGNAL_CORE;
	}
	else if (sig_kernel_ignore(signal.signr)) {
		event = UTRACE_EVENT(SIGNAL_IGN);
		action = UTRACE_SIGNAL_IGN;
	}
	else if (sig_kernel_stop(signal.signr)) {
		event = UTRACE_EVENT(SIGNAL_STOP);
		action = (signal.signr == SIGSTOP
			  ? UTRACE_SIGNAL_STOP : UTRACE_SIGNAL_TSTP);
	}
	else {
		event = UTRACE_EVENT(SIGNAL_TERM);
		action = UTRACE_SIGNAL_TERM;
	}

	if (tsk->utrace_flags & event) {
		/*
		 * We have some interested engines, so tell them about the
		 * signal and let them change its disposition.
		 */
		spin_unlock_irq(&tsk->sighand->siglock);

		action = report_signal(tsk, regs, utrace, action, event, event,
				       info, ka, return_ka);
		action &= UTRACE_ACTION_OP_MASK;

		if (action & UTRACE_SIGNAL_HOLD) {
			struct sigqueue *q = sigqueue_alloc();
			if (likely(q != NULL)) {
				q->flags = 0;
				copy_siginfo(&q->info, info);
			}
			action &= ~UTRACE_SIGNAL_HOLD;
			spin_lock_irq(&tsk->sighand->siglock);
			sigaddset(&tsk->pending.signal, info->si_signo);
			if (likely(q != NULL))
				list_add(&q->list, &tsk->pending.list);
		}
		else
			spin_lock_irq(&tsk->sighand->siglock);

		recalc_sigpending_tsk(tsk);

		if (tsk->utrace != utrace)
			rcu_utrace_free(utrace);
	}

	/*
	 * We express the chosen action to the signals code in terms
	 * of a representative signal whose default action does it.
	 */
	switch (action) {
	case UTRACE_SIGNAL_IGN:
		/*
		 * We've eaten the signal.  That's all we do.
		 * Tell the caller to restart.
		 */
		spin_unlock_irq(&tsk->sighand->siglock);
		return -1;

	case UTRACE_ACTION_RESUME:
	case UTRACE_SIGNAL_DELIVER:
		/*
		 * The handler will run.  We do the SA_ONESHOT work here
		 * since the normal path will only touch *return_ka now.
		 */
		if (return_ka->sa.sa_flags & SA_ONESHOT)
			ka->sa.sa_handler = SIG_DFL;
		break;

	case UTRACE_SIGNAL_TSTP:
		signal.signr = SIGTSTP;
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
		return_ka->sa.sa_handler = SIG_DFL;
		break;

	case UTRACE_SIGNAL_STOP:
		signal.signr = SIGSTOP;
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
		return_ka->sa.sa_handler = SIG_DFL;
		break;

	case UTRACE_SIGNAL_TERM:
		signal.signr = SIGTERM;
		return_ka->sa.sa_handler = SIG_DFL;
		break;

	case UTRACE_SIGNAL_CORE:
		signal.signr = SIGQUIT;
		return_ka->sa.sa_handler = SIG_DFL;
		break;
	}

	return signal.signr;
}

/*
 * Cause a specified signal delivery in the target thread,
 * which must be quiescent.  The action has UTRACE_SIGNAL_* bits
 * as returned from a report_signal callback.  If ka is non-null,
 * it gives the sigaction to follow for UTRACE_SIGNAL_DELIVER;
 * otherwise, the installed sigaction at the time of delivery is used.
 */
int
utrace_inject_signal(struct task_struct *target,
		     struct utrace_attached_engine *engine,
		     u32 action, siginfo_t *info,
		     const struct k_sigaction *ka)
{
	struct utrace *utrace;
	struct utrace_signal *signal;
	int ret;

	if (info->si_signo == 0 || !valid_signal(info->si_signo))
		return -EINVAL;

	rcu_read_lock();
	utrace = rcu_dereference(target->utrace);
	if (utrace == NULL) {
		rcu_read_unlock();
		return -ESRCH;
	}
	utrace_lock(utrace);
	rcu_read_unlock();

	ret = 0;
	signal = utrace->u.live.signal;
	if (signal == NULL) {
		ret = -ENOSYS;	/* XXX */
	}
	else if (signal->signr != 0)
		ret = -EAGAIN;
	else {
		if (info != signal->info)
			copy_siginfo(signal->info, info);

		switch (action & UTRACE_ACTION_OP_MASK) {
		case UTRACE_SIGNAL_IGN:
			break;

		case UTRACE_ACTION_RESUME:
		case UTRACE_SIGNAL_DELIVER:
			/*
			 * The handler will run.  We do the SA_ONESHOT work
			 * here since the normal path will not touch the
			 * real sigaction when using an injected signal.
			 */
			if (ka == NULL)
				signal->return_ka = NULL;
			else if (ka != signal->return_ka)
				*signal->return_ka = *ka;
			if (ka && ka->sa.sa_flags & SA_ONESHOT) {
				struct k_sigaction *a;
				a = &target->sighand->action[info->si_signo-1];
				spin_lock_irq(&target->sighand->siglock);
				a->sa.sa_handler = SIG_DFL;
				spin_unlock_irq(&target->sighand->siglock);
			}
			signal->signr = info->si_signo;
			break;

		case UTRACE_SIGNAL_TSTP:
			signal->signr = SIGTSTP;
			spin_lock_irq(&target->sighand->siglock);
			target->signal->flags |= SIGNAL_STOP_DEQUEUED;
			spin_unlock_irq(&target->sighand->siglock);
			signal->return_ka->sa.sa_handler = SIG_DFL;
			break;

		case UTRACE_SIGNAL_STOP:
			signal->signr = SIGSTOP;
			spin_lock_irq(&target->sighand->siglock);
			target->signal->flags |= SIGNAL_STOP_DEQUEUED;
			spin_unlock_irq(&target->sighand->siglock);
			signal->return_ka->sa.sa_handler = SIG_DFL;
			break;

		case UTRACE_SIGNAL_TERM:
			signal->signr = SIGTERM;
			signal->return_ka->sa.sa_handler = SIG_DFL;
			break;

		case UTRACE_SIGNAL_CORE:
			signal->signr = SIGQUIT;
			signal->return_ka->sa.sa_handler = SIG_DFL;
			break;

		default:
			ret = -EINVAL;
			break;
		}
	}

	utrace_unlock(utrace);

	return ret;
}
EXPORT_SYMBOL_GPL(utrace_inject_signal);

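/*
 * Example (illustrative sketch only, kept out of the build): injecting
 * SIGUSR1 from a report_quiesce callback, letting the sigaction
 * installed at delivery time apply (ka == NULL).  Note the caveat from
 * the code above: this only succeeds when the target is stopped inside
 * utrace_get_signal (otherwise it returns -ENOSYS).  The example_*
 * name is hypothetical.
 */
#if 0
static u32
example_report_quiesce(struct utrace_attached_engine *engine,
		       struct task_struct *tsk)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = SIGUSR1;
	info.si_code = SI_KERNEL;
	/* May fail with -ENOSYS if tsk is not in the signal path. */
	utrace_inject_signal(tsk, engine, UTRACE_SIGNAL_DELIVER,
			     &info, NULL);
	return UTRACE_ACTION_RESUME;
}
#endif
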
const struct utrace_regset *
utrace_regset(struct task_struct *target,
	      struct utrace_attached_engine *engine,
	      const struct utrace_regset_view *view, int which)
{
	if (unlikely((unsigned) which >= view->n))
		return NULL;

	if (target != current)
		wait_task_inactive(target);

	return &view->regsets[which];
}
EXPORT_SYMBOL_GPL(utrace_regset);

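/*
 * Example (illustrative sketch only, kept out of the build): reading
 * the target's registers through the regset interface.  This assumes
 * regset 0 of the native view is the general-purpose register set, as
 * on most machines; example_read_gpregs is a hypothetical helper.
 */
#if 0
static int
example_read_gpregs(struct task_struct *target,
		    struct utrace_attached_engine *engine,
		    void *buf, unsigned int size)
{
	const struct utrace_regset *regset;

	regset = utrace_regset(target, engine,
			       utrace_native_view(target), 0);
	if (regset == NULL)
		return -EIO;
	/* Copy register bytes [0, size) into the kernel buffer. */
	return (*regset->get)(target, regset, 0, size, buf, NULL);
}
#endif
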
/*
 * Return the task_struct for the task using ptrace on this one, or NULL.
 * Must be called with rcu_read_lock held to keep the returned struct alive.
 *
 * At exec time, this may be called with task_lock(p) still held from when
 * tracehook_unsafe_exec was just called.  In that case it must give
 * results consistent with those unsafe_exec results, i.e. non-NULL if
 * any LSM_UNSAFE_PTRACE_* bits were set.
 *
 * The value is also used to display after "TracerPid:" in /proc/PID/status,
 * where it is called with only rcu_read_lock held.
 */
struct task_struct *
utrace_tracer_task(struct task_struct *target)
{
	struct utrace *utrace;
	struct task_struct *tracer = NULL;

	utrace = rcu_dereference(target->utrace);
	if (utrace != NULL) {
		struct list_head *pos, *next;
		struct utrace_attached_engine *engine;
		const struct utrace_engine_ops *ops;
		list_for_each_safe_rcu(pos, next, &utrace->engines) {
			engine = list_entry(pos, struct utrace_attached_engine,
					    entry);
			ops = rcu_dereference(engine->ops);
			if (ops->tracer_task) {
				tracer = (*ops->tracer_task)(engine, target);
				if (tracer != NULL)
					break;
			}
		}
	}

	return tracer;
}

int
utrace_allow_access_process_vm(struct task_struct *target)
{
	struct utrace *utrace;
	int ret = 0;

	rcu_read_lock();
	utrace = rcu_dereference(target->utrace);
	if (utrace != NULL) {
		struct list_head *pos, *next;
		struct utrace_attached_engine *engine;
		const struct utrace_engine_ops *ops;
		list_for_each_safe_rcu(pos, next, &utrace->engines) {
			engine = list_entry(pos, struct utrace_attached_engine,
					    entry);
			ops = rcu_dereference(engine->ops);
			if (ops->allow_access_process_vm) {
				ret = (*ops->allow_access_process_vm)(engine,
								      target,
								      current);
				if (ret)
					break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Called on the current task to return LSM_UNSAFE_* bits implied by tracing.
 * Called with task_lock held.
 */
int
utrace_unsafe_exec(struct task_struct *tsk)
{
	struct utrace *utrace = tsk->utrace;
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;
	const struct utrace_engine_ops *ops;
	int unsafe = 0;

	/* XXX must change for sharing */
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		ops = rcu_dereference(engine->ops);
		if (ops->unsafe_exec)
			unsafe |= (*ops->unsafe_exec)(engine, tsk);
	}

	return unsafe;
}