/*
 * utrace infrastructure interface for debugging user processes
 *
 * Copyright (C) 2006, 2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 *
 * Red Hat Author: Roland McGrath.
 */
#include <linux/utrace.h>
#include <linux/tracehook.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/tracehook.h>
static struct kmem_cache *utrace_cachep;
static struct kmem_cache *utrace_engine_cachep;

static int __init
utrace_init(void)
{
	utrace_cachep =
		kmem_cache_create("utrace_cache",
				  sizeof(struct utrace), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	utrace_engine_cachep =
		kmem_cache_create("utrace_engine_cache",
				  sizeof(struct utrace_attached_engine), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	return 0;
}
subsys_initcall(utrace_init);
/*
 * Make sure target->utrace is allocated, and return with it locked on
 * success.  This function mediates startup races.  The creating parent
 * task has priority, and other callers will delay here to let its call
 * succeed and take the new utrace lock first.
 */
static struct utrace *
utrace_first_engine(struct task_struct *target,
		    struct utrace_attached_engine *engine)
{
	struct utrace *utrace, *ret;

	/*
	 * If this is a newborn thread and we are not the creator,
	 * we have to wait for it.  The creator gets the first chance
	 * to attach.  The PF_STARTING flag is cleared after its
	 * report_clone hook has had a chance to run.
	 */
	if ((target->flags & PF_STARTING)
	    && (current->utrace == NULL
		|| current->utrace->u.live.cloning != target)) {
		yield();
		return (signal_pending(current)
			? ERR_PTR(-ERESTARTNOINTR) : NULL);
	}

	utrace = kmem_cache_alloc(utrace_cachep, GFP_KERNEL);
	if (unlikely(utrace == NULL))
		return ERR_PTR(-ENOMEM);

	utrace->u.live.cloning = NULL;
	utrace->u.live.signal = NULL;
	INIT_LIST_HEAD(&utrace->engines);
	list_add(&engine->entry, &utrace->engines);
	spin_lock_init(&utrace->lock);

	ret = utrace;
	spin_lock(&utrace->lock);
	task_lock(target);
	if (likely(target->utrace == NULL)) {
		rcu_assign_pointer(target->utrace, utrace);
		/*
		 * The task_lock protects us against another thread doing
		 * the same thing.  We might still be racing against
		 * tracehook_release_task.  It's called with ->exit_state
		 * set to EXIT_DEAD and then checks ->utrace with an
		 * smp_mb() in between.  If EXIT_DEAD is set, then
		 * release_task might have checked ->utrace already and saw
		 * it NULL; we can't attach.  If we see EXIT_DEAD not yet
		 * set after our barrier, then we know release_task will
		 * see our target->utrace pointer.
		 */
		smp_mb();
		if (target->exit_state == EXIT_DEAD) {
			/*
			 * The target has already been through release_task.
			 */
			target->utrace = NULL;
			goto cannot_attach;
		}
		task_unlock(target);
	}
	else {
		/*
		 * Another engine attached first, so there is a struct already.
		 * A null return says to restart looking for the existing one.
		 */
	cannot_attach:
		ret = NULL;
		task_unlock(target);
		spin_unlock(&utrace->lock);
		kmem_cache_free(utrace_cachep, utrace);
	}

	return ret;
}
static void
utrace_free(struct rcu_head *rhead)
{
	struct utrace *utrace = container_of(rhead, struct utrace, u.dead);
	kmem_cache_free(utrace_cachep, utrace);
}

/*
 * Called with utrace locked.  Clean it up and free it via RCU.
 */
static void
rcu_utrace_free(struct utrace *utrace)
{
	spin_unlock(&utrace->lock);
	INIT_RCU_HEAD(&utrace->u.dead);
	call_rcu(&utrace->u.dead, utrace_free);
}
static void
utrace_engine_free(struct rcu_head *rhead)
{
	struct utrace_attached_engine *engine =
		container_of(rhead, struct utrace_attached_engine, rhead);
	kmem_cache_free(utrace_engine_cachep, engine);
}
/*
 * Remove the utrace pointer from the task, unless there is a pending
 * forced signal (or it's quiescent in utrace_get_signal).
 */
static inline void
utrace_clear_tsk(struct task_struct *tsk, struct utrace *utrace)
{
	if (utrace->u.live.signal == NULL) {
		task_lock(tsk);
		if (likely(tsk->utrace != NULL)) {
			rcu_assign_pointer(tsk->utrace, NULL);
			tsk->utrace_flags &= UTRACE_ACTION_NOREAP;
		}
		task_unlock(tsk);
	}
}
/*
 * Called with utrace locked and the target quiescent (maybe current).
 * If this was the last engine and there is no parting forced signal
 * pending, utrace is left locked and not freed, but is removed from the task.
 */
static void
remove_engine(struct utrace_attached_engine *engine,
	      struct task_struct *tsk, struct utrace *utrace)
{
	list_del_rcu(&engine->entry);
	if (list_empty(&utrace->engines))
		utrace_clear_tsk(tsk, utrace);
	call_rcu(&engine->rhead, utrace_engine_free);
}
/*
 * Called with utrace locked, after remove_engine may have run.
 * Passed the flags from all remaining engines, i.e. zero if none
 * left.  Install the flags in tsk->utrace_flags and return with
 * utrace unlocked.  If no engines are left and there is no parting
 * forced signal pending, utrace is freed.
 */
static void
check_dead_utrace(struct task_struct *tsk, struct utrace *utrace,
		  unsigned long flags)
{
	long exit_state = 0;

	if (!tsk->exit_state && utrace->u.live.signal != NULL)
		/*
		 * There is a pending forced signal.  It may have been
		 * left by an engine now detached.  The empty utrace
		 * remains attached until it can be processed.
		 */
		flags |= UTRACE_ACTION_QUIESCE;

	/*
	 * If tracing was preventing a SIGCHLD or self-reaping
	 * and is no longer, we'll do that report or reaping now.
	 */
	if (((tsk->utrace_flags &~ flags) & UTRACE_ACTION_NOREAP)
	    && tsk->exit_state) {
		/*
		 * While holding the utrace lock, mark that it's been done.
		 * For self-reaping, we need to change tsk->exit_state
		 * before clearing tsk->utrace_flags, so that the real
		 * parent can't see it in EXIT_ZOMBIE momentarily and reap
		 * it.  If tsk was the group_leader, an exec by another
		 * thread can release_task it despite our NOREAP.  Holding
		 * tasklist_lock for reading excludes de_thread until we
		 * decide what to do.
		 */
		read_lock(&tasklist_lock);
		if (tsk->exit_signal == -1) { /* Self-reaping thread.  */
			exit_state = xchg(&tsk->exit_state, EXIT_DEAD);
			read_unlock(&tasklist_lock);

			BUG_ON(exit_state != EXIT_ZOMBIE);
			exit_state = EXIT_DEAD;	/* Reap it below.  */

			/*
			 * Now that we've changed its state to DEAD,
			 * it's safe to install the new tsk->utrace_flags
			 * value without the UTRACE_ACTION_NOREAP bit set.
			 */
		}
		else if (thread_group_empty(tsk)) /* Normal solo zombie.  */
			/*
			 * We need to prevent the real parent from reaping
			 * until after we've called do_notify_parent, below.
			 * It can get into wait_task_zombie any time after
			 * the UTRACE_ACTION_NOREAP bit is cleared.  It's
			 * safe for that to do everything it does until its
			 * release_task call starts tearing things down.
			 * Holding tasklist_lock for reading prevents
			 * release_task from proceeding until we've done
			 * everything we need to do.
			 */
			exit_state = EXIT_ZOMBIE;
		else
			/*
			 * Delayed group leader, nothing to do yet.
			 * This is also the situation with the old
			 * group leader in an exec by another thread,
			 * which will call release_task itself.
			 */
			read_unlock(&tasklist_lock);
	}

	tsk->utrace_flags = flags;
	if (flags)
		spin_unlock(&utrace->lock);
	else
		rcu_utrace_free(utrace);

	/*
	 * Now we're finished updating the utrace state.
	 * Do a pending self-reaping or parent notification.
	 */
	if (exit_state == EXIT_ZOMBIE) {
		do_notify_parent(tsk, tsk->exit_signal);

		/*
		 * If SIGCHLD was ignored, that set tsk->exit_signal = -1
		 * to tell us to reap it immediately.
		 */
		if (tsk->exit_signal == -1) {
			exit_state = xchg(&tsk->exit_state, EXIT_DEAD);
			BUG_ON(exit_state != EXIT_ZOMBIE);
			exit_state = EXIT_DEAD;	/* Reap it below.  */
		}
		read_unlock(&tasklist_lock); /* See comment above.  */
	}
	if (exit_state == EXIT_DEAD)
		/*
		 * Note this can wind up in utrace_reap and do more callbacks.
		 * Our callers must be in places where that is OK.
		 */
		release_task(tsk);
}
/*
 * Get the target thread to quiesce.  Return nonzero if it's already quiescent.
 * Return zero if it will report a QUIESCE event soon.
 * If interrupt is nonzero, wake it like a signal would so it quiesces ASAP.
 * If interrupt is zero, just make sure it quiesces before going to user mode.
 */
static int
quiesce(struct task_struct *target, int interrupt)
{
	int quiescent;

	target->utrace_flags |= UTRACE_ACTION_QUIESCE;
	read_barrier_depends();

	quiescent = (target->exit_state
		     || target->state & (TASK_TRACED | TASK_STOPPED));

	if (!quiescent) {
		spin_lock_irq(&target->sighand->siglock);
		quiescent = (unlikely(target->exit_state)
			     || unlikely(target->state
					 & (TASK_TRACED | TASK_STOPPED)));
		if (!quiescent) {
			if (interrupt)
				signal_wake_up(target, 0);
			else {
				set_tsk_thread_flag(target, TIF_SIGPENDING);
				kick_process(target);
			}
		}
		spin_unlock_irq(&target->sighand->siglock);
	}

	return quiescent;
}
static struct utrace_attached_engine *
matching_engine(struct utrace *utrace, int flags,
		const struct utrace_engine_ops *ops, unsigned long data)
{
	struct utrace_attached_engine *engine;
	list_for_each_entry_rcu(engine, &utrace->engines, entry) {
		if ((flags & UTRACE_ATTACH_MATCH_OPS)
		    && engine->ops != ops)
			continue;
		if ((flags & UTRACE_ATTACH_MATCH_DATA)
		    && engine->data != data)
			continue;
		return engine;
	}
	return ERR_PTR(-ENOENT);
}
/*
  option to match existing on ops, ops+data, return it; nocreate:lookup only
 */
struct utrace_attached_engine *
utrace_attach(struct task_struct *target, int flags,
	      const struct utrace_engine_ops *ops, unsigned long data)
{
	struct utrace *utrace;
	struct utrace_attached_engine *engine;

restart:
	rcu_read_lock();
	utrace = rcu_dereference(target->utrace);
	if (unlikely(target->exit_state == EXIT_DEAD)) {
		/*
		 * The target has already been reaped.
		 * Check this first; a race with reaping may lead to restart.
		 */
		rcu_read_unlock();
		return ERR_PTR(-ESRCH);
	}
	if (utrace == NULL) {
		rcu_read_unlock();

		if (!(flags & UTRACE_ATTACH_CREATE))
			return ERR_PTR(-ENOENT);

		engine = kmem_cache_alloc(utrace_engine_cachep, GFP_KERNEL);
		if (unlikely(engine == NULL))
			return ERR_PTR(-ENOMEM);
		engine->flags = 0;

		utrace = utrace_first_engine(target, engine);
		if (IS_ERR(utrace) || unlikely(utrace == NULL)) {
			kmem_cache_free(utrace_engine_cachep, engine);
			if (unlikely(utrace == NULL)) /* Race condition.  */
				goto restart;
			return ERR_PTR(PTR_ERR(utrace));
		}
	}
	else {
		if (!(flags & UTRACE_ATTACH_CREATE)) {
			engine = matching_engine(utrace, flags, ops, data);
			rcu_read_unlock();
			return engine;
		}
		rcu_read_unlock();

		engine = kmem_cache_alloc(utrace_engine_cachep, GFP_KERNEL);
		if (unlikely(engine == NULL))
			return ERR_PTR(-ENOMEM);
		engine->flags = 0;

		rcu_read_lock();
		utrace = rcu_dereference(target->utrace);
		if (unlikely(utrace == NULL)) { /* Race with detach.  */
			rcu_read_unlock();
			kmem_cache_free(utrace_engine_cachep, engine);
			goto restart;
		}
		spin_lock(&utrace->lock);

		if (flags & UTRACE_ATTACH_EXCLUSIVE) {
			struct utrace_attached_engine *old;
			old = matching_engine(utrace, flags, ops, data);
			if (!IS_ERR(old)) {
				spin_unlock(&utrace->lock);
				rcu_read_unlock();
				kmem_cache_free(utrace_engine_cachep, engine);
				return ERR_PTR(-EEXIST);
			}
		}

		if (unlikely(rcu_dereference(target->utrace) != utrace)) {
			/*
			 * We lost a race with other CPUs doing a sequence
			 * of detach and attach before we got in.
			 */
			spin_unlock(&utrace->lock);
			rcu_read_unlock();
			kmem_cache_free(utrace_engine_cachep, engine);
			goto restart;
		}
		rcu_read_unlock();

		list_add_tail_rcu(&engine->entry, &utrace->engines);
	}

	engine->ops = ops;
	engine->data = data;

	spin_unlock(&utrace->lock);

	return engine;
}
EXPORT_SYMBOL_GPL(utrace_attach);
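
/*
 * Illustration only, not part of the original file: a minimal, hedged
 * sketch of how a tracing engine might use utrace_attach.  The names
 * example_ops, example_report_exit, and example_attach are hypothetical;
 * a real engine supplies its own callbacks and event mask.
 */
#if 0
static u32 example_report_exit(struct utrace_attached_engine *engine,
			       struct task_struct *tsk,
			       long orig_code, long *code)
{
	/* Observe the exit code; leave the thread running normally.  */
	return UTRACE_ACTION_RESUME;
}

static const struct utrace_engine_ops example_ops = {
	.report_exit = example_report_exit,
};

static int example_attach(struct task_struct *target)
{
	struct utrace_attached_engine *engine;

	/* Create a new engine; fail with -EEXIST if a like one exists.  */
	engine = utrace_attach(target,
			       UTRACE_ATTACH_CREATE | UTRACE_ATTACH_EXCLUSIVE
			       | UTRACE_ATTACH_MATCH_OPS, &example_ops, 0);
	if (IS_ERR(engine))
		return PTR_ERR(engine);

	/* Ask for exit reports from now on.  */
	return utrace_set_flags(target, engine, UTRACE_EVENT(EXIT));
}
#endif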
/*
 * When an engine is detached, the target thread may still see it and make
 * callbacks until it quiesces.  We reset its event flags to just QUIESCE
 * and install a special ops vector whose callback is dead_engine_delete.
 * When the target thread quiesces, it can safely free the engine itself.
 */
static u32
dead_engine_delete(struct utrace_attached_engine *engine,
		   struct task_struct *tsk)
{
	return UTRACE_ACTION_DETACH;
}

static const struct utrace_engine_ops dead_engine_ops =
{
	.report_quiesce = &dead_engine_delete
};
/*
 * Called with utrace locked.  Recompute the union of engines' flags.
 */
static inline unsigned long
rescan_flags(struct utrace *utrace)
{
	struct utrace_attached_engine *engine;
	unsigned long flags = 0;
	list_for_each_entry(engine, &utrace->engines, entry)
		flags |= engine->flags | UTRACE_EVENT(REAP);
	return flags;
}
/*
 * Only these flags matter any more for a dead task (exit_state set).
 * We use this mask on flags installed in ->utrace_flags after
 * exit_notify (and possibly utrace_report_death) has run.
 * This ensures that utrace_release_task knows positively that
 * utrace_report_death will not run later.
 */
#define DEAD_FLAGS_MASK	(UTRACE_EVENT(REAP) | UTRACE_ACTION_NOREAP)

/*
 * Flags bits in utrace->u.exit.flags word.  These are private
 * communication among utrace_report_death, utrace_release_task,
 * utrace_detach, and utrace_set_flags.
 */
#define	EXIT_FLAG_DEATH			1 /* utrace_report_death running */
#define	EXIT_FLAG_DELAYED_GROUP_LEADER	2 /* utrace_delayed_group_leader ran */
#define	EXIT_FLAG_REAP			4 /* release_task ran */
/*
 * We may have been the one keeping the target thread quiescent.
 * Check if it should wake up now.
 * Called with utrace locked, and unlocks it on return.
 * If we were keeping it stopped, resume it.
 * If we were keeping its zombie from reporting/self-reap, do it now.
 */
static void
wake_quiescent(unsigned long old_flags,
	       struct utrace *utrace, struct task_struct *target)
{
	unsigned long flags;

	/*
	 * Update the set of events of interest from the union
	 * of the interests of the remaining tracing engines.
	 */
	flags = rescan_flags(utrace);
	if (target->exit_state) {
		BUG_ON(utrace->u.exit.flags & EXIT_FLAG_DEATH);
		flags &= DEAD_FLAGS_MASK;
	}
	check_dead_utrace(target, utrace, flags);

	if (target->exit_state || (flags & UTRACE_ACTION_QUIESCE))
		return;

	read_lock(&tasklist_lock);
	if (!unlikely(target->exit_state)) {
		/*
		 * The target is not dead and should not be in tracing stop
		 * any more.  Wake it unless it's in job control stop.
		 */
		spin_lock_irq(&target->sighand->siglock);
		if (target->signal->flags & SIGNAL_STOP_STOPPED) {
			int stop_count = target->signal->group_stop_count;
			target->state = TASK_STOPPED;
			spin_unlock_irq(&target->sighand->siglock);

			/*
			 * If tracing was preventing a CLD_STOPPED report
			 * and is no longer, do that report right now.
			 */
			if (stop_count == 0
			    && ((old_flags &~ flags) & UTRACE_ACTION_NOREAP))
				do_notify_parent_cldstop(target, CLD_STOPPED);
		}
		else {
			recalc_sigpending_tsk(target);
			wake_up_state(target, TASK_STOPPED | TASK_TRACED);
			spin_unlock_irq(&target->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
}
/*
 * The engine is supposed to be attached.  The caller really needs
 * rcu_read_lock if it wants to look at the engine struct
 * (e.g. engine->data), to be sure it hasn't been freed by utrace_reap
 * asynchronously--unless he has synchronized with his report_reap
 * callback, which would have happened before then.  A simultaneous
 * utrace_detach call or UTRACE_ACTION_DETACH return from a callback can
 * also free the engine if rcu_read_lock is not held, but that is in the
 * tracing engine's power to avoid.
 *
 * Get the utrace lock for the target task.
 * Returns the struct if locked, or ERR_PTR(-errno).
 *
 * This has to be robust against races with:
 *	utrace_detach calls
 *	UTRACE_ACTION_DETACH after reports
 *	utrace_report_death
 *	utrace_release_task
 */
static struct utrace *
get_utrace_lock_attached(struct task_struct *target,
			 struct utrace_attached_engine *engine)
{
	struct utrace *utrace;

	rcu_read_lock();
	utrace = rcu_dereference(target->utrace);
	if (unlikely(target->exit_state == EXIT_DEAD)) {
		/*
		 * Called after utrace_release_task might have started.
		 * A call to this engine's report_reap callback might
		 * already be in progress or engine might even have been
		 * freed already.
		 */
		utrace = ERR_PTR(-ESRCH);
	}
	else {
		spin_lock(&utrace->lock);
		if (unlikely(rcu_dereference(target->utrace) != utrace)
		    || unlikely(rcu_dereference(engine->ops)
				== &dead_engine_ops)) {
			/*
			 * By the time we got the utrace lock,
			 * it had been reaped or detached already.
			 */
			spin_unlock(&utrace->lock);
			utrace = ERR_PTR(-ESRCH);
		}
	}
	rcu_read_unlock();

	return utrace;
}
int
utrace_detach(struct task_struct *target,
	      struct utrace_attached_engine *engine)
{
	struct utrace *utrace;
	unsigned long flags;

	utrace = get_utrace_lock_attached(target, engine);
	if (unlikely(IS_ERR(utrace)))
		return PTR_ERR(utrace);

	if (target->exit_state
	    && unlikely(utrace->u.exit.flags & (EXIT_FLAG_DEATH
						| EXIT_FLAG_REAP))) {
		/*
		 * We have already started the death report, or
		 * even entered release_task.  We can't prevent
		 * the report_death and report_reap callbacks,
		 * so tell the caller they will happen.
		 */
		int ret = ((utrace->u.exit.flags & EXIT_FLAG_REAP)
			   ? -ESRCH : -EALREADY);
		spin_unlock(&utrace->lock);
		return ret;
	}

	flags = engine->flags;
	engine->flags = UTRACE_EVENT(QUIESCE) | UTRACE_ACTION_QUIESCE;
	rcu_assign_pointer(engine->ops, &dead_engine_ops);

	if (quiesce(target, 1)) {
		remove_engine(engine, target, utrace);
		wake_quiescent(flags, utrace, target);
	}
	else
		spin_unlock(&utrace->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(utrace_detach);
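
/*
 * Illustration only: a hedged sketch of handling utrace_detach's return
 * values, following the comments in the body above.  The engine pointer
 * is assumed to come from an earlier utrace_attach call.
 */
#if 0
static void example_detach(struct task_struct *target,
			   struct utrace_attached_engine *engine)
{
	switch (utrace_detach(target, engine)) {
	case 0:			/* Detached; no further callbacks.  */
		break;
	case -EALREADY:		/* report_death callback will still run.  */
		break;
	case -ESRCH:		/* Dead or reaped; report_reap may run.  */
		break;
	}
}
#endif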
/*
 * Called with utrace->lock held.
 * Notify and clean up all engines, then free utrace.
 */
static void
utrace_reap(struct task_struct *target, struct utrace *utrace)
{
	struct utrace_attached_engine *engine, *next;
	const struct utrace_engine_ops *ops;

restart:
	list_for_each_entry_safe(engine, next, &utrace->engines, entry) {
		list_del_rcu(&engine->entry);

		/*
		 * Now nothing else refers to this engine.
		 */
		if (engine->flags & UTRACE_EVENT(REAP)) {
			ops = rcu_dereference(engine->ops);
			if (ops != &dead_engine_ops) {
				spin_unlock(&utrace->lock);
				(*ops->report_reap)(engine, target);
				call_rcu(&engine->rhead, utrace_engine_free);
				spin_lock(&utrace->lock);
				goto restart;
			}
		}
		call_rcu(&engine->rhead, utrace_engine_free);
	}

	rcu_utrace_free(utrace);
}
/*
 * Called by release_task.  After this, target->utrace must be cleared.
 */
void
utrace_release_task(struct task_struct *target)
{
	struct utrace *utrace;

	task_lock(target);
	utrace = target->utrace;
	rcu_assign_pointer(target->utrace, NULL);
	task_unlock(target);

	if (unlikely(utrace == NULL))
		return;

	spin_lock(&utrace->lock);
	utrace->u.exit.flags |= EXIT_FLAG_REAP;

	if (target->utrace_flags & (UTRACE_EVENT(DEATH)
				    | UTRACE_EVENT(QUIESCE)))
		/*
		 * The target will do some final callbacks but hasn't
		 * finished them yet.  We know because it clears these
		 * event bits after it's done.  Instead of cleaning up here
		 * and requiring utrace_report_death to cope with it, we
		 * delay the REAP report and the teardown until after the
		 * target finishes its death reports.
		 */
		spin_unlock(&utrace->lock);
	else
		utrace_reap(target, utrace); /* Unlocks and frees.  */
}
int
utrace_set_flags(struct task_struct *target,
		 struct utrace_attached_engine *engine,
		 unsigned long flags)
{
	struct utrace *utrace;
	int report;
	unsigned long old_flags, old_utrace_flags;
	int ret = -EALREADY;

#ifdef ARCH_HAS_SINGLE_STEP
	if (! ARCH_HAS_SINGLE_STEP)
#endif
		WARN_ON(flags & UTRACE_ACTION_SINGLESTEP);
#ifdef ARCH_HAS_BLOCK_STEP
	if (! ARCH_HAS_BLOCK_STEP)
#endif
		WARN_ON(flags & UTRACE_ACTION_BLOCKSTEP);

	utrace = get_utrace_lock_attached(target, engine);
	if (unlikely(IS_ERR(utrace)))
		return PTR_ERR(utrace);

restart:			/* See below. */

	old_utrace_flags = target->utrace_flags;
	old_flags = engine->flags;

	if (target->exit_state
	    && (((flags &~ old_flags) & (UTRACE_ACTION_QUIESCE
					 | UTRACE_ACTION_NOREAP
					 | UTRACE_EVENT(DEATH)
					 | UTRACE_EVENT(QUIESCE)))
		|| ((utrace->u.exit.flags & EXIT_FLAG_DEATH)
		    && ((old_flags &~ flags) & (UTRACE_EVENT(DEATH) |
						UTRACE_EVENT(QUIESCE))))
		|| ((utrace->u.exit.flags & EXIT_FLAG_REAP)
		    && ((old_flags &~ flags) & UTRACE_EVENT(REAP))))) {
		spin_unlock(&utrace->lock);
		return ret;
	}

	/*
	 * When setting these flags, it's essential that we really
	 * synchronize with exit_notify.  They cannot be set after
	 * exit_notify takes the tasklist_lock.  By holding the read
	 * lock here while setting the flags, we ensure that the calls
	 * to tracehook_notify_death and tracehook_report_death will
	 * see the new flags.  This ensures that utrace_release_task
	 * knows positively that utrace_report_death will be called or
	 * has already been called.
	 */
	if ((flags &~ old_utrace_flags) & (UTRACE_ACTION_NOREAP
					   | UTRACE_EVENT(DEATH)
					   | UTRACE_EVENT(QUIESCE))) {
		read_lock(&tasklist_lock);
		if (unlikely(target->exit_state)) {
			read_unlock(&tasklist_lock);
			spin_unlock(&utrace->lock);
			return ret;
		}
		target->utrace_flags |= flags;
		read_unlock(&tasklist_lock);
	}

	engine->flags = flags;
	target->utrace_flags |= flags;
	ret = 0;

	report = 0;
	if ((old_flags ^ flags) & UTRACE_ACTION_QUIESCE) {
		if (flags & UTRACE_ACTION_QUIESCE) {
			report = (quiesce(target, 1)
				  && (flags & UTRACE_EVENT(QUIESCE)));
			spin_unlock(&utrace->lock);
		}
		else
			wake_quiescent(old_flags, utrace, target);
	}
	else if (((old_flags &~ flags) & UTRACE_ACTION_NOREAP)
		 && target->exit_state)
		wake_quiescent(old_flags, utrace, target);
	else {
		/*
		 * If we're asking for single-stepping or syscall tracing,
		 * we need to pass through utrace_quiescent before resuming
		 * in user mode to get those effects, even if the target is
		 * not going to be quiescent right now.
		 */
		if (!(target->utrace_flags & UTRACE_ACTION_QUIESCE)
		    && !target->exit_state
		    && ((flags &~ old_utrace_flags)
			& (UTRACE_ACTION_SINGLESTEP | UTRACE_ACTION_BLOCKSTEP
			   | UTRACE_EVENT_SYSCALL)))
			quiesce(target, 0);
		spin_unlock(&utrace->lock);
	}

	if (report) {	/* Already quiescent, won't report itself.  */
		u32 action = (*engine->ops->report_quiesce)(engine, target);
		if (action & UTRACE_ACTION_DETACH)
			utrace_detach(target, engine);
		else if (action & UTRACE_ACTION_NEWSTATE) {
			/*
			 * The callback has us changing the flags yet
			 * again.  Since we released the lock, they
			 * could have changed asynchronously just now.
			 * We must refetch the current flags to change
			 * the UTRACE_ACTION_STATE_MASK bits.  If the
			 * target thread started dying, then there is
			 * nothing we can do--but that failure is due
			 * to the report_quiesce callback after the
			 * original utrace_set_flags has already
			 * succeeded, so we don't want to return
			 * failure here (hence leave ret = 0).
			 */
			utrace = get_utrace_lock_attached(target, engine);
			if (!unlikely(IS_ERR(utrace))) {
				flags = action & UTRACE_ACTION_STATE_MASK;
				flags |= (engine->flags
					  &~ UTRACE_ACTION_STATE_MASK);
				goto restart;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(utrace_set_flags);
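
/*
 * Illustration only: a hedged sketch of toggling UTRACE_ACTION_QUIESCE
 * with utrace_set_flags, assuming an already-attached engine.  Setting
 * the bit asks the target to stop before returning to user mode;
 * clearing it lets wake_quiescent resume the thread.  Reading
 * engine->flags unlocked here is a simplification.
 */
#if 0
static int example_stop(struct task_struct *target,
			struct utrace_attached_engine *engine)
{
	return utrace_set_flags(target, engine,
				engine->flags | UTRACE_ACTION_QUIESCE);
}

static int example_resume(struct task_struct *target,
			  struct utrace_attached_engine *engine)
{
	return utrace_set_flags(target, engine,
				engine->flags & ~UTRACE_ACTION_QUIESCE);
}
#endif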
/*
 * While running an engine callback, no locks are held.
 * If a callback updates its engine's action state, then
 * we need to take the utrace lock to install the flags update.
 */
static inline u32
update_action(struct task_struct *tsk, struct utrace *utrace,
	      struct utrace_attached_engine *engine,
	      u32 ret)
{
	if (ret & UTRACE_ACTION_DETACH)
		rcu_assign_pointer(engine->ops, &dead_engine_ops);
	else if ((ret & UTRACE_ACTION_NEWSTATE)
		 && ((ret ^ engine->flags) & UTRACE_ACTION_STATE_MASK)) {
#ifdef ARCH_HAS_SINGLE_STEP
		if (! ARCH_HAS_SINGLE_STEP)
#endif
			WARN_ON(ret & UTRACE_ACTION_SINGLESTEP);
#ifdef ARCH_HAS_BLOCK_STEP
		if (! ARCH_HAS_BLOCK_STEP)
#endif
			WARN_ON(ret & UTRACE_ACTION_BLOCKSTEP);
		spin_lock(&utrace->lock);
		/*
		 * If we're changing something other than just QUIESCE,
		 * make sure we pass through utrace_quiescent before
		 * resuming even if we aren't going to stay quiescent.
		 * That's where we get the correct union of all engines'
		 * flags after they've finished changing, and apply changes.
		 */
		if (((ret ^ engine->flags) & (UTRACE_ACTION_STATE_MASK
					      & ~UTRACE_ACTION_QUIESCE)))
			tsk->utrace_flags |= UTRACE_ACTION_QUIESCE;
		engine->flags &= ~UTRACE_ACTION_STATE_MASK;
		engine->flags |= ret & UTRACE_ACTION_STATE_MASK;
		tsk->utrace_flags |= engine->flags;
		spin_unlock(&utrace->lock);
	}
	else
		ret |= engine->flags & UTRACE_ACTION_STATE_MASK;
	return ret;
}
#define REPORT(callback, ...) do { \
	u32 ret = (*rcu_dereference(engine->ops)->callback) \
		(engine, tsk, ##__VA_ARGS__); \
	action = update_action(tsk, utrace, engine, ret); \
	} while (0)
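
/*
 * For instance, REPORT(report_clone, clone_flags, child) in the report
 * loops below expands to (roughly):
 *
 *	u32 ret = (*rcu_dereference(engine->ops)->report_clone)
 *		(engine, tsk, clone_flags, child);
 *	action = update_action(tsk, utrace, engine, ret);
 *
 * so each callback's returned action bits are folded into the report
 * loop's running "action" variable.
 */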
/*
 * Called with utrace->lock held, returns with it released.
 */
static u32
remove_detached(struct task_struct *tsk, struct utrace *utrace,
		u32 action, unsigned long mask)
{
	struct utrace_attached_engine *engine, *next;
	unsigned long flags = 0;

	list_for_each_entry_safe(engine, next, &utrace->engines, entry) {
		if (engine->ops == &dead_engine_ops)
			remove_engine(engine, tsk, utrace);
		else
			flags |= engine->flags | UTRACE_EVENT(REAP);
	}
	check_dead_utrace(tsk, utrace, flags & mask);

	flags &= UTRACE_ACTION_STATE_MASK;
	return flags | (action & UTRACE_ACTION_OP_MASK);
}
/*
 * Called after an event report loop.  Remove any engines marked for detach.
 */
static u32
check_detach(struct task_struct *tsk, u32 action)
{
	if (action & UTRACE_ACTION_DETACH) {
		/*
		 * This must be current to be sure it's not possibly
		 * getting into utrace_report_death.
		 */
		BUG_ON(tsk != current);
		spin_lock(&tsk->utrace->lock);
		action = remove_detached(tsk, tsk->utrace, action, ~0UL);
	}
	return action;
}
static int
check_quiescent(struct task_struct *tsk, u32 action)
{
	if (action & UTRACE_ACTION_STATE_MASK)
		return utrace_quiescent(tsk, NULL);
	return 0;
}
/*
 * Called iff UTRACE_EVENT(CLONE) flag is set.
 * This notification call blocks the wake_up_new_task call on the child.
 * So we must not quiesce here.  tracehook_report_clone_complete will do
 * a quiescence check momentarily.
 */
void
utrace_report_clone(unsigned long clone_flags, struct task_struct *child)
{
	struct task_struct *tsk = current;
	struct utrace *utrace = tsk->utrace;
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;
	unsigned long action;

	utrace->u.live.cloning = child;

	/* XXX must change for sharing */
	action = UTRACE_ACTION_RESUME;
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if (engine->flags & UTRACE_EVENT(CLONE))
			REPORT(report_clone, clone_flags, child);
		if (action & UTRACE_ACTION_HIDE)
			break;
	}

	utrace->u.live.cloning = NULL;

	check_detach(tsk, action);
}
static u32
report_quiescent(struct task_struct *tsk, struct utrace *utrace, u32 action)
{
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;

	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if (engine->flags & UTRACE_EVENT(QUIESCE))
			REPORT(report_quiesce);
		action |= engine->flags & UTRACE_ACTION_STATE_MASK;
	}

	return check_detach(tsk, action);
}
/*
 * Called iff UTRACE_EVENT(JCTL) flag is set.
 */
int
utrace_report_jctl(int what)
{
	struct task_struct *tsk = current;
	struct utrace *utrace = tsk->utrace;
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;
	unsigned long action;

	/* XXX must change for sharing */
	action = UTRACE_ACTION_RESUME;
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if (engine->flags & UTRACE_EVENT(JCTL))
			REPORT(report_jctl, what);
		if (action & UTRACE_ACTION_HIDE)
			break;
	}

	/*
	 * We are becoming quiescent, so report it now.
	 * We don't block in utrace_quiescent because we are stopping anyway.
	 * We know that upon resuming we'll go through tracehook_induce_signal,
	 * which will keep us quiescent or set us up to resume with tracing.
	 */
	action = report_quiescent(tsk, utrace, action);

	if (what == CLD_STOPPED && tsk->state != TASK_STOPPED) {
		/*
		 * The event report hooks could have blocked, though
		 * it should have been briefly.  Make sure we're in
		 * TASK_STOPPED state again to block properly, unless
		 * we've just come back out of job control stop.
		 */
		spin_lock_irq(&tsk->sighand->siglock);
		if (tsk->signal->flags & SIGNAL_STOP_STOPPED)
			set_current_state(TASK_STOPPED);
		spin_unlock_irq(&tsk->sighand->siglock);
	}

	return action & UTRACE_JCTL_NOSIGCHLD;
}
/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static inline int
sigkill_pending(struct task_struct *tsk)
{
	return ((sigismember(&tsk->pending.signal, SIGKILL)
		 || sigismember(&tsk->signal->shared_pending.signal, SIGKILL))
		&& !unlikely(sigismember(&tsk->blocked, SIGKILL)));
}
/*
 * Called if UTRACE_EVENT(QUIESCE) or UTRACE_ACTION_QUIESCE flag is set.
 * Also called after other event reports.
 * It is a good time to block.
 * Returns nonzero if we woke up prematurely due to SIGKILL.
 *
 * The signal pointer is nonzero when called from utrace_get_signal,
 * where a pending forced signal can be processed right away.  Otherwise,
 * we keep UTRACE_ACTION_QUIESCE set after resuming so that utrace_get_signal
 * will be entered before user mode.
 */
int
utrace_quiescent(struct task_struct *tsk, struct utrace_signal *signal)
{
	struct utrace *utrace = tsk->utrace;
	unsigned long action;

restart:
	/* XXX must change for sharing */

	action = report_quiescent(tsk, utrace, UTRACE_ACTION_RESUME);

	/*
	 * If some engines want us quiescent, we block here.
	 */
	if (action & UTRACE_ACTION_QUIESCE) {
		int killed;

		if (signal != NULL) {
			BUG_ON(utrace->u.live.signal != NULL);
			utrace->u.live.signal = signal;
		}

		spin_lock_irq(&tsk->sighand->siglock);
		/*
		 * If wake_quiescent is trying to wake us up now, it will
		 * have cleared the QUIESCE flag before trying to take the
		 * siglock.  Now we have the siglock, so either it has
		 * already cleared the flag, or it will wake us up after we
		 * release the siglock it's waiting for.
		 * Never stop when there is a SIGKILL bringing us down.
		 */
		killed = sigkill_pending(tsk);
		if (!killed && (tsk->utrace_flags & UTRACE_ACTION_QUIESCE)) {
			set_current_state(TASK_TRACED);
			/*
			 * If there is a group stop in progress,
			 * we must participate in the bookkeeping.
			 */
			if (tsk->signal->group_stop_count > 0)
				--tsk->signal->group_stop_count;
			spin_unlock_irq(&tsk->sighand->siglock);
			schedule();
		}
		else
			spin_unlock_irq(&tsk->sighand->siglock);

		if (signal != NULL) {
			/*
			 * We know the struct stays in place when its
			 * u.live.signal is set, see check_dead_utrace.
			 * This makes it safe to clear its pointer here.
			 */
			BUG_ON(tsk->utrace != utrace);
			BUG_ON(utrace->u.live.signal != signal);
			utrace->u.live.signal = NULL;
		}

		if (killed)	/* Game over, man!  */
			return 1;

		/*
		 * We've woken up.  One engine could be waking us up while
		 * another has asked us to quiesce.  So check afresh.  We
		 * could have been detached while quiescent.  Now we are no
		 * longer quiescent, so don't need to do any RCU locking.
		 * But we do need to check our utrace pointer anew.
		 */
		utrace = tsk->utrace;
		if (tsk->utrace_flags
		    & (UTRACE_EVENT(QUIESCE) | UTRACE_ACTION_STATE_MASK))
			goto restart;
	}
	else if (tsk->utrace_flags & UTRACE_ACTION_QUIESCE) {
		/*
		 * Our flags are out of date.
		 * Update the set of events of interest from the union
		 * of the interests of the remaining tracing engines.
		 * This may notice that there are no engines left
		 * and clean up the struct utrace.  It's left in place
		 * and the QUIESCE flag set as long as utrace_get_signal
		 * still needs to process a pending forced signal.
		 */
		unsigned long flags;
		utrace = rcu_dereference(tsk->utrace);
		spin_lock(&utrace->lock);
		flags = rescan_flags(utrace);
		if (flags == 0)
			utrace_clear_tsk(tsk, utrace);
		check_dead_utrace(tsk, utrace, flags);
	}

	/*
	 * We're resuming.  Update the machine layer tracing state and then go.
	 */
#ifdef ARCH_HAS_SINGLE_STEP
	if (action & UTRACE_ACTION_SINGLESTEP)
		tracehook_enable_single_step(tsk);
	else
		tracehook_disable_single_step(tsk);
#endif
#ifdef ARCH_HAS_BLOCK_STEP
	if ((action & (UTRACE_ACTION_BLOCKSTEP|UTRACE_ACTION_SINGLESTEP))
	    == UTRACE_ACTION_BLOCKSTEP)
		tracehook_enable_block_step(tsk);
	else
		tracehook_disable_block_step(tsk);
#endif
	if (tsk->utrace_flags & UTRACE_EVENT_SYSCALL)
		tracehook_enable_syscall_trace(tsk);
	else
		tracehook_disable_syscall_trace(tsk);

	return 0;
}
/*
 * Called iff UTRACE_EVENT(EXIT) flag is set.
 */
void
utrace_report_exit(long *exit_code)
{
	struct task_struct *tsk = current;
	struct utrace *utrace = tsk->utrace;
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;
	unsigned long action;
	long orig_code = *exit_code;

	/* XXX must change for sharing */
	action = UTRACE_ACTION_RESUME;
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if (engine->flags & UTRACE_EVENT(EXIT))
			REPORT(report_exit, orig_code, exit_code);
	}
	action = check_detach(tsk, action);
	check_quiescent(tsk, action);
}
/*
 * Called with utrace locked, unlocks it on return.  Unconditionally
 * recompute the flags after report_death is finished.  This may notice
 * that there are no engines left and free the utrace struct.
 */
static void
finish_report_death(struct task_struct *tsk, struct utrace *utrace)
{
	/*
	 * After we unlock (possibly inside utrace_reap for callbacks) with
	 * this flag clear, competing utrace_detach/utrace_set_flags calls
	 * know that we've finished our callbacks and any detach bookkeeping.
	 */
	utrace->u.exit.flags &= EXIT_FLAG_REAP;

	if (utrace->u.exit.flags & EXIT_FLAG_REAP)
		/*
		 * utrace_release_task was already called in parallel.
		 * We must complete its work now.
		 */
		utrace_reap(tsk, utrace);
	else
		/*
		 * Clear out any detached engines and in the process
		 * recompute the flags.  Mask off event bits we can't
		 * see any more.  This tells utrace_release_task we
		 * have already finished, if it comes along later.
		 * Note this all happens on the already-locked utrace,
		 * which might already be removed from the task.
		 */
		remove_detached(tsk, utrace, 0, DEAD_FLAGS_MASK);
}
/*
 * Called with utrace locked, unlocks it on return.
 * EXIT_FLAG_DELAYED_GROUP_LEADER is set.
 * Do second report_death callbacks for engines using NOREAP.
 */
static void
report_delayed_group_leader(struct task_struct *tsk, struct utrace *utrace)
{
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;

	utrace->u.exit.flags |= EXIT_FLAG_DEATH;
	spin_unlock(&utrace->lock);

	/* XXX must change for sharing */
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
#define NOREAP_DEATH (UTRACE_EVENT(DEATH) | UTRACE_ACTION_NOREAP)
		if ((engine->flags & NOREAP_DEATH) == NOREAP_DEATH)
			REPORT(report_death);
	}

	spin_lock(&utrace->lock);
	finish_report_death(tsk, utrace);
}
/*
 * Called iff UTRACE_EVENT(DEATH) or UTRACE_ACTION_QUIESCE flag is set.
 *
 * It is always possible that we are racing with utrace_release_task here,
 * if UTRACE_ACTION_NOREAP is not set, or in the case of non-leader exec
 * where the old leader will get released regardless of NOREAP.  For this
 * reason, utrace_release_task checks for the event bits that get us here,
 * and delays its cleanup for us to do.
 */
void
utrace_report_death(struct task_struct *tsk, struct utrace *utrace)
{
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;

	BUG_ON(!tsk->exit_state);

	/*
	 * We are presently considered "quiescent"--which is accurate
	 * inasmuch as we won't run any more user instructions ever again.
	 * But for utrace_detach and utrace_set_flags to be robust, they
	 * must be sure whether or not we will run any more callbacks.  If
	 * a call comes in before we do, taking the lock here synchronizes
	 * us so we don't run any callbacks just disabled.  Calls that come
	 * in while we're running the callbacks will see the report_death
	 * flag and know that we are not yet fully quiescent for purposes
	 * of detach bookkeeping.
	 */
	spin_lock(&utrace->lock);
	BUG_ON(utrace->u.exit.flags & EXIT_FLAG_DEATH);
	utrace->u.exit.flags &= EXIT_FLAG_REAP;
	utrace->u.exit.flags |= EXIT_FLAG_DEATH;
	spin_unlock(&utrace->lock);

	/* XXX must change for sharing */
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if (engine->flags & UTRACE_EVENT(DEATH))
			REPORT(report_death);
		if (engine->flags & UTRACE_EVENT(QUIESCE))
			REPORT(report_quiesce);
	}

	spin_lock(&utrace->lock);
	if (unlikely(utrace->u.exit.flags & EXIT_FLAG_DELAYED_GROUP_LEADER))
		/*
		 * Another thread's release_task came along and
		 * removed the delayed_group_leader condition,
		 * but after we might have started callbacks.
		 * Do the second report_death callback right now.
		 */
		report_delayed_group_leader(tsk, utrace);
	else
		finish_report_death(tsk, utrace);
}
/*
 * We're called from release_task when delayed_group_leader(tsk) was
 * previously true and is no longer true, and NOREAP was set.
 * This means no parent notifications have happened for this zombie.
 */
void
utrace_report_delayed_group_leader(struct task_struct *tsk)
{
	struct utrace *utrace;

	rcu_read_lock();
	utrace = rcu_dereference(tsk->utrace);
	if (unlikely(utrace == NULL)) {
		rcu_read_unlock();
		return;
	}
	spin_lock(&utrace->lock);
	rcu_read_unlock();

	utrace->u.exit.flags |= EXIT_FLAG_DELAYED_GROUP_LEADER;

	/*
	 * If utrace_report_death is still running, or release_task has
	 * started already, there is nothing more to do now.
	 */
	if ((utrace->u.exit.flags & (EXIT_FLAG_DEATH | EXIT_FLAG_REAP))
	    || !likely(tsk->utrace_flags & UTRACE_ACTION_NOREAP))
		spin_unlock(&utrace->lock);
	else
		report_delayed_group_leader(tsk, utrace);
}
/*
 * Called iff UTRACE_EVENT(VFORK_DONE) flag is set.
 */
void
utrace_report_vfork_done(pid_t child_pid)
{
	struct task_struct *tsk = current;
	struct utrace *utrace = tsk->utrace;
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;
	unsigned long action;

	/* XXX must change for sharing */
	action = UTRACE_ACTION_RESUME;
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if (engine->flags & UTRACE_EVENT(VFORK_DONE))
			REPORT(report_vfork_done, child_pid);
		if (action & UTRACE_ACTION_HIDE)
			break;
	}
	action = check_detach(tsk, action);
	check_quiescent(tsk, action);
}
/*
 * Called iff UTRACE_EVENT(EXEC) flag is set.
 */
void
utrace_report_exec(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct utrace *utrace = tsk->utrace;
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;
	unsigned long action;

	/* XXX must change for sharing */
	action = UTRACE_ACTION_RESUME;
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if (engine->flags & UTRACE_EVENT(EXEC))
			REPORT(report_exec, bprm, regs);
		if (action & UTRACE_ACTION_HIDE)
			break;
	}
	action = check_detach(tsk, action);
	check_quiescent(tsk, action);
}
/*
 * Called iff UTRACE_EVENT(SYSCALL_{ENTRY,EXIT}) flag is set.
 */
void
utrace_report_syscall(struct pt_regs *regs, int is_exit)
{
	struct task_struct *tsk = current;
	struct utrace *utrace = tsk->utrace;
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;
	unsigned long action, ev;

	/*
	 * XXX pass syscall # to engine hook directly, let it return
	 * inhibit-action; e.g.:
	 *	long syscall = tracehook_syscall_number(regs, is_exit);
	 */

	ev = is_exit ? UTRACE_EVENT(SYSCALL_EXIT) : UTRACE_EVENT(SYSCALL_ENTRY);

	/* XXX must change for sharing */
	action = UTRACE_ACTION_RESUME;
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if (engine->flags & ev) {
			if (is_exit)
				REPORT(report_syscall_exit, regs);
			else
				REPORT(report_syscall_entry, regs);
		}
		if (action & UTRACE_ACTION_HIDE)
			break;
	}
	action = check_detach(tsk, action);
	if (unlikely(check_quiescent(tsk, action)) && !is_exit)
		/*
		 * We are continuing despite QUIESCE because of a SIGKILL.
		 * Don't let the system call actually proceed.
		 */
		tracehook_abort_syscall(regs);
}
/*
 * This is pointed to by the utrace struct, but it's really a private
 * structure between utrace_get_signal and utrace_inject_signal.
 */
struct utrace_signal
{
	siginfo_t *const info;
	struct k_sigaction *return_ka;
	int signr;
};
// XXX copied from signal.c
#ifdef SIGEMT
#define M_SIGEMT	M(SIGEMT)
#else
#define M_SIGEMT	0
#endif

#if SIGRTMIN > BITS_PER_LONG
#define M(sig) (1ULL << ((sig)-1))
#else
#define M(sig) (1UL << ((sig)-1))
#endif
#define T(sig, mask) (M(sig) & (mask))

#define SIG_KERNEL_ONLY_MASK (\
	M(SIGKILL)   |  M(SIGSTOP)                                   )

#define SIG_KERNEL_STOP_MASK (\
	M(SIGSTOP)   |  M(SIGTSTP)   |  M(SIGTTIN)   |  M(SIGTTOU)   )

#define SIG_KERNEL_COREDUMP_MASK (\
	M(SIGQUIT)   |  M(SIGILL)    |  M(SIGTRAP)   |  M(SIGABRT)   | \
	M(SIGFPE)    |  M(SIGSEGV)   |  M(SIGBUS)    |  M(SIGSYS)    | \
	M(SIGXCPU)   |  M(SIGXFSZ)   |  M_SIGEMT                     )

#define SIG_KERNEL_IGNORE_MASK (\
	M(SIGCONT)   |  M(SIGCHLD)   |  M(SIGWINCH)  |  M(SIGURG)    )

#define sig_kernel_only(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_ONLY_MASK))
#define sig_kernel_coredump(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_COREDUMP_MASK))
#define sig_kernel_ignore(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_IGNORE_MASK))
#define sig_kernel_stop(sig) \
		(((sig) < SIGRTMIN)  && T(sig, SIG_KERNEL_STOP_MASK))
/*
 * Call each interested tracing engine's report_signal callback.
 */
static u32
report_signal(struct task_struct *tsk, struct pt_regs *regs,
	      struct utrace *utrace, u32 action,
	      unsigned long flags1, unsigned long flags2, siginfo_t *info,
	      const struct k_sigaction *ka, struct k_sigaction *return_ka)
{
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;

	/* XXX must change for sharing */
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		if ((engine->flags & flags1) && (engine->flags & flags2)) {
			u32 disp = action & UTRACE_ACTION_OP_MASK;
			action &= ~UTRACE_ACTION_OP_MASK;
			REPORT(report_signal, regs, disp, info, ka, return_ka);
			if ((action & UTRACE_ACTION_OP_MASK) == 0)
				action |= disp;
			if (action & UTRACE_ACTION_HIDE)
				break;
		}
	}

	return action;
}
void
utrace_signal_handler_singlestep(struct task_struct *tsk, struct pt_regs *regs)
{
	u32 action;
	action = report_signal(tsk, regs, tsk->utrace, UTRACE_SIGNAL_HANDLER,
			       UTRACE_EVENT_SIGNAL_ALL,
			       UTRACE_ACTION_SINGLESTEP|UTRACE_ACTION_BLOCKSTEP,
			       NULL, NULL, NULL);
	action = check_detach(tsk, action);
	check_quiescent(tsk, action);
}
/*
 * This is the hook from the signals code, called with the siglock held.
 * Here is the ideal place to quiesce.  We also dequeue and intercept signals.
 */
int
utrace_get_signal(struct task_struct *tsk, struct pt_regs *regs,
		  siginfo_t *info, struct k_sigaction *return_ka)
{
	struct utrace *utrace = tsk->utrace;
	struct utrace_signal signal = { info, return_ka, 0 };
	struct k_sigaction *ka;
	unsigned long action, event;

	/*
	 * If a signal was injected previously, it could not use our
	 * stack space directly.  It had to allocate a data structure,
	 * which we can now copy out of and free.
	 *
	 * We don't have to lock access to u.live.signal because it's only
	 * touched by utrace_inject_signal when we're quiescent.
	 */
	if (utrace->u.live.signal != NULL) {
		signal.signr = utrace->u.live.signal->signr;
		copy_siginfo(info, utrace->u.live.signal->info);
		if (utrace->u.live.signal->return_ka)
			*return_ka = *utrace->u.live.signal->return_ka;
		else
			signal.return_ka = NULL;
		kfree(utrace->u.live.signal);
		utrace->u.live.signal = NULL;
	}

	/*
	 * If we should quiesce, now is the time.
	 * First stash a pointer to the state on our stack,
	 * so that utrace_inject_signal can tell us what to do.
	 */
	if (tsk->utrace_flags & UTRACE_ACTION_QUIESCE) {
		int killed = sigkill_pending(tsk);
		if (!killed) {
			spin_unlock_irq(&tsk->sighand->siglock);

			killed = utrace_quiescent(tsk, &signal);

			/*
			 * No one wants us quiescent any more, we can take
			 * signals.  Unless we have a forced signal to take,
			 * back out to the signal code to resynchronize after
			 * releasing the siglock.
			 */
			if (signal.signr == 0 && !killed)
				/*
				 * This return value says to reacquire the
				 * siglock and check again.  This will check
				 * for a pending group stop and process it
				 * before coming back here.
				 */
				return -1;

			spin_lock_irq(&tsk->sighand->siglock);
		}
		if (killed) {
			/*
			 * The only reason we woke up now was because of a
			 * SIGKILL.  Don't do normal dequeuing in case it
			 * might get a signal other than SIGKILL.  That would
			 * perturb the death state so it might differ from
			 * what the debugger would have allowed to happen.
			 * Instead, pluck out just the SIGKILL to be sure
			 * we'll die immediately with nothing else different
			 * from the quiescent state the debugger wanted us in.
			 */
			sigset_t sigkill_only;
			sigfillset(&sigkill_only);
			sigdelset(&sigkill_only, SIGKILL);
			killed = dequeue_signal(tsk, &sigkill_only, info);
			BUG_ON(killed != SIGKILL);
			*return_ka = tsk->sighand->action[killed - 1];
			return killed;
		}
	}

	/*
	 * If a signal was injected, everything is in place now.  Go do it.
	 */
	if (signal.signr != 0) {
		if (signal.return_ka == NULL) {
			ka = &tsk->sighand->action[signal.signr - 1];
			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;
			*return_ka = *ka;
		}
		else
			BUG_ON(signal.return_ka != return_ka);
		return signal.signr;
	}

	/*
	 * If no one is interested in intercepting signals, let the caller
	 * just dequeue them normally.
	 */
	if ((tsk->utrace_flags & UTRACE_EVENT_SIGNAL_ALL) == 0)
		return 0;

	/*
	 * Steal the next signal so we can let tracing engines examine it.
	 * From the signal number and sigaction, determine what normal
	 * delivery would do.  If no engine perturbs it, we'll do that
	 * by returning the signal number after setting *return_ka.
	 */
	signal.signr = dequeue_signal(tsk, &tsk->blocked, info);
	if (signal.signr == 0)
		return 0;

	BUG_ON(signal.signr != info->si_signo);

	ka = &tsk->sighand->action[signal.signr - 1];
	*return_ka = *ka;

	/*
	 * We are never allowed to interfere with SIGKILL,
	 * just punt after filling in *return_ka for our caller.
	 */
	if (signal.signr == SIGKILL)
		return signal.signr;

	if (ka->sa.sa_handler == SIG_IGN) {
		event = UTRACE_EVENT(SIGNAL_IGN);
		action = UTRACE_SIGNAL_IGN;
	}
	else if (ka->sa.sa_handler != SIG_DFL) {
		event = UTRACE_EVENT(SIGNAL);
		action = UTRACE_ACTION_RESUME;
	}
	else if (sig_kernel_coredump(signal.signr)) {
		event = UTRACE_EVENT(SIGNAL_CORE);
		action = UTRACE_SIGNAL_CORE;
	}
	else if (sig_kernel_ignore(signal.signr)) {
		event = UTRACE_EVENT(SIGNAL_IGN);
		action = UTRACE_SIGNAL_IGN;
	}
	else if (sig_kernel_stop(signal.signr)) {
		event = UTRACE_EVENT(SIGNAL_STOP);
		action = (signal.signr == SIGSTOP
			  ? UTRACE_SIGNAL_STOP : UTRACE_SIGNAL_TSTP);
	}
	else {
		event = UTRACE_EVENT(SIGNAL_TERM);
		action = UTRACE_SIGNAL_TERM;
	}

	if (tsk->utrace_flags & event) {
		/*
		 * We have some interested engines, so tell them about the
		 * signal and let them change its disposition.
		 */

		spin_unlock_irq(&tsk->sighand->siglock);

		action = report_signal(tsk, regs, utrace, action, event, event,
				       info, ka, return_ka);
		action &= UTRACE_ACTION_OP_MASK;

		if (action & UTRACE_SIGNAL_HOLD) {
			struct sigqueue *q = sigqueue_alloc();
			if (likely(q != NULL)) {
				q->flags = 0;
				copy_siginfo(&q->info, info);
			}
			action &= ~UTRACE_SIGNAL_HOLD;
			spin_lock_irq(&tsk->sighand->siglock);
			sigaddset(&tsk->pending.signal, info->si_signo);
			if (likely(q != NULL))
				list_add(&q->list, &tsk->pending.list);
		}
		else
			spin_lock_irq(&tsk->sighand->siglock);

		recalc_sigpending_tsk(tsk);
	}

	/*
	 * We express the chosen action to the signals code in terms
	 * of a representative signal whose default action does it.
	 */
	switch (action) {
	case UTRACE_SIGNAL_IGN:
		/*
		 * We've eaten the signal.  That's all we do.
		 * Tell the caller to restart.
		 */
		spin_unlock_irq(&tsk->sighand->siglock);
		return -1;

	case UTRACE_ACTION_RESUME:
	case UTRACE_SIGNAL_DELIVER:
		/*
		 * The handler will run.  We do the SA_ONESHOT work here
		 * since the normal path will only touch *return_ka now.
		 */
		if (return_ka->sa.sa_flags & SA_ONESHOT)
			ka->sa.sa_handler = SIG_DFL;
		break;

	case UTRACE_SIGNAL_TSTP:
		signal.signr = SIGTSTP;
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
		return_ka->sa.sa_handler = SIG_DFL;
		break;

	case UTRACE_SIGNAL_STOP:
		signal.signr = SIGSTOP;
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
		return_ka->sa.sa_handler = SIG_DFL;
		break;

	case UTRACE_SIGNAL_TERM:
		signal.signr = SIGTERM;
		return_ka->sa.sa_handler = SIG_DFL;
		break;

	case UTRACE_SIGNAL_CORE:
		signal.signr = SIGQUIT;
		return_ka->sa.sa_handler = SIG_DFL;
		break;
	}

	return signal.signr;
}
/*
 * Cause a specified signal delivery in the target thread,
 * which must be quiescent.  The action has UTRACE_SIGNAL_* bits
 * as returned from a report_signal callback.  If ka is non-null,
 * it gives the sigaction to follow for UTRACE_SIGNAL_DELIVER;
 * otherwise, the installed sigaction at the time of delivery is used.
 */
int
utrace_inject_signal(struct task_struct *target,
		     struct utrace_attached_engine *engine,
		     u32 action, siginfo_t *info,
		     const struct k_sigaction *ka)
{
	struct utrace *utrace;
	struct utrace_signal *signal;
	int ret;

	if (info->si_signo == 0 || !valid_signal(info->si_signo))
		return -EINVAL;

	utrace = get_utrace_lock_attached(target, engine);
	if (unlikely(IS_ERR(utrace)))
		return PTR_ERR(utrace);

	ret = 0;
	signal = utrace->u.live.signal;
	if (unlikely(target->exit_state))
		ret = -ESRCH;
	else if (signal == NULL) {
		ret = -ENOSYS;	/* XXX */
	}
	else if (signal->signr != 0)
		ret = -EAGAIN;
	else {
		if (info != signal->info)
			copy_siginfo(signal->info, info);

		switch (action) {
		default:
			ret = -EINVAL;
			break;

		case UTRACE_SIGNAL_IGN:
			break;

		case UTRACE_ACTION_RESUME:
		case UTRACE_SIGNAL_DELIVER:
			/*
			 * The handler will run.  We do the SA_ONESHOT work
			 * here since the normal path will not touch the
			 * real sigaction when using an injected signal.
			 */
			if (ka == NULL)
				signal->return_ka = NULL;
			else if (ka != signal->return_ka)
				*signal->return_ka = *ka;
			if (ka && ka->sa.sa_flags & SA_ONESHOT) {
				struct k_sigaction *a;
				a = &target->sighand->action[info->si_signo-1];
				spin_lock_irq(&target->sighand->siglock);
				a->sa.sa_handler = SIG_DFL;
				spin_unlock_irq(&target->sighand->siglock);
			}
			signal->signr = info->si_signo;
			break;

		case UTRACE_SIGNAL_TSTP:
			signal->signr = SIGTSTP;
			spin_lock_irq(&target->sighand->siglock);
			target->signal->flags |= SIGNAL_STOP_DEQUEUED;
			spin_unlock_irq(&target->sighand->siglock);
			signal->return_ka->sa.sa_handler = SIG_DFL;
			break;

		case UTRACE_SIGNAL_STOP:
			signal->signr = SIGSTOP;
			spin_lock_irq(&target->sighand->siglock);
			target->signal->flags |= SIGNAL_STOP_DEQUEUED;
			spin_unlock_irq(&target->sighand->siglock);
			signal->return_ka->sa.sa_handler = SIG_DFL;
			break;

		case UTRACE_SIGNAL_TERM:
			signal->signr = SIGTERM;
			signal->return_ka->sa.sa_handler = SIG_DFL;
			break;

		case UTRACE_SIGNAL_CORE:
			signal->signr = SIGQUIT;
			signal->return_ka->sa.sa_handler = SIG_DFL;
			break;
		}
	}

	spin_unlock(&utrace->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(utrace_inject_signal);
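
/*
 * Illustration only: a hedged sketch of forcing a SIGTERM into a target
 * from a tracing engine.  This assumes the target is quiescent in
 * utrace_get_signal (otherwise utrace_inject_signal returns -ENOSYS per
 * the XXX above).  The helper name is hypothetical; memset needs
 * <linux/string.h>.
 */
#if 0
static int example_force_sigterm(struct task_struct *target,
				 struct utrace_attached_engine *engine)
{
	siginfo_t info;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTERM;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;

	return utrace_inject_signal(target, engine, UTRACE_SIGNAL_TERM,
				    &info, NULL);
}
#endif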
const struct utrace_regset *
utrace_regset(struct task_struct *target,
	      struct utrace_attached_engine *engine,
	      const struct utrace_regset_view *view, int which)
{
	if (unlikely((unsigned) which >= view->n))
		return NULL;

	if (target != current)
		wait_task_inactive(target);

	return &view->regsets[which];
}
EXPORT_SYMBOL_GPL(utrace_regset);
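
/*
 * Illustration only: a hedged sketch of fetching a register set for a
 * quiescent target.  It assumes the utrace regset interface's
 * utrace_native_view() and the regset ->get call shape; treat the index
 * (0 for general registers by common convention) as an assumption.
 */
#if 0
static int example_read_gpregs(struct task_struct *target,
			       struct utrace_attached_engine *engine,
			       void *buf, unsigned int size)
{
	const struct utrace_regset_view *view = utrace_native_view(target);
	const struct utrace_regset *regset;

	regset = utrace_regset(target, engine, view, 0);
	if (regset == NULL)
		return -EIO;

	/* Copy registers starting at offset 0 into the kernel buffer.  */
	return (*regset->get)(target, regset, 0, size, buf, NULL);
}
#endif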
/*
 * Return the task_struct for the task using ptrace on this one, or NULL.
 * Must be called with rcu_read_lock held to keep the returned struct alive.
 *
 * At exec time, this may be called with task_lock(p) still held from when
 * tracehook_unsafe_exec was just called.  In that case it must give
 * results consistent with those unsafe_exec results, i.e. non-NULL if
 * any LSM_UNSAFE_PTRACE_* bits were set.
 *
 * The value is also used to display after "TracerPid:" in /proc/PID/status,
 * where it is called with only rcu_read_lock held.
 */
struct task_struct *
utrace_tracer_task(struct task_struct *target)
{
	struct utrace *utrace;
	struct task_struct *tracer = NULL;

	utrace = rcu_dereference(target->utrace);
	if (utrace != NULL) {
		struct list_head *pos, *next;
		struct utrace_attached_engine *engine;
		const struct utrace_engine_ops *ops;
		list_for_each_safe_rcu(pos, next, &utrace->engines) {
			engine = list_entry(pos, struct utrace_attached_engine,
					    entry);
			ops = rcu_dereference(engine->ops);
			if (ops->tracer_task) {
				tracer = (*ops->tracer_task)(engine, target);
				if (tracer != NULL)
					break;
			}
		}
	}

	return tracer;
}
int
utrace_allow_access_process_vm(struct task_struct *target)
{
	struct utrace *utrace;
	int ret = 0;

	rcu_read_lock();
	utrace = rcu_dereference(target->utrace);
	if (utrace != NULL) {
		struct list_head *pos, *next;
		struct utrace_attached_engine *engine;
		const struct utrace_engine_ops *ops;
		list_for_each_safe_rcu(pos, next, &utrace->engines) {
			engine = list_entry(pos, struct utrace_attached_engine,
					    entry);
			ops = rcu_dereference(engine->ops);
			if (ops->allow_access_process_vm) {
				ret = (*ops->allow_access_process_vm)(engine,
								      target,
								      current);
				break;
			}
		}
	}
	rcu_read_unlock();

	return ret;
}
/*
 * Called on the current task to return LSM_UNSAFE_* bits implied by tracing.
 * Called with task_lock held.
 */
int
utrace_unsafe_exec(struct task_struct *tsk)
{
	struct utrace *utrace = tsk->utrace;
	struct list_head *pos, *next;
	struct utrace_attached_engine *engine;
	const struct utrace_engine_ops *ops;
	int unsafe = 0;

	/* XXX must change for sharing */
	list_for_each_safe_rcu(pos, next, &utrace->engines) {
		engine = list_entry(pos, struct utrace_attached_engine, entry);
		ops = rcu_dereference(engine->ops);
		if (ops->unsafe_exec)
			unsafe |= (*ops->unsafe_exec)(engine, tsk);
	}

	return unsafe;
}