2 * linux/kernel/ptrace.c
4 * (C) Copyright 1999 Linus Torvalds
6 * Common interfaces for "ptrace()" which we do not want
7 * to continually duplicate across every architecture.
10 #include <linux/capability.h>
11 #include <linux/module.h>
12 #include <linux/sched.h>
13 #include <linux/errno.h>
15 #include <linux/highmem.h>
16 #include <linux/pagemap.h>
17 #include <linux/smp_lock.h>
18 #include <linux/ptrace.h>
19 #include <linux/security.h>
20 #include <linux/signal.h>
22 #include <asm/pgtable.h>
23 #include <asm/uaccess.h>
26 #include <linux/utrace.h>
27 #include <linux/tracehook.h>
28 #include <asm/tracehook.h>
31 int getrusage(struct task_struct *, int, struct rusage __user *);
33 //#define PTRACE_DEBUG
/*
 * Core permission check for tracing/inspecting @task.
 * Allowed when current's uid/gid match ALL of the target's real,
 * effective and saved ids, or when current has CAP_SYS_PTRACE;
 * non-dumpable targets additionally require CAP_SYS_PTRACE.
 * The final say goes to the security module via security_ptrace().
 * NOTE(review): this extract is missing intermediate source lines
 * (braces, early returns); tokens below are kept verbatim.
 */
35 int __ptrace_may_attach(struct task_struct *task)
37 	/* May we inspect the given task?
38 	 * This check is used both for attaching with ptrace
39 	 * and for allowing access to sensitive information in /proc.
41 	 * ptrace_attach denies several cases that /proc allows
42 	 * because setting up the necessary parent/child relationship
43 	 * or halting the specified task is impossible.
46 	/* Don't let security modules deny introspection */
49 	if (((current->uid != task->euid) ||
50 	    (current->uid != task->suid) ||
51 	    (current->uid != task->uid) ||
52 	    (current->gid != task->egid) ||
53 	    (current->gid != task->sgid) ||
54 	    (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
/* dumpable flag lives in the target's mm; presumably guarded by a
 * task->mm NULL check on a line not visible here -- TODO confirm. */
58 	dumpable = task->mm->dumpable;
59 	if (!dumpable && !capable(CAP_SYS_PTRACE))
62 	return security_ptrace(current, task);
/*
 * Public wrapper around __ptrace_may_attach(); the surrounding
 * locking/return lines are not visible in this extract.
 */
65 int ptrace_may_attach(struct task_struct *task)
69 	err = __ptrace_may_attach(task);
75  * Access another process' address space.
76  * Source/target buffer must be kernel space,
77  * Do not walk the page table directly, use get_user_pages
/*
 * Copies @len bytes between kernel buffer @buf and @tsk's user memory
 * at @addr, one page at a time via get_user_pages() under mmap_sem.
 * @write selects direction (1 = write into the target).  Returns the
 * number of bytes actually transferred (buf - old_buf).
 */
80 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
83 	struct vm_area_struct *vma;
87 	mm = get_task_mm(tsk);
91 	down_read(&mm->mmap_sem);
92 	/* ignore errors, just check how much was successfully transferred */
94 		int bytes, ret, offset;
/* Pin exactly one page covering addr; also gives us the vma for the
 * cache-coherent copy_{to,from}_user_page helpers below. */
97 		ret = get_user_pages(tsk, mm, addr, 1,
98 				write, 1, &page, &vma);
/* Clamp the chunk so it never crosses the page boundary. */
103 		offset = addr & (PAGE_SIZE-1);
104 		if (bytes > PAGE_SIZE-offset)
105 			bytes = PAGE_SIZE-offset;
109 			copy_to_user_page(vma, page, addr,
110 					  maddr + offset, buf, bytes);
/* Writing dirtied the page; tell the VM so it gets written back. */
111 			set_page_dirty_lock(page);
113 			copy_from_user_page(vma, page, addr,
114 					    buf, maddr + offset, bytes);
117 		page_cache_release(page);
122 	up_read(&mm->mmap_sem);
125 	return buf - old_buf;
/*
 * When ptrace is configured out, provide a stub syscall entry point
 * (body not visible in this extract; presumably returns -ENOSYS).
 */
129 #ifndef CONFIG_PTRACE
131 asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
141  * These elements are always available, even when the struct is
142  * awaiting destruction at the next RCU callback point.
/* Per-tracee bookkeeping linking a utrace engine to the ptrace view
 * of the world.  Lives on parent->ptracees; freed via RCU (see
 * ptrace_done/ptrace_state_free).  The struct's opening declaration
 * line is not visible in this extract. */
144 	struct utrace_attached_engine *engine;
145 	struct task_struct *task; /* Target task.  */
146 	struct task_struct *parent; /* Whom we report to.  */
147 	struct list_head entry; /* Entry on parent->ptracees list.  */
/* 'dead' is part of a union with the live state (u.live/u.dead
 * accesses elsewhere suggest so -- TODO confirm). */
150 		struct rcu_head dead;
152 			u8 options; /* PTRACE_SETOPTIONS bits.  */
153 			unsigned int stopped:1; /* Stopped for report.  */
154 			unsigned int reported:1; /* wait already reported.  */
155 			unsigned int syscall:1; /* Reporting for syscall.  */
157 			unsigned int sysemu:1; /* PTRACE_SYSEMU in progress.  */
159 			unsigned int have_eventmsg:1; /* u.eventmsg valid.  */
160 			unsigned int cap_sys_ptrace:1; /* Tracer capable.  */
164 				unsigned long eventmsg;
/* Forward declaration; the ops table itself is at the bottom of the file. */
171 static const struct utrace_engine_ops ptrace_utrace_ops; /* Initialized below.  */
/*
 * Add @state to its parent's ptracees list.  task_lock on the parent
 * serializes list writers; RCU protects concurrent readers.
 */
175 ptrace_state_link(struct ptrace_state *state)
177 	task_lock(state->parent);
178 	list_add_rcu(&state->entry, &state->parent->ptracees);
179 	task_unlock(state->parent);
/*
 * Remove @state from its parent's ptracees list (inverse of
 * ptrace_state_link); same locking discipline.
 */
183 ptrace_state_unlink(struct ptrace_state *state)
185 	task_lock(state->parent);
186 	list_del_rcu(&state->entry);
187 	task_unlock(state->parent);
/*
 * Allocate and initialize the ptrace_state tying @engine on @target to
 * tracer @parent, link it on the parent's ptracees list, and publish
 * it through engine->data (RCU-visible).  Returns an error code on
 * allocation failure (return lines not visible in this extract).
 */
191 ptrace_setup(struct task_struct *target, struct utrace_attached_engine *engine,
192 	     struct task_struct *parent, u8 options, int cap_sys_ptrace)
194 	struct ptrace_state *state = kzalloc(sizeof *state, GFP_USER);
195 	if (unlikely(state == NULL))
198 	state->engine = engine;
199 	state->task = target;
200 	state->parent = parent;
201 	state->u.live.options = options;
202 	state->u.live.cap_sys_ptrace = cap_sys_ptrace;
203 	ptrace_state_link(state);
/* engine->data must not already be claimed by another user. */
205 	BUG_ON(engine->data != 0);
/* rcu_assign_pointer so readers see a fully-initialized state. */
206 	rcu_assign_pointer(engine->data, (unsigned long) state);
/*
 * RCU callback: recover the ptrace_state from its embedded rcu_head
 * and free it (the kfree line is not visible in this extract).
 */
212 ptrace_state_free(struct rcu_head *rhead)
214 	struct ptrace_state *state = container_of(rhead,
215 						  struct ptrace_state, u.dead);
/*
 * Schedule @state for freeing after a grace period, so concurrent
 * RCU readers of engine->data / the ptracees list stay safe.
 */
220 ptrace_done(struct ptrace_state *state)
222 	INIT_RCU_HEAD(&state->u.dead);
223 	call_rcu(&state->u.dead, ptrace_state_free);
227  * Update the tracing engine state to match the new ptrace state.
/*
 * Recompute the utrace event/action flag set from the current ptrace
 * options and @flags, then push it to the engine.  Also maintains
 * state->u.live.stopped from the QUIESCE bit and clears per-stop
 * scratch state (siginfo pointer, exit_code) when resuming.
 */
230 ptrace_update(struct task_struct *target, struct utrace_attached_engine *engine,
233 	struct ptrace_state *state = (struct ptrace_state *) engine->data;
236 	 * These events are always reported.
238 	flags |= (UTRACE_EVENT(DEATH) | UTRACE_EVENT(EXEC)
239 		  | UTRACE_EVENT_SIGNAL_ALL);
242 	 * We always have to examine clone events to check for CLONE_PTRACE.
244 	flags |= UTRACE_EVENT(CLONE);
247 	 * PTRACE_SETOPTIONS can request more events.
249 	if (state->u.live.options & PTRACE_O_TRACEEXIT)
250 		flags |= UTRACE_EVENT(EXIT);
251 	if (state->u.live.options & PTRACE_O_TRACEVFORKDONE)
252 		flags |= UTRACE_EVENT(VFORK_DONE);
255 	 * ptrace always inhibits normal parent reaping.
256 	 * But for a corner case we sometimes see the REAP event instead.
258 	flags |= UTRACE_ACTION_NOREAP | UTRACE_EVENT(REAP);
/* QUIESCE present <=> tracee considered stopped for reporting. */
260 	state->u.live.stopped = (flags & UTRACE_ACTION_QUIESCE) != 0;
261 	if (!state->u.live.stopped) {
262 		if (!state->u.live.have_eventmsg)
263 			state->u.live.u.siginfo = NULL;
/* Don't clobber exit_code while the task is exiting -- the death
 * report path uses it. */
264 		if (!(target->flags & PF_EXITING))
265 			target->exit_code = 0;
267 	utrace_set_flags(target, engine, flags);
/*
 * PTRACE_TRACEME: current asks to be traced by its parent.  Attaches
 * an exclusive utrace engine to current, runs the security check,
 * sets up ptrace_state with the parent as tracer (no options, no
 * CAP_SYS_PTRACE flag), and activates the engine.  Detaches again on
 * setup failure.
 */
270 static int ptrace_traceme(void)
272 	struct utrace_attached_engine *engine;
275 	engine = utrace_attach(current, (UTRACE_ATTACH_CREATE
276 					 | UTRACE_ATTACH_EXCLUSIVE
277 					 | UTRACE_ATTACH_MATCH_OPS),
278 			       &ptrace_utrace_ops, 0UL);
280 	if (IS_ERR(engine)) {
281 		retval = PTR_ERR(engine);
/* -EEXIST: already traced via our ops (handling line not visible). */
282 		if (retval == -EEXIST)
/* task_lock(current) presumably taken on a line not visible here --
 * the unlock below implies it.  TODO confirm. */
287 		retval = security_ptrace(current->parent, current);
288 		task_unlock(current);
290 			retval = ptrace_setup(current, engine,
291 					      current->parent, 0, 0);
293 			utrace_detach(current, engine);
295 		ptrace_update(current, engine, 0);
/*
 * PTRACE_ATTACH: become the tracer of @task.  Refuses self/own
 * thread group and kernel threads, attaches an exclusive engine,
 * checks permission, sets up state (recording CAP_SYS_PTRACE), then
 * forces SIGSTOP into the target per traditional ptrace semantics.
 * If the target was already TASK_STOPPED, flushes regset 0 so user
 * memory backing register data is current.
 */
301 static int ptrace_attach(struct task_struct *task)
303 	struct utrace_attached_engine *engine;
/* Can't trace a thread in our own thread group. */
309 	if (task->tgid == current->tgid)
311 	if (!task->mm)		/* kernel threads */
314 	engine = utrace_attach(task, (UTRACE_ATTACH_CREATE
315 				      | UTRACE_ATTACH_EXCLUSIVE
316 				      | UTRACE_ATTACH_MATCH_OPS),
317 			      &ptrace_utrace_ops, 0);
318 	if (IS_ERR(engine)) {
319 		retval = PTR_ERR(engine);
320 		if (retval == -EEXIST)
325 	if (ptrace_may_attach(task))
326 		retval = ptrace_setup(task, engine, current, 0,
327 				      capable(CAP_SYS_PTRACE));
/* On permission/setup failure, undo the attach. */
329 		utrace_detach(task, engine);
334 	ptrace_update(task, engine, 0);
335 	force_sig_specific(SIGSTOP, task);
/* Sample target state under siglock to learn if it was already
 * stopped before our SIGSTOP arrived. */
337 	spin_lock_irq(&task->sighand->siglock);
338 	stopped = (task->state == TASK_STOPPED);
339 	spin_unlock_irq(&task->sighand->siglock);
343 		 * Do now the regset 0 writeback that we do on every
344 		 * stop, since it's never been done.  On register
345 		 * window machines, this makes sure the user memory
346 		 * backing the register data is up to date.
348 		const struct utrace_regset *regset;
349 		regset = utrace_regset(task, engine,
350 				       utrace_native_view(task), 0);
351 		if (regset->writeback)
352 			(*regset->writeback)(task, regset, 1);
/*
 * Tear down tracing of @task: clear engine->data first (so RCU
 * readers like ptrace_tracer_task see NULL, not stale state), detach
 * the utrace engine, then unlink the state from the tracer's list.
 */
360 static int ptrace_detach(struct task_struct *task,
361 			 struct utrace_attached_engine *engine)
363 	struct ptrace_state *state = (struct ptrace_state *) engine->data;
365 	 * Clearing ->data before detach makes sure an unrelated task
366 	 * calling into ptrace_tracer_task won't try to touch stale state.
368 	rcu_assign_pointer(engine->data, 0UL);
369 	utrace_detach(task, engine);
370 	ptrace_state_unlink(state);
377  * This is called when we are exiting.  We must stop all our ptracing.
/*
 * Exit path for a tracer: detach every tracee in two passes --
 * first utrace_detach all engines (lock-free walk), then remove and
 * clean up our list entries.  The list must be empty afterwards.
 */
380 ptrace_exit(struct task_struct *tsk)
383 	if (unlikely(!list_empty(&tsk->ptracees))) {
384 		struct ptrace_state *state, *next;
387 		 * First detach the utrace layer from all the tasks.
388 		 * We don't want to hold any locks while calling utrace_detach.
390 		list_for_each_entry_rcu(state, &tsk->ptracees, entry) {
391 			rcu_assign_pointer(state->engine->data, 0UL);
392 			utrace_detach(state->task, state->engine);
396 		 * Now clear out our list and clean up our data structures.
397 		 * The task_lock protects our list structure.
400 		list_for_each_entry_safe(state, next, &tsk->ptracees, entry) {
401 			list_del_rcu(&state->entry);
408 	BUG_ON(!list_empty(&tsk->ptracees));
/*
 * Deliver the signal number a tracer passed to a resuming request
 * (PTRACE_CONT/DETACH/etc.).  From a syscall stop: plain send_sig.
 * From a signal stop (we hold the intercepted siginfo): fix up the
 * siginfo if the tracer changed the signo, then re-inject it via
 * utrace_inject_signal so delivery resumes where it was intercepted.
 */
412 ptrace_induce_signal(struct task_struct *target,
413 		     struct utrace_attached_engine *engine,
416 	struct ptrace_state *state = (struct ptrace_state *) engine->data;
/* Reject out-of-range signal numbers (0 is presumably accepted as
 * "no signal" on a line not visible here -- TODO confirm). */
421 	if (!valid_signal(signr))
424 	if (state->u.live.syscall) {
426 		 * This is the traditional ptrace behavior when given
427 		 * a signal to resume from a syscall tracing stop.
429 		send_sig(signr, target, 1);
431 	else if (!state->u.live.have_eventmsg && state->u.live.u.siginfo) {
432 		siginfo_t *info = state->u.live.u.siginfo;
434 		/* Update the siginfo structure if the signal has
435 		   changed.  If the debugger wanted something
436 		   specific in the siginfo structure then it should
437 		   have updated *info via PTRACE_SETSIGINFO.  */
438 		if (signr != info->si_signo) {
439 			info->si_signo = signr;
441 			info->si_code = SI_USER;
442 			info->si_pid = current->pid;
443 			info->si_uid = current->uid;
446 		return utrace_inject_signal(target, engine,
447 					    UTRACE_ACTION_RESUME, info, NULL);
/*
 * PTRACE_{GET,SET}REGS-style access to a whole regset (@setno) of
 * @target, copying @size bytes at @offset to/from user buffer @data.
 * size == -1 means "the full regset".  Verifies user-buffer access
 * before calling the regset's get/set hook.
 */
454 ptrace_regset_access(struct task_struct *target,
455 		     struct utrace_attached_engine *engine,
456 		     const struct utrace_regset_view *view,
457 		     int setno, unsigned long offset, unsigned int size,
458 		     void __user *data, int write)
460 	const struct utrace_regset *regset = utrace_regset(target, engine,
464 	if (unlikely(regset == NULL))
467 	if (size == (unsigned int) -1)
468 		size = regset->size * regset->n;
471 		if (!access_ok(VERIFY_READ, data, size))
474 		ret = (*regset->set)(target, regset,
475 				     offset, size, NULL, data);
478 		if (!access_ok(VERIFY_WRITE, data, size))
481 		ret = (*regset->get)(target, regset,
482 				     offset, size, NULL, data);
/*
 * Read or write a single register @regno within regset @setno.
 * Validates regno against the regset's bias/count, converts it to a
 * byte position, and dispatches to the regset get/set hook after an
 * access_ok check on the one-register user buffer.
 */
489 ptrace_onereg_access(struct task_struct *target,
490 		     struct utrace_attached_engine *engine,
491 		     const struct utrace_regset_view *view,
492 		     int setno, unsigned long regno,
493 		     void __user *data, int write)
495 	const struct utrace_regset *regset = utrace_regset(target, engine,
500 	if (unlikely(regset == NULL))
503 	if (regno < regset->bias || regno >= regset->bias + regset->n)
506 	pos = (regno - regset->bias) * regset->size;
509 		if (!access_ok(VERIFY_READ, data, regset->size))
512 		ret = (*regset->set)(target, regset, pos, regset->size,
516 		if (!access_ok(VERIFY_WRITE, data, regset->size))
519 		ret = (*regset->get)(target, regset, pos, regset->size,
/*
 * Access @size bytes starting at struct-user offset @addr through a
 * ptrace compatibility @layout: walk the layout segments, map each
 * covered span either to a regset get/set call or (regset == -1) to
 * a read-as-zero / ignore-write hole, looping until the request is
 * satisfied or an error occurs.  Exactly one of @udata/@kdata is the
 * buffer (both are passed through to the regset hooks).
 */
527 ptrace_layout_access(struct task_struct *target,
528 		     struct utrace_attached_engine *engine,
529 		     const struct utrace_regset_view *view,
530 		     const struct ptrace_layout_segment layout[],
531 		     unsigned long addr, unsigned int size,
532 		     void __user *udata, void *kdata, int write)
534 	const struct ptrace_layout_segment *seg;
/* Single up-front access_ok for the whole user buffer. */
538 	    !access_ok(write ? VERIFY_READ : VERIFY_WRITE, udata, size))
/* Find the segment containing addr; a zero ->end terminates the table. */
545 		while (addr >= seg->end && seg->end != 0)
548 		if (addr < seg->start || addr >= seg->end)
551 		pos = addr - seg->start + seg->offset;
/* n = bytes we can handle from this segment in this iteration. */
552 		n = min(size, seg->end - (unsigned int) addr);
554 		if (unlikely(seg->regset == (unsigned int) -1)) {
556 			 * This is a no-op/zero-fill portion of struct user.
562 			else if (clear_user(udata, n))
568 			const struct utrace_regset *regset = utrace_regset(
569 				target, engine, view, seg->regset);
570 			if (unlikely(regset == NULL))
574 			 * A ptrace compatibility layout can do a misaligned
575 			 * regset access, e.g. word access to larger data.
576 			 * An arch's compat layout can be this way only if
577 			 * it is actually ok with the regset code despite the
578 			 * regset->align setting.
580 			align = min(regset->align, size);
581 			if ((pos & (align - 1))
582 			    || pos >= regset->n * regset->size)
586 				ret = (*regset->set)(target, regset,
587 						     pos, n, kdata, udata);
589 				ret = (*regset->get)(target, regset,
590 						     pos, n, kdata, udata);
599 	} while (ret == 0 && size > 0);
/*
 * Common front end for sys_ptrace/compat_sys_ptrace: resolve @pid to
 * a task (with a reference), handle TRACEME/ATTACH directly, verify
 * the caller is this task's tracer via the engine's ptrace_state, and
 * for most requests require the target to be quiescent.  On success
 * fills *childp/*enginep/*statep for the caller; the child reference
 * is dropped on the error paths (put_task_struct below).
 */
606 ptrace_start(long pid, long request,
607 	     struct task_struct **childp,
608 	     struct utrace_attached_engine **enginep,
609 	     struct ptrace_state **statep)
612 	struct task_struct *child;
613 	struct utrace_attached_engine *engine;
614 	struct ptrace_state *state;
617 	if (request == PTRACE_TRACEME)
618 		return ptrace_traceme();
/* Look up the pid under tasklist_lock and pin the task before
 * dropping the lock. */
621 	read_lock(&tasklist_lock);
622 	child = find_task_by_pid(pid);
624 		get_task_struct(child);
625 	read_unlock(&tasklist_lock);
/* Debug-only trace; presumably under PTRACE_DEBUG (#ifdef not
 * visible in this extract). */
627 	printk("ptrace pid %ld => %p\n", pid, child);
633 	if (pid == 1)		/* you may not mess with init */
/* vserver context isolation check. */
636 	if (!vx_check(vx_task_xid(child), VX_WATCH|VX_IDENT))
639 	if (request == PTRACE_ATTACH) {
640 		ret = ptrace_attach(child);
/* For all other requests, find our existing engine on the child. */
644 	engine = utrace_attach(child, UTRACE_ATTACH_MATCH_OPS,
645 			       &ptrace_utrace_ops, 0);
647 	if (IS_ERR(engine) || engine == NULL)
650 	state = rcu_dereference((struct ptrace_state *) engine->data);
/* Only the recorded tracer may operate on this child. */
651 	if (state == NULL || state->parent != current) {
658 	 * Traditional ptrace behavior demands that the target already be
659 	 * quiescent, but not dead.
661 	if (request != PTRACE_KILL && !state->u.live.stopped) {
663 		printk("%d not stopped (%lx)\n", child->pid, child->state);
665 		if (child->state != TASK_STOPPED)
667 		utrace_set_flags(child, engine,
668 				 engine->flags | UTRACE_ACTION_QUIESCE);
672 	 * We do this for all requests to match traditional ptrace behavior.
673 	 * If the machine state synchronization done at context switch time
674 	 * includes e.g. writing back to user memory, we want to make sure
675 	 * that has finished before a PTRACE_PEEKDATA can fetch the results.
676 	 * On most machines, only regset data is affected by context switch
677 	 * and calling utrace_regset later on will take care of that, so
678 	 * this is superfluous.
680 	 * To do this purely in utrace terms, we could do:
681 	 *	(void) utrace_regset(child, engine, utrace_native_view(child), 0);
683 	wait_task_inactive(child);
685 	if (child->exit_state)
694 	put_task_struct(child);
/*
 * Handle the arch-independent ptrace requests shared by the native
 * and compat syscalls: DETACH, the resume family (CONT/SYSCALL/
 * SYSEMU/SINGLESTEP/SINGLEBLOCK -- each optionally injecting a signal
 * via ptrace_induce_signal, then rebuilding engine flags through
 * ptrace_update), and SETOPTIONS.  Missing switch/case lines are not
 * visible in this extract.
 */
700 ptrace_common(long request, struct task_struct *child,
701 	      struct utrace_attached_engine *engine,
702 	      struct ptrace_state *state,
703 	      unsigned long addr, long data)
711 		 * Detach a process that was attached.
713 		ret = ptrace_induce_signal(child, engine, data);
715 			ret = ptrace_detach(child, engine);
719 		 * These are the operations that resume the child running.
727 	case PTRACE_SYSEMU_SINGLESTEP:
729 #ifdef PTRACE_SINGLEBLOCK
730 	case PTRACE_SINGLEBLOCK:
731 # ifdef ARCH_HAS_BLOCK_STEP
/* ARCH_HAS_BLOCK_STEP may be a runtime expression; reject if false. */
732 		if (! ARCH_HAS_BLOCK_STEP)
734 		if (request == PTRACE_SINGLEBLOCK)
737 	case PTRACE_SINGLESTEP:
738 #ifdef ARCH_HAS_SINGLE_STEP
739 		if (! ARCH_HAS_SINGLE_STEP)
741 		if (request == PTRACE_SINGLESTEP
742 #ifdef PTRACE_SYSEMU_SINGLESTEP
743 		    || request == PTRACE_SYSEMU_SINGLESTEP
748 		ret = ptrace_induce_signal(child, engine, data);
754 		 * Reset the action flags without QUIESCE, so it resumes.
758 		state->u.live.sysemu = (request == PTRACE_SYSEMU_SINGLESTEP
759 					|| request == PTRACE_SYSEMU);
761 		if (request == PTRACE_SINGLESTEP
763 		    || request == PTRACE_SYSEMU_SINGLESTEP
766 			flags |= UTRACE_ACTION_SINGLESTEP;
767 #ifdef PTRACE_SINGLEBLOCK
768 		else if (request == PTRACE_SINGLEBLOCK)
769 			flags |= UTRACE_ACTION_BLOCKSTEP;
771 		if (request == PTRACE_SYSCALL)
772 			flags |= UTRACE_EVENT_SYSCALL;
/* SYSEMU only needs entry events: the syscall is aborted at entry. */
774 		else if (request == PTRACE_SYSEMU
775 			 || request == PTRACE_SYSEMU_SINGLESTEP)
776 			flags |= UTRACE_EVENT(SYSCALL_ENTRY);
778 		ptrace_update(child, engine, flags);
782 #ifdef PTRACE_OLDSETOPTIONS
783 	case PTRACE_OLDSETOPTIONS:
785 	case PTRACE_SETOPTIONS:
787 		if (data & ~PTRACE_O_MASK)
789 		state->u.live.options = data;
/* Re-derive event flags; keep the child quiescent afterwards. */
790 		ptrace_update(child, engine, UTRACE_ACTION_QUIESCE);
/*
 * Native ptrace syscall entry.  Resolves the target via ptrace_start,
 * gives the architecture first crack (arch_ptrace), then handles the
 * generic requests: PEEK/POKE via access_process_vm, GETEVENTMSG,
 * GET/SETSIGINFO, and everything else via ptrace_common.  Drops the
 * child reference before returning.
 */
799 asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
801 	struct task_struct *child;
802 	struct utrace_attached_engine *engine;
803 	struct ptrace_state *state;
/* Debug-only trace; presumably under PTRACE_DEBUG. */
807 	printk("%d sys_ptrace(%ld, %ld, %lx, %lx)\n",
808 	       current->pid, request, pid, addr, data);
811 	ret = ptrace_start(pid, request, &child, &engine, &state);
/* -ENOSYS from the arch hook means "not an arch request" -- fall
 * through to the generic handling below. */
816 	ret = arch_ptrace(&request, child, engine, addr, data, &val);
817 	if (ret != -ENOSYS) {
820 			force_successful_syscall_return();
827 		ret = ptrace_common(request, child, engine, state, addr, data);
830 	case PTRACE_PEEKTEXT: /* read word at location addr. */
831 	case PTRACE_PEEKDATA: {
835 		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
837 		if (copied != sizeof(tmp))
839 		ret = put_user(tmp, (unsigned long __user *) data);
843 	case PTRACE_POKETEXT: /* write the word at location addr. */
844 	case PTRACE_POKEDATA:
846 		if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
851 	case PTRACE_GETEVENTMSG:
852 		ret = put_user(state->u.live.have_eventmsg
853 			       ? state->u.live.u.eventmsg : 0L,
854 			       (unsigned long __user *) data);
856 	case PTRACE_GETSIGINFO:
/* siginfo is only valid when stopped at a signal (not eventmsg). */
858 		if (!state->u.live.have_eventmsg && state->u.live.u.siginfo)
859 			ret = copy_siginfo_to_user((siginfo_t __user *) data,
860 						   state->u.live.u.siginfo);
862 	case PTRACE_SETSIGINFO:
864 		if (!state->u.live.have_eventmsg && state->u.live.u.siginfo
865 		    && copy_from_user(state->u.live.u.siginfo,
866 				      (siginfo_t __user *) data,
873 	put_task_struct(child);
876 	printk("%d ptrace -> %x\n", current->pid, ret);
883 #include <linux/compat.h>
/*
 * 32-bit-compat ptrace entry.  Mirrors sys_ptrace but uses compat
 * types: arch_compat_ptrace first, then PEEK/POKE of compat-sized
 * words, GETEVENTMSG via a compat pointer, and the compat siginfo
 * copy helpers for GET/SETSIGINFO.  cdata is kept both raw (POKE
 * writes the compat-width value) and widened to @data.
 */
885 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
886 				  compat_ulong_t addr, compat_long_t cdata)
888 	const unsigned long data = (unsigned long) (compat_ulong_t) cdata;
889 	struct task_struct *child;
890 	struct utrace_attached_engine *engine;
891 	struct ptrace_state *state;
892 	compat_long_t ret, val;
/* Debug-only trace; presumably under PTRACE_DEBUG. */
895 	printk("%d compat_sys_ptrace(%d, %d, %x, %x)\n",
896 	       current->pid, request, pid, addr, cdata);
898 	ret = ptrace_start(pid, request, &child, &engine, &state);
903 	ret = arch_compat_ptrace(&request, child, engine, addr, cdata, &val);
904 	if (ret != -ENOSYS) {
907 			force_successful_syscall_return();
914 		ret = ptrace_common(request, child, engine, state, addr, data);
917 	case PTRACE_PEEKTEXT: /* read word at location addr. */
918 	case PTRACE_PEEKDATA: {
922 		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
924 		if (copied != sizeof(tmp))
926 		ret = put_user(tmp, (compat_ulong_t __user *) data);
930 	case PTRACE_POKETEXT: /* write the word at location addr. */
931 	case PTRACE_POKEDATA:
/* Write the 32-bit value: sizeof(cdata) is the compat word size. */
933 		if (access_process_vm(child, addr, &cdata, sizeof(cdata), 1) == sizeof(cdata))
938 	case PTRACE_GETEVENTMSG:
939 		ret = put_user(state->u.live.have_eventmsg
940 			       ? state->u.live.u.eventmsg : 0L,
941 			       (compat_long_t __user *) data);
943 	case PTRACE_GETSIGINFO:
945 		if (!state->u.live.have_eventmsg && state->u.live.u.siginfo)
946 			ret = copy_siginfo_to_user32(
947 				(struct compat_siginfo __user *) data,
948 				state->u.live.u.siginfo);
950 	case PTRACE_SETSIGINFO:
952 		if (!state->u.live.have_eventmsg && state->u.live.u.siginfo
953 		    && copy_siginfo_from_user32(
954 			    state->u.live.u.siginfo,
955 			    (struct compat_siginfo __user *) data))
961 	put_task_struct(child);
964 	printk("%d ptrace -> %x\n", current->pid, ret);
972  * We're called with tasklist_lock held for reading.
973  * If we return -ECHILD or zero, next_thread(tsk) must still be valid to use.
974  * If we return another error code, or a successful PID value, we
975  * release tasklist_lock first.
/*
 * ptrace side of wait4/waitid: scan @tsk's ptracees for one matching
 * @pid/@options that has something to report (exited, traced-stop, or
 * job-control stop), compute the wait status, release tasklist_lock,
 * fill in rusage/siginfo/status for userland, mark the report done
 * under the tracee's utrace lock, and detach on a death report.
 */
978 ptrace_do_wait(struct task_struct *tsk,
979 	       pid_t pid, int options, struct siginfo __user *infop,
980 	       int __user *stat_addr, struct rusage __user *rusagep)
982 	struct ptrace_state *state;
983 	struct task_struct *p;
988 	list_for_each_entry_rcu(state, &tsk->ptracees, entry) {
/* pid filtering: 0 = same pgrp as caller, <-1 = specific pgrp
 * (the pid > 0 arm is not visible in this extract). */
995 			if (process_group(p) != process_group(current))
997 		} else if (pid != -1) {
998 			if (process_group(p) != -pid)
/* __WCLONE/__WALL semantics as in normal wait. */
1001 		if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
1002 		    && !(options & __WALL))
1004 		if (security_task_wait(p))
/* Already reported to a previous wait -- skip. */
1008 		if (state->u.live.reported)
1011 		if (state->u.live.stopped)
1013 		if ((p->state & (TASK_TRACED | TASK_STOPPED))
1014 		    && (p->signal->flags & SIGNAL_STOP_STOPPED))
1016 		if (p->exit_state == EXIT_ZOMBIE) {
1017 			if (!likely(options & WEXITED))
1019 			if (delay_group_leader(p))
1023 		// XXX should handle WCONTINUED
1031 	BUG_ON(state->parent != tsk);
1033 	if (p->exit_state) {
1034 		if (unlikely(p->parent == state->parent))
1036 			 * This is our natural child we were ptracing.
1037 			 * When it dies it detaches (see ptrace_report_death).
1038 			 * So we're seeing it here in a race.  When it
1039 			 * finishes detaching it will become reapable in
1040 			 * the normal wait_task_zombie path instead.
/* Decode exit_code into (why, status) wait semantics. */
1043 		if ((p->exit_code & 0x7f) == 0) {
1045 			status = p->exit_code >> 8;
1047 			why = (p->exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
1048 			status = p->exit_code & 0xff;
/* Stopped/traced case: classic wstatus encoding (sig << 8 | 0x7f). */
1053 		status = (p->exit_code << 8) | 0x7f;
1057 	 * At this point we are committed to a successful return
1058 	 * or a user error return.  Release the tasklist_lock.
1060 	read_unlock(&tasklist_lock);
1063 		err = getrusage(p, RUSAGE_BOTH, rusagep);
1066 		err = put_user(SIGCHLD, &infop->si_signo);
1068 			err = put_user(0, &infop->si_errno);
1070 			err = put_user((short)why, &infop->si_code);
1072 			err = put_user(p->pid, &infop->si_pid);
1074 			err = put_user(p->uid, &infop->si_uid);
1076 			err = put_user(status, &infop->si_status);
1078 	if (!err && stat_addr)
1079 		err = put_user(status, stat_addr);
1082 		struct utrace *utrace;
1087 		 * If this was a non-death report, the child might now be
1088 		 * detaching on death in the same race possible in the
1089 		 * p->exit_state check above.  So check for p->utrace being
1090 		 * NULL, then we don't need to update the state any more.
1093 		utrace = rcu_dereference(p->utrace);
1094 		if (likely(utrace != NULL)) {
1095 			utrace_lock(utrace);
1096 			if (unlikely(state->u.live.reported))
1098 				 * Another thread in the group got here
1099 				 * first and reaped it before we locked.
1101 				err = -ERESTARTNOINTR;
1102 			state->u.live.reported = 1;
1103 			utrace_unlock(utrace);
/* A successful death report (not CLD_TRAPPED) ends the tracing. */
1107 	if (err > 0 && why != CLD_TRAPPED)
1108 		ptrace_detach(p, state->engine);
/*
 * Send the SIGCHLD-style notification for @tsk to @parent for reason
 * @why (CLD_EXITED/KILLED/DUMPED/STOPPED/CONTINUED/TRAPPED): build
 * the siginfo, honor the parent's SA_NOCLDSTOP/SA_NOCLDWAIT handler
 * flags, deliver under the parent's siglock, and always wake any
 * wait4 sleepers even when no signal is generated.
 */
1115 do_notify(struct task_struct *tsk, struct task_struct *parent, int why)
1117 	struct siginfo info;
1118 	unsigned long flags;
1119 	struct sighand_struct *sighand;
1122 	info.si_signo = SIGCHLD;
1124 	info.si_pid = tsk->pid;
1125 	info.si_uid = tsk->uid;
1127 	/* FIXME: find out whether or not this is supposed to be c*time. */
1128 	info.si_utime = cputime_to_jiffies(tsk->utime);
1129 	info.si_stime = cputime_to_jiffies(tsk->stime);
1131 	sa_mask = SA_NOCLDSTOP;
1133 	info.si_status = tsk->exit_code & 0x7f;
1134 	if (why == CLD_CONTINUED)
1135 		info.si_status = SIGCONT;
1136 	else if (why == CLD_STOPPED)
1137 		info.si_status = tsk->signal->group_exit_code & 0x7f;
1138 	else if (why == CLD_EXITED) {
/* For exits, SA_NOCLDWAIT (not SA_NOCLDSTOP) suppresses the signal. */
1139 		sa_mask = SA_NOCLDWAIT;
1140 		if (tsk->exit_code & 0x80)
1141 			info.si_code = CLD_DUMPED;
1142 		else if (tsk->exit_code & 0x7f)
1143 			info.si_code = CLD_KILLED;
1145 			info.si_code = CLD_EXITED;
1146 			info.si_status = tsk->exit_code >> 8;
1150 	sighand = parent->sighand;
1151 	spin_lock_irqsave(&sighand->siglock, flags);
1152 	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1153 	    !(sighand->action[SIGCHLD-1].sa.sa_flags & sa_mask))
1154 		__group_send_sig_info(SIGCHLD, &info, parent);
1156 	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1158 	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1159 	spin_unlock_irqrestore(&sighand->siglock, flags);
/*
 * Common stop path for every ptrace report: quiesce ourselves BEFORE
 * publishing the stop (so the tracer can't race a PTRACE_CONT in
 * ahead of the flag), write back regset 0 for register-window
 * machines, record exit_code and notify the tracer via CLD_TRAPPED.
 */
1163 ptrace_report(struct utrace_attached_engine *engine, struct task_struct *tsk,
1166 	struct ptrace_state *state = (struct ptrace_state *) engine->data;
1167 	const struct utrace_regset *regset;
/* Debug-only traces; presumably under PTRACE_DEBUG. */
1170 	printk("%d ptrace_report %d engine %p state %p code %x parent %d (%p)\n",
1171 	       current->pid, tsk->pid, engine, state, code,
1172 	       state->parent->pid, state->parent);
1173 	if (!state->u.live.have_eventmsg && state->u.live.u.siginfo) {
1174 		const siginfo_t *si = state->u.live.u.siginfo;
1175 		printk("  si %d code %x errno %d addr %p\n",
1176 		       si->si_signo, si->si_code, si->si_errno,
/* Must not already be in a reported stop. */
1181 	BUG_ON(state->u.live.stopped);
1184 	 * Set our QUIESCE flag right now, before notifying the tracer.
1185 	 * We do this before setting state->u.live.stopped rather than
1186 	 * by using UTRACE_ACTION_NEWSTATE in our return value, to
1187 	 * ensure that the tracer can't get the notification and then
1188 	 * try to resume us with PTRACE_CONT before we set the flag.
1190 	utrace_set_flags(tsk, engine, engine->flags | UTRACE_ACTION_QUIESCE);
1193 	 * If regset 0 has a writeback call, do it now.  On register window
1194 	 * machines, this makes sure the user memory backing the register
1195 	 * data is up to date by the time wait_task_inactive returns to
1196 	 * ptrace_start in our tracer doing a PTRACE_PEEKDATA or the like.
1198 	regset = utrace_regset(tsk, engine, utrace_native_view(tsk), 0);
1199 	if (regset->writeback)
1200 		(*regset->writeback)(tsk, regset, 0);
1202 	state->u.live.stopped = 1;
1203 	state->u.live.reported = 0;
1204 	tsk->exit_code = code;
1205 	do_notify(tsk, state->parent, CLD_TRAPPED);
1208 	printk("%d ptrace_report quiescing exit_code %x\n",
1209 	       current->pid, current->exit_code);
1212 	return UTRACE_ACTION_RESUME;
/*
 * Report a PTRACE_EVENT_* stop: encoded as (event << 8) | SIGTRAP in
 * exit_code, per the ptrace ABI.  Clears the syscall-stop flag first.
 */
1216 ptrace_event(struct utrace_attached_engine *engine, struct task_struct *tsk,
1219 	struct ptrace_state *state = (struct ptrace_state *) engine->data;
1220 	state->u.live.syscall = 0;
1221 	return ptrace_report(engine, tsk, (event << 8) | SIGTRAP);
/*
 * utrace death callback.  For a natural child of the tracer we
 * detach and let normal parent reaping proceed (generating the
 * SIGCHLD ourselves in the cases where it otherwise wouldn't be);
 * for a non-child tracee we stay attached, notify CLD_EXITED, and
 * resume so our tracer can wait for the zombie.
 */
1226 ptrace_report_death(struct utrace_attached_engine *engine,
1227 		    struct task_struct *tsk)
1229 	struct ptrace_state *state = (struct ptrace_state *) engine->data;
1231 	if (tsk->parent == state->parent) {
1233 		 * This is a natural child, so we detach and let the normal
1234 		 * reporting happen once our NOREAP action is gone.  But
1235 		 * first, generate a SIGCHLD for those cases where normal
1236 		 * behavior won't.  A ptrace'd child always generates SIGCHLD.
1238 		if (tsk->exit_signal == -1 || !thread_group_empty(tsk))
1239 			do_notify(tsk, state->parent, CLD_EXITED);
1240 		ptrace_state_unlink(state);
1241 		rcu_assign_pointer(engine->data, 0UL);
1243 		return UTRACE_ACTION_DETACH;
1246 	state->u.live.reported = 0;
1247 	do_notify(tsk, state->parent, CLD_EXITED);
1248 	return UTRACE_ACTION_RESUME;
1252  * We get this only in the case where our UTRACE_ACTION_NOREAP was ignored.
1253  * That happens solely when a non-leader exec reaps the old leader.
/*
 * utrace reap callback: the tracee is gone despite NOREAP, so drop
 * our state if it is still published on the engine.
 */
1256 ptrace_report_reap(struct utrace_attached_engine *engine,
1257 		   struct task_struct *tsk)
1259 	struct ptrace_state *state;
1261 	state = rcu_dereference((struct ptrace_state *) engine->data);
1262 	if (state != NULL) {
1263 		ptrace_state_unlink(state);
1264 		rcu_assign_pointer(engine->data, 0UL);
/*
 * utrace clone callback on a traced parent.  Classifies the clone as
 * FORK/VFORK/CLONE, and if CLONE_PTRACE or the matching PTRACE_O_
 * option is set, auto-attaches the new child (pending SIGSTOP so it
 * stops immediately).  If the option was set, also reports the
 * corresponding PTRACE_EVENT_* with the child's pid as eventmsg.
 */
1272 ptrace_report_clone(struct utrace_attached_engine *engine,
1273 		    struct task_struct *parent,
1274 		    unsigned long clone_flags, struct task_struct *child)
1276 	struct ptrace_state *state = (struct ptrace_state *) engine->data;
1277 	struct utrace_attached_engine *child_engine;
1278 	int event = PTRACE_EVENT_FORK;
1279 	int option = PTRACE_O_TRACEFORK;
/* Debug-only trace; presumably under PTRACE_DEBUG. */
1282 	printk("%d (%p) engine %p ptrace_report_clone child %d (%p) fl %lx\n",
1283 	       parent->pid, parent, engine, child->pid, child, clone_flags);
1286 	if (clone_flags & CLONE_UNTRACED)
1289 	if (clone_flags & CLONE_VFORK) {
1290 		event = PTRACE_EVENT_VFORK;
1291 		option = PTRACE_O_TRACEVFORK;
1293 	else if ((clone_flags & CSIGNAL) != SIGCHLD) {
1294 		event = PTRACE_EVENT_CLONE;
1295 		option = PTRACE_O_TRACECLONE;
1298 	if (!(clone_flags & CLONE_PTRACE) && !(state->u.live.options & option))
1301 	child_engine = utrace_attach(child, (UTRACE_ATTACH_CREATE
1302 					     | UTRACE_ATTACH_EXCLUSIVE
1303 					     | UTRACE_ATTACH_MATCH_OPS),
1304 				     &ptrace_utrace_ops, 0UL);
1305 	if (unlikely(IS_ERR(child_engine))) {
/* Only -ENOMEM is expected here; anything else is a logic bug. */
1306 		BUG_ON(PTR_ERR(child_engine) != -ENOMEM);
1308 		       "ptrace out of memory, lost child %d of %d",
1309 		       child->pid, parent->pid);
1312 		int ret = ptrace_setup(child, child_engine,
/* Child inherits our options and tracer capability. */
1314 				       state->u.live.options,
1315 				       state->u.live.cap_sys_ptrace);
1316 		if (unlikely(ret != 0)) {
1317 			BUG_ON(ret != -ENOMEM);
1319 			       "ptrace out of memory, lost child %d of %d",
1320 			       child->pid, parent->pid);
1321 			utrace_detach(child, child_engine);
/* Queue SIGSTOP by hand: the child isn't running yet, so set the
 * pending bit directly rather than sending a signal. */
1324 			sigaddset(&child->pending.signal, SIGSTOP);
1325 			set_tsk_thread_flag(child, TIF_SIGPENDING);
1326 			ptrace_update(child, child_engine, 0);
1330 	if (state->u.live.options & option) {
1331 		state->u.live.have_eventmsg = 1;
1332 		state->u.live.u.eventmsg = child->pid;
1333 		return ptrace_event(engine, parent, event);
1337 	return UTRACE_ACTION_RESUME;
/*
 * utrace vfork-done callback: report PTRACE_EVENT_VFORK_DONE with the
 * child's pid as the event message.
 */
1342 ptrace_report_vfork_done(struct utrace_attached_engine *engine,
1343 			 struct task_struct *parent, pid_t child_pid)
1345 	struct ptrace_state *state = (struct ptrace_state *) engine->data;
1346 	state->u.live.have_eventmsg = 1;
1347 	state->u.live.u.eventmsg = child_pid;
1348 	return ptrace_event(engine, parent, PTRACE_EVENT_VFORK_DONE);
/*
 * utrace signal callback: stash the intercepted siginfo for the
 * tracer (PTRACE_GETSIGINFO / signal re-injection) and report a stop
 * with the signal number.  UTRACE_SIGNAL_IGN makes the original
 * delivery a no-op -- resumption decides what really gets delivered.
 */
1353 ptrace_report_signal(struct utrace_attached_engine *engine,
1354 		     struct task_struct *tsk, struct pt_regs *regs,
1355 		     u32 action, siginfo_t *info,
1356 		     const struct k_sigaction *orig_ka,
1357 		     struct k_sigaction *return_ka)
1359 	struct ptrace_state *state = (struct ptrace_state *) engine->data;
1360 	int signo = info == NULL ? SIGTRAP : info->si_signo;
1361 	state->u.live.syscall = 0;
1362 	state->u.live.have_eventmsg = 0;
1363 	state->u.live.u.siginfo = info;
1364 	return ptrace_report(engine, tsk, signo) | UTRACE_SIGNAL_IGN;
/*
 * utrace job-control callback: notify the tracer (CLD_STOPPED or
 * CLD_CONTINUED via @type) ourselves and suppress the normal SIGCHLD.
 */
1368 ptrace_report_jctl(struct utrace_attached_engine *engine,
1369 		   struct task_struct *tsk, int type)
1371 	struct ptrace_state *state = (struct ptrace_state *) engine->data;
1372 	do_notify(tsk, state->parent, type);
1373 	return UTRACE_JCTL_NOSIGCHLD;
/*
 * utrace exec callback: with PTRACE_O_TRACEEXEC report the EXEC
 * event; otherwise fall back to the legacy plain-SIGTRAP stop.
 */
1377 ptrace_report_exec(struct utrace_attached_engine *engine,
1378 		   struct task_struct *tsk,
1379 		   const struct linux_binprm *bprm,
1380 		   struct pt_regs *regs)
1382 	struct ptrace_state *state = (struct ptrace_state *) engine->data;
1383 	if (state->u.live.options & PTRACE_O_TRACEEXEC)
1384 		return ptrace_event(engine, tsk, PTRACE_EVENT_EXEC);
1385 	state->u.live.syscall = 0;
1386 	return ptrace_report(engine, tsk, SIGTRAP);
/*
 * Shared syscall entry/exit report.  Under SYSEMU at entry, abort the
 * syscall so it never runs.  Reports SIGTRAP, with bit 0x80 set when
 * PTRACE_O_TRACESYSGOOD lets the tracer distinguish syscall stops.
 */
1390 ptrace_report_syscall(struct utrace_attached_engine *engine,
1391 		      struct task_struct *tsk, struct pt_regs *regs,
1394 	struct ptrace_state *state = (struct ptrace_state *) engine->data;
1395 #ifdef PTRACE_SYSEMU
1396 	if (entry && state->u.live.sysemu)
1397 		tracehook_abort_syscall(regs);
1399 	state->u.live.syscall = 1;
1400 	return ptrace_report(engine, tsk,
1401 			     ((state->u.live.options & PTRACE_O_TRACESYSGOOD)
1402 			      ? 0x80 : 0) | SIGTRAP);
/* utrace syscall-entry callback: delegate with entry == 1. */
1406 ptrace_report_syscall_entry(struct utrace_attached_engine *engine,
1407 			    struct task_struct *tsk, struct pt_regs *regs)
1409 	return ptrace_report_syscall(engine, tsk, regs, 1);
/* utrace syscall-exit callback: delegate with entry == 0. */
1413 ptrace_report_syscall_exit(struct utrace_attached_engine *engine,
1414 			    struct task_struct *tsk, struct pt_regs *regs)
1416 	return ptrace_report_syscall(engine, tsk, regs, 0);
/*
 * utrace exit callback (PTRACE_O_TRACEEXIT): report EVENT_EXIT with
 * the pending exit code as the event message.
 */
1420 ptrace_report_exit(struct utrace_attached_engine *engine,
1421 		   struct task_struct *tsk, long orig_code, long *code)
1423 	struct ptrace_state *state = (struct ptrace_state *) engine->data;
1424 	state->u.live.have_eventmsg = 1;
1425 	state->u.live.u.eventmsg = *code;
1426 	return ptrace_event(engine, tsk, PTRACE_EVENT_EXIT);
/*
 * Tell the LSM layer how "unsafe" an exec under this tracer is:
 * LSM_UNSAFE_PTRACE_CAP when the tracer had CAP_SYS_PTRACE, plain
 * LSM_UNSAFE_PTRACE otherwise (return line not visible here).
 */
1430 ptrace_unsafe_exec(struct utrace_attached_engine *engine,
1431 		   struct task_struct *tsk)
1433 	struct ptrace_state *state = (struct ptrace_state *) engine->data;
1434 	int unsafe = LSM_UNSAFE_PTRACE;
1435 	if (state->u.live.cap_sys_ptrace)
1436 		unsafe = LSM_UNSAFE_PTRACE_CAP;
/*
 * Return the task tracing @target, or NULL if detach raced with us
 * (engine->data is cleared before detach, see ptrace_detach).
 */
1440 static struct task_struct *
1441 ptrace_tracer_task(struct utrace_attached_engine *engine,
1442 		   struct task_struct *target)
1444 	struct ptrace_state *state;
1447 	 * This call is not necessarily made by the target task,
1448 	 * so ptrace might be getting detached while we run here.
1449 	 * The state pointer will be NULL if that happens.
1451 	state = rcu_dereference((struct ptrace_state *) engine->data);
1453 	return state == NULL ? NULL : state->parent;
/*
 * May @caller access @target's memory through this engine?  Yes only
 * if @caller is the recorded tracer, the target is quiesced or
 * TASK_STOPPED, and the security module agrees.
 */
1457 ptrace_allow_access_process_vm(struct utrace_attached_engine *engine,
1458 			       struct task_struct *target,
1459 			       struct task_struct *caller)
1461 	struct ptrace_state *state;
1465 	 * This call is not necessarily made by the target task,
1466 	 * so ptrace might be getting detached while we run here.
1467 	 * The state pointer will be NULL if that happens.
1470 	state = rcu_dereference((struct ptrace_state *) engine->data);
1471 	ours = (state != NULL
1472 		&& ((engine->flags & UTRACE_ACTION_QUIESCE)
1473 		    || (target->state == TASK_STOPPED))
1474 		&& state->parent == caller);
1477 	return ours && security_ptrace(caller, target) == 0;
/*
 * The utrace engine ops table that implements ptrace: one callback
 * per traced event, plus the exec-safety / tracer-identity / vm-access
 * hooks.  (Closing brace is beyond this extract.)
 */
1481 static const struct utrace_engine_ops ptrace_utrace_ops =
1483 	.report_syscall_entry = ptrace_report_syscall_entry,
1484 	.report_syscall_exit = ptrace_report_syscall_exit,
1485 	.report_exec = ptrace_report_exec,
1486 	.report_jctl = ptrace_report_jctl,
1487 	.report_signal = ptrace_report_signal,
1488 	.report_vfork_done = ptrace_report_vfork_done,
1489 	.report_clone = ptrace_report_clone,
1490 	.report_exit = ptrace_report_exit,
1491 	.report_death = ptrace_report_death,
1492 	.report_reap = ptrace_report_reap,
1493 	.unsafe_exec = ptrace_unsafe_exec,
1494 	.tracer_task = ptrace_tracer_task,
1495 	.allow_access_process_vm = ptrace_allow_access_process_vm,