/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/vs_cvirt.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(task_t *child, task_t *new_parent)
{
        if (!list_empty(&child->ptrace_list))
                BUG();
        if (child->parent == new_parent)
                return;
        list_add(&child->ptrace_list, &child->parent->ptrace_children);
        REMOVE_LINKS(child);
        child->parent = new_parent;
        SET_LINKS(child);
}
/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL.  If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
void ptrace_untrace(task_t *child)
{
        spin_lock(&child->sighand->siglock);
        if (child->state == TASK_TRACED) {
                if (child->signal->flags & SIGNAL_STOP_STOPPED) {
                        child->state = TASK_STOPPED;
                } else {
                        signal_wake_up(child, 1);
                }
        }
        spin_unlock(&child->sighand->siglock);
}
/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(task_t *child)
{
        BUG_ON(!child->ptrace);

        child->ptrace = 0;
        if (!list_empty(&child->ptrace_list)) {
                list_del_init(&child->ptrace_list);
                REMOVE_LINKS(child);
                child->parent = child->real_parent;
                SET_LINKS(child);
        }

        if (child->state == TASK_TRACED)
                ptrace_untrace(child);
}
/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
        int ret = -ESRCH;

        /*
         * We take the read lock around doing both checks to close a
         * possible race where someone else was tracing our child and
         * detached between these two checks.  After this locked check,
         * we are sure that this is our traced child and that can only
         * be changed by us so it's not changing right after this.
         */
        read_lock(&tasklist_lock);
        if ((child->ptrace & PT_PTRACED) && child->parent == current &&
            (!(child->ptrace & PT_ATTACHED) || child->real_parent != current)
            && child->signal != NULL) {
                ret = 0;
                spin_lock_irq(&child->sighand->siglock);
                if (child->state == TASK_STOPPED) {
                        child->state = TASK_TRACED;
                } else if (child->state != TASK_TRACED && !kill) {
                        ret = -ESRCH;
                }
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);

        if (!ret && !kill)
                wait_task_inactive(child);

        /* All systems go.. */
        return ret;
}
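/*
 * may_attach() below is the permission check shared by PTRACE_ATTACH and
 * ptrace_may_attach(): the tracer must either match all of the target's
 * uids and gids or hold CAP_SYS_PTRACE, a non-dumpable target likewise
 * requires CAP_SYS_PTRACE, and the security_ptrace() hook gets the final
 * word.
 */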
static int may_attach(struct task_struct *task)
{
        if (!task->mm)
                return -EPERM;
        if (((current->uid != task->euid) ||
            (current->uid != task->suid) ||
            (current->uid != task->uid) ||
            (current->gid != task->egid) ||
            (current->gid != task->sgid) ||
            (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
                return -EPERM;
        if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
                return -EPERM;

        return security_ptrace(current, task);
}

int ptrace_may_attach(struct task_struct *task)
{
        int err;
        task_lock(task);
        err = may_attach(task);
        task_unlock(task);
        return !err;
}
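/*
 * ptrace_attach(): mark @task PT_PTRACED, re-parent it under current via
 * __ptrace_link(), and force a SIGSTOP so the new tracee stops and can be
 * examined.  Fails if the target is in current's own thread group, is
 * already being traced, or if may_attach() refuses.
 */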
int ptrace_attach(struct task_struct *task)
{
        int retval;

        retval = -EPERM;
        if (task->pid <= 1)
                goto out;
        if (task->tgid == current->tgid)
                goto out;

repeat:
        /*
         * We want to hold both the task-lock and the
         * tasklist_lock for writing at the same time.
         * But that's against the rules (tasklist_lock
         * is taken for reading by interrupts on other
         * cpu's that may have task_lock).
         */
        task_lock(task);
        local_irq_disable();
        if (!write_trylock(&tasklist_lock)) {
                local_irq_enable();
                task_unlock(task);
                do {
                        cpu_relax();
                } while (!write_can_lock(&tasklist_lock));
                goto repeat;
        }

        /* the same process cannot be attached many times */
        if (task->ptrace & PT_PTRACED)
                goto bad;
        retval = may_attach(task);
        if (retval)
                goto bad;

        /* Go */
        task->ptrace |= PT_PTRACED | ((task->real_parent != current)
                                      ? PT_ATTACHED : 0);
        if (capable(CAP_SYS_PTRACE))
                task->ptrace |= PT_PTRACE_CAP;

        __ptrace_link(task, current);

        force_sig_specific(SIGSTOP, task);

bad:
        write_unlock_irq(&tasklist_lock);
        task_unlock(task);
out:
        return retval;
}
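/*
 * For reference, the userspace side of the attach handshake driven by the
 * code above looks roughly like this (a sketch, not part of this file):
 *
 *      ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *      waitpid(pid, &status, 0);       // tracee stops with the forced SIGSTOP
 *      ... PTRACE_PEEKDATA, PTRACE_GETREGS, etc. ...
 *      ptrace(PTRACE_DETACH, pid, NULL, 0);
 */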
void __ptrace_detach(struct task_struct *child, unsigned int data)
{
        child->exit_code = data;
        /* .. re-parent .. */
        __ptrace_unlink(child);
        /* .. and wake it up. */
        if (child->exit_state != EXIT_ZOMBIE)
                wake_up_process(child);
}

int ptrace_detach(struct task_struct *child, unsigned int data)
{
        if (!valid_signal(data))
                return -EIO;

        /* Architecture-specific hardware disable .. */
        ptrace_disable(child);

        write_lock_irq(&tasklist_lock);
        __ptrace_detach(child, data);
        write_unlock_irq(&tasklist_lock);

        return 0;
}
/*
 * Access another process' address space.
 * Source/target buffer must be kernel space.
 * Do not walk the page table directly, use get_user_pages.
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        struct page *page;
        void *old_buf = buf;

        mm = get_task_mm(tsk);
        if (!mm)
                return 0;

        down_read(&mm->mmap_sem);
        /* ignore errors, just check how much was successfully transferred */
        while (len) {
                int bytes, ret, offset;
                void *maddr;

                ret = get_user_pages(tsk, mm, addr, 1,
                                write, 1, &page, &vma);
                if (ret <= 0)
                        break;

                bytes = len;
                offset = addr & (PAGE_SIZE-1);
                if (bytes > PAGE_SIZE-offset)
                        bytes = PAGE_SIZE-offset;

                maddr = kmap(page);
                if (write) {
                        copy_to_user_page(vma, page, addr,
                                          maddr + offset, buf, bytes);
                        set_page_dirty_lock(page);
                } else {
                        copy_from_user_page(vma, page, addr,
                                            buf, maddr + offset, bytes);
                }
                kunmap(page);
                page_cache_release(page);
                len -= bytes;
                buf += bytes;
                addr += bytes;
        }
        up_read(&mm->mmap_sem);
        mmput(mm);

        return buf - old_buf;
}
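/*
 * ptrace_readdata()/ptrace_writedata() below copy a block of the tracee's
 * memory to or from a tracer-supplied user buffer, chunking the transfer
 * through a small kernel bounce buffer and using access_process_vm() for
 * the actual cross-address-space copy.
 */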
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                retval = access_process_vm(tsk, src, buf, this_len, 0);
                if (retval != this_len) {
                        if (copied)
                                break;
                        return -EIO;
                }
                if (copy_to_user(dst, buf, retval))
                        return -EFAULT;
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
        int copied = 0;

        while (len > 0) {
                char buf[128];
                int this_len, retval;

                this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
                if (copy_from_user(buf, src, this_len))
                        return -EFAULT;
                retval = access_process_vm(tsk, dst, buf, this_len, 1);
                if (!retval) {
                        if (copied)
                                break;
                        return -EIO;
                }
                copied += retval;
                src += retval;
                dst += retval;
                len -= retval;
        }
        return copied;
}
static int ptrace_setoptions(struct task_struct *child, long data)
{
        child->ptrace &= ~PT_TRACE_MASK;

        if (data & PTRACE_O_TRACESYSGOOD)
                child->ptrace |= PT_TRACESYSGOOD;

        if (data & PTRACE_O_TRACEFORK)
                child->ptrace |= PT_TRACE_FORK;

        if (data & PTRACE_O_TRACEVFORK)
                child->ptrace |= PT_TRACE_VFORK;

        if (data & PTRACE_O_TRACECLONE)
                child->ptrace |= PT_TRACE_CLONE;

        if (data & PTRACE_O_TRACEEXEC)
                child->ptrace |= PT_TRACE_EXEC;

        if (data & PTRACE_O_TRACEVFORKDONE)
                child->ptrace |= PT_TRACE_VFORK_DONE;

        if (data & PTRACE_O_TRACEEXIT)
                child->ptrace |= PT_TRACE_EXIT;

        return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}
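/*
 * Userspace selects these bits with PTRACE_SETOPTIONS; a typical tracer
 * call (sketch only) would be:
 *
 *      ptrace(PTRACE_SETOPTIONS, pid, 0,
 *             PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK | PTRACE_O_TRACEEXIT);
 *
 * Any bit outside PTRACE_O_MASK makes the call fail with -EINVAL, as the
 * return statement above enforces.
 */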
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t __user * data)
{
        siginfo_t lastinfo;
        int error = -ESRCH;

        read_lock(&tasklist_lock);
        if (likely(child->sighand != NULL)) {
                error = -EINVAL;
                spin_lock_irq(&child->sighand->siglock);
                if (likely(child->last_siginfo != NULL)) {
                        lastinfo = *child->last_siginfo;
                        error = 0;
                }
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);
        if (!error)
                return copy_siginfo_to_user(data, &lastinfo);
        return error;
}
static int ptrace_setsiginfo(struct task_struct *child, siginfo_t __user * data)
{
        siginfo_t newinfo;
        int error = -ESRCH;

        if (copy_from_user(&newinfo, data, sizeof (siginfo_t)))
                return -EFAULT;

        read_lock(&tasklist_lock);
        if (likely(child->sighand != NULL)) {
                error = -EINVAL;
                spin_lock_irq(&child->sighand->siglock);
                if (likely(child->last_siginfo != NULL)) {
                        *child->last_siginfo = newinfo;
                        error = 0;
                }
                spin_unlock_irq(&child->sighand->siglock);
        }
        read_unlock(&tasklist_lock);
        return error;
}
int ptrace_request(struct task_struct *child, long request,
                   long addr, long data)
{
        int ret = -EIO;

        switch (request) {
#ifdef PTRACE_OLDSETOPTIONS
        case PTRACE_OLDSETOPTIONS:
#endif
        case PTRACE_SETOPTIONS:
                ret = ptrace_setoptions(child, data);
                break;
        case PTRACE_GETEVENTMSG:
                ret = put_user(child->ptrace_message, (unsigned long __user *) data);
                break;
        case PTRACE_GETSIGINFO:
                ret = ptrace_getsiginfo(child, (siginfo_t __user *) data);
                break;
        case PTRACE_SETSIGINFO:
                ret = ptrace_setsiginfo(child, (siginfo_t __user *) data);
                break;
        default:
                break;
        }

        return ret;
}
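/*
 * ptrace_request() handles only the architecture-independent requests; the
 * per-architecture arch_ptrace() implementations are expected to fall back
 * to it for the requests above while handling register and memory access
 * requests themselves.
 */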
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
int ptrace_traceme(void)
{
        int ret = -EPERM;

        /*
         * Are we already being traced?
         */
        task_lock(current);
        if (!(current->ptrace & PT_PTRACED)) {
                ret = security_ptrace(current->parent, current);
                /*
                 * Set the ptrace bit in the process ptrace flags.
                 */
                if (!ret)
                        current->ptrace |= PT_PTRACED;
        }
        task_unlock(current);
        return ret;
}
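/*
 * The usual caller pattern in userspace (sketch only): the child asks to be
 * traced by its parent and then execs the program to be debugged,
 *
 *      pid_t pid = fork();
 *      if (pid == 0) {
 *              ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *              execv(path, argv);
 *      }
 *      waitpid(pid, &status, 0);       // parent sees the post-exec stop
 *
 * which is why the security check above is made against current->parent.
 */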
/**
 * ptrace_get_task_struct  --  grab a task struct reference for ptrace
 * @pid: process id to grab a task_struct reference of
 *
 * This function is a helper for ptrace implementations.  It checks
 * permissions and then grabs a task struct for use of the actual
 * ptrace implementation.
 *
 * Returns the task_struct for @pid or an ERR_PTR() on failure.
 */
struct task_struct *ptrace_get_task_struct(pid_t pid)
{
        struct task_struct *child;

        /*
         * Tracing init is not allowed.
         */
        if (pid == 1)
                return ERR_PTR(-EPERM);

        read_lock(&tasklist_lock);
        child = find_task_by_pid(pid);
        if (child)
                get_task_struct(child);
        read_unlock(&tasklist_lock);
        if (!child)
                return ERR_PTR(-ESRCH);
        return child;
}
#ifndef __ARCH_SYS_PTRACE
asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
        struct task_struct *child;
        long ret;

        /*
         * This lock_kernel fixes a subtle race with suid exec
         */
        lock_kernel();
        if (request == PTRACE_TRACEME) {
                ret = ptrace_traceme();
                goto out;
        }

        child = ptrace_get_task_struct(pid);
        if (IS_ERR(child)) {
                ret = PTR_ERR(child);
                goto out;
        }

        ret = -EPERM;
        if (!vx_check(vx_task_xid(child), VX_WATCH|VX_IDENT))
                goto out_put_task_struct;

        if (request == PTRACE_ATTACH) {
                ret = ptrace_attach(child);
                goto out_put_task_struct;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL);
        if (ret < 0)
                goto out_put_task_struct;

        ret = arch_ptrace(child, request, addr, data);
        if (ret < 0)
                goto out_put_task_struct;

 out_put_task_struct:
        put_task_struct(child);
 out:
        unlock_kernel();
        return ret;
}
#endif /* __ARCH_SYS_PTRACE */
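/*
 * Dispatch summary for sys_ptrace() above: PTRACE_TRACEME is handled by
 * ptrace_traceme() on current, PTRACE_ATTACH by ptrace_attach(), and every
 * other request first goes through ptrace_check_attach() (which leaves the
 * tracee quiescent in TASK_TRACED unless the request is PTRACE_KILL) before
 * being passed to the architecture's arch_ptrace().  The vx_check() line
 * comes from the Linux-VServer patch: it restricts tracing to tasks that
 * are visible from the caller's context.
 */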