/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/vs_cvirt.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(task_t *child, task_t *new_parent)
{
	BUG_ON(!list_empty(&child->ptrace_list));
	if (child->parent == new_parent)
		return;
	list_add(&child->ptrace_list, &child->parent->ptrace_children);
	remove_parent(child);
	child->parent = new_parent;
	add_parent(child);
}

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL. If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
void ptrace_untrace(task_t *child)
{
	spin_lock(&child->sighand->siglock);
	if (child->state == TASK_TRACED) {
		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
			child->state = TASK_STOPPED;
		} else {
			signal_wake_up(child, 1);
		}
	}
	spin_unlock(&child->sighand->siglock);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(task_t *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	if (!list_empty(&child->ptrace_list)) {
		list_del_init(&child->ptrace_list);
		remove_parent(child);
		child->parent = child->real_parent;
		add_parent(child);
	}

	if (child->state == TASK_TRACED)
		ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks. After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current &&
	    (!(child->ptrace & PT_ATTACHED) || child->real_parent != current)
	    && child->signal != NULL) {
		ret = 0;
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED) {
			child->state = TASK_TRACED;
		} else if (child->state != TASK_TRACED && !kill) {
			ret = -ESRCH;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	/* All systems go.. */
	return ret;
}

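/*
 * Example (sketch): from user space this check is why most requests fail
 * with ESRCH unless the tracee is actually stopped. Assuming a tracer
 * that has already attached to "pid", a request issued while the child
 * is still running is rejected:
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *) addr, NULL);
 *	if (word == -1 && errno == ESRCH)
 *		;	/* tracee not in TASK_TRACED: wait for it first */
 */
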
int __ptrace_may_attach(struct task_struct *task)
{
	if (!task->mm)
		return -EPERM;
	if (((current->uid != task->euid) ||
	     (current->uid != task->suid) ||
	     (current->uid != task->uid) ||
	     (current->gid != task->egid) ||
	     (current->gid != task->sgid) ||
	     (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
		return -EPERM;
	smp_rmb();
	if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	return security_ptrace(current, task);
}

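/*
 * Example (sketch): these checks are what make an unprivileged
 * ptrace(PTRACE_ATTACH, ...) on another user's process fail. Assuming a
 * caller without CAP_SYS_PTRACE whose uid/gid do not match all of the
 * target's real, effective and saved ids:
 *
 *	if (ptrace(PTRACE_ATTACH, victim_pid, NULL, NULL) == -1)
 *		perror("ptrace");	/* EPERM from __ptrace_may_attach() */
 *
 * The same applies to a target whose mm is marked non-dumpable, e.g.
 * after a setuid exec.
 */
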
int ptrace_may_attach(struct task_struct *task)
{
	int err;
	task_lock(task);
	err = __ptrace_may_attach(task);
	task_unlock(task);
	return !err;
}

int ptrace_attach(struct task_struct *task)
{
	int retval;

	retval = -EPERM;
	if (task->pid <= 1)
		goto out;
	if (task->tgid == current->tgid)
		goto out;

repeat:
	/*
	 * Nasty, nasty.
	 *
	 * We want to hold both the task-lock and the
	 * tasklist_lock for writing at the same time.
	 * But that's against the rules (tasklist_lock
	 * is taken for reading by interrupts on other
	 * cpu's that may have task_lock).
	 */
	task_lock(task);
	local_irq_disable();
	if (!write_trylock(&tasklist_lock)) {
		local_irq_enable();
		task_unlock(task);
		do {
			cpu_relax();
		} while (!write_can_lock(&tasklist_lock));
		goto repeat;
	}

	/* the same process cannot be attached many times */
	if (task->ptrace & PT_PTRACED)
		goto bad;
	retval = __ptrace_may_attach(task);
	if (retval)
		goto bad;

	/* Go */
	task->ptrace |= PT_PTRACED | ((task->real_parent != current)
				      ? PT_ATTACHED : 0);
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;

	__ptrace_link(task, current);

	force_sig_specific(SIGSTOP, task);

bad:
	write_unlock_irq(&tasklist_lock);
	task_unlock(task);
out:
	return retval;
}

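/*
 * Example (sketch): because ptrace_attach() queues a SIGSTOP, a
 * user-space tracer must reap that initial stop before issuing further
 * requests. Assuming "pid" names an attachable process:
 *
 *	int status;
 *
 *	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
 *		err(1, "PTRACE_ATTACH");
 *	if (waitpid(pid, &status, 0) == -1 || !WIFSTOPPED(status))
 *		errx(1, "tracee did not stop");
 *	/* tracee is now TASK_TRACED; requests pass ptrace_check_attach() */
 */
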
void __ptrace_detach(struct task_struct *child, unsigned int data)
{
	child->exit_code = data;
	/* .. re-parent .. */
	__ptrace_unlink(child);
	/* .. and wake it up. */
	if (child->exit_state != EXIT_ZOMBIE)
		wake_up_process(child);
}

int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	if (child->ptrace)
		__ptrace_detach(child, data);
	write_unlock_irq(&tasklist_lock);

	return 0;
}

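/*
 * Example (sketch): the "data" argument is the signal the tracee resumes
 * with, which is why it must pass valid_signal(). A tracer detaching and
 * letting the child continue with SIGCONT might do:
 *
 *	if (ptrace(PTRACE_DETACH, pid, NULL, (void *) SIGCONT) == -1)
 *		perror("PTRACE_DETACH");	/* EIO for a bogus signal */
 */
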
/*
 * Access another process' address space.
 * Source/target buffer must be kernel space,
 * Do not walk the page table directly, use get_user_pages
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;

		ret = get_user_pages(tsk, mm, addr, 1,
				write, 1, &page, &vma);
		if (ret <= 0)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	return buf - old_buf;
}

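/*
 * Example (sketch): a kernel caller reads a tracee's memory through this
 * interface rather than touching page tables itself. Assuming "child" is
 * a task reference the caller holds:
 *
 *	u32 val;
 *
 *	if (access_process_vm(child, addr, &val, sizeof(val), 0)
 *			!= sizeof(val))
 *		return -EIO;	/* partial or failed transfer */
 *
 * ptrace_readdata()/ptrace_writedata() below are this pattern in a loop,
 * with a bounce buffer between kernel and user space.
 */
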
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

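/*
 * Example (sketch): user space drives these helpers indirectly;
 * word-sized PTRACE_PEEKDATA/PTRACE_POKEDATA requests are typically
 * served by the architecture code via access_process_vm(), while block
 * transfers on some architectures use the two routines above. Assuming
 * an attached, stopped tracee:
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *) addr, NULL);
 *	if (word == -1 && errno)
 *		err(1, "PTRACE_PEEKDATA");
 *	if (ptrace(PTRACE_POKEDATA, pid, (void *) addr,
 *		   (void *) word) == -1)
 *		err(1, "PTRACE_POKEDATA");
 */
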
static int ptrace_setoptions(struct task_struct *child, long data)
{
	child->ptrace &= ~PT_TRACE_MASK;

	if (data & PTRACE_O_TRACESYSGOOD)
		child->ptrace |= PT_TRACESYSGOOD;

	if (data & PTRACE_O_TRACEFORK)
		child->ptrace |= PT_TRACE_FORK;

	if (data & PTRACE_O_TRACEVFORK)
		child->ptrace |= PT_TRACE_VFORK;

	if (data & PTRACE_O_TRACECLONE)
		child->ptrace |= PT_TRACE_CLONE;

	if (data & PTRACE_O_TRACEEXEC)
		child->ptrace |= PT_TRACE_EXEC;

	if (data & PTRACE_O_TRACEVFORKDONE)
		child->ptrace |= PT_TRACE_VFORK_DONE;

	if (data & PTRACE_O_TRACEEXIT)
		child->ptrace |= PT_TRACE_EXIT;

	return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}

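/*
 * Example (sketch): a tracer that wants syscall stops to be
 * distinguishable and wants to follow forks could set, on a stopped
 * tracee:
 *
 *	if (ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *		   (void *) (PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK))
 *			== -1)
 *		perror("PTRACE_SETOPTIONS");	/* EINVAL for unknown bits */
 */
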
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t __user * data)
{
	siginfo_t lastinfo;
	int error = -ESRCH;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			lastinfo = *child->last_siginfo;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	if (!error)
		return copy_siginfo_to_user(data, &lastinfo);
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, siginfo_t __user * data)
{
	siginfo_t newinfo;
	int error = -ESRCH;

	if (copy_from_user(&newinfo, data, sizeof (siginfo_t)))
		return -EFAULT;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = newinfo;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	return error;
}

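/*
 * Example (sketch): at a signal stop the tracer can look at, or replace,
 * the siginfo the tracee stopped on. Reading it out from user space:
 *
 *	siginfo_t si;
 *
 *	if (ptrace(PTRACE_GETSIGINFO, pid, NULL, &si) == -1)
 *		perror("PTRACE_GETSIGINFO");	/* EINVAL outside a stop */
 *	else
 *		printf("stopped on signal %d\n", si.si_signo);
 */
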
int ptrace_request(struct task_struct *child, long request,
		   long addr, long data)
{
	int ret = -EIO;

	switch (request) {
#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, (unsigned long __user *) data);
		break;
	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, (siginfo_t __user *) data);
		break;
	case PTRACE_SETSIGINFO:
		ret = ptrace_setsiginfo(child, (siginfo_t __user *) data);
		break;
	default:
		break;
	}

	return ret;
}

/**
 * ptrace_traceme -- helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
int ptrace_traceme(void)
{
	int ret = -EPERM;

	/*
	 * Are we already being traced?
	 */
	task_lock(current);
	if (!(current->ptrace & PT_PTRACED)) {
		ret = security_ptrace(current->parent, current);
		/*
		 * Set the ptrace bit in the process ptrace flags.
		 */
		if (!ret)
			current->ptrace |= PT_PTRACED;
	}
	task_unlock(current);
	return ret;
}

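/*
 * Example (sketch): the canonical user of this helper is a debugger's
 * child half, which asks to be traced and then execs the target, so the
 * parent sees the tracee stop with SIGTRAP on the successful exec:
 *
 *	int status;
 *	pid_t pid = fork();
 *
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execl("/bin/true", "true", NULL);
 *		_exit(127);
 *	}
 *	waitpid(pid, &status, 0);	/* child stops at the exec */
 */
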
/**
 * ptrace_get_task_struct -- grab a task struct reference for ptrace
 * @pid: process id to grab a task_struct reference of
 *
 * This function is a helper for ptrace implementations. It checks
 * permissions and then grabs a task struct for use of the actual
 * ptrace implementation.
 *
 * Returns the task_struct for @pid or an ERR_PTR() on failure.
 */
struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	/*
	 * Tracing init is not allowed.
	 */
	if (pid == 1)
		return ERR_PTR(-EPERM);

	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef __ARCH_SYS_PTRACE
asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	long ret;

	/*
	 * This lock_kernel fixes a subtle race with suid exec
	 */
	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	ret = -EPERM;
	if (!vx_check(vx_task_xid(child), VX_WATCH|VX_IDENT))
		goto out_put_task_struct;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret < 0)
		goto out_put_task_struct;

 out_put_task_struct:
	put_task_struct(child);
 out:
	unlock_kernel();
	return ret;
}
#endif /* __ARCH_SYS_PTRACE */
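
/*
 * Example (sketch): tying the paths above together, a minimal user-space
 * tracer that attaches, reads one word, and detaches. All names are
 * illustrative:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <errno.h>
 *	#include <err.h>
 *
 *	long peek_one(pid_t pid, unsigned long addr)
 *	{
 *		int status;
 *		long word;
 *
 *		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
 *			err(1, "attach");		// ptrace_attach()
 *		if (waitpid(pid, &status, 0) == -1)
 *			err(1, "waitpid");		// reap the SIGSTOP
 *		errno = 0;
 *		word = ptrace(PTRACE_PEEKDATA, pid,	// arch_ptrace()
 *			      (void *) addr, NULL);
 *		if (word == -1 && errno)
 *			err(1, "peek");
 *		if (ptrace(PTRACE_DETACH, pid, NULL, NULL) == -1)
 *			err(1, "detach");		// ptrace_detach()
 *		return word;
 *	}
 */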