/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(task_t *child, task_t *new_parent)
{
	if (!list_empty(&child->ptrace_list))
		BUG();
	if (child->parent == new_parent)
		return;
	list_add(&child->ptrace_list, &child->parent->ptrace_children);
	REMOVE_LINKS(child);
	child->parent = new_parent;
	SET_LINKS(child);
}

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(task_t *child)
{
	if (!child->ptrace)
		BUG();
	child->ptrace = 0;
	if (list_empty(&child->ptrace_list))
		return;
	list_del_init(&child->ptrace_list);
	REMOVE_LINKS(child);
	child->parent = child->real_parent;
	SET_LINKS(child);
}

/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	if (!(child->ptrace & PT_PTRACED))
		return -ESRCH;

	if (child->parent != current)
		return -ESRCH;

	if (!kill) {
		if (child->state != TASK_STOPPED)
			return -ESRCH;
		wait_task_inactive(child);
	}

	/* All systems go.. */
	return 0;
}

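/*
 * Illustrative caller sketch (not part of this file): the arch-specific
 * sys_ptrace() implementations typically gate request handling on this,
 * passing "request == PTRACE_KILL" as @kill so that PTRACE_KILL is
 * serviced even when the child is not stopped:
 *
 *	ret = ptrace_check_attach(child, request == PTRACE_KILL);
 *	if (ret < 0)
 *		goto out;
 */
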
int ptrace_attach(struct task_struct *task)
{
	int retval;
	task_lock(task);
	retval = -EPERM;
	if (task->pid <= 1)
		goto bad;
	if (task == current)
		goto bad;
	if (!task->mm)
		goto bad;
	if(((current->uid != task->euid) ||
	    (current->uid != task->suid) ||
	    (current->uid != task->uid) ||
	    (current->gid != task->egid) ||
	    (current->gid != task->sgid) ||
	    (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
		goto bad;

	if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
		goto bad;
	/* the same process cannot be attached many times */
	if (task->ptrace & PT_PTRACED)
		goto bad;
	retval = security_ptrace(current, task);
	if (retval)
		goto bad;

	/* Go */
	task->ptrace |= PT_PTRACED;
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;
	task_unlock(task);

	write_lock_irq(&tasklist_lock);
	__ptrace_link(task, current);
	write_unlock_irq(&tasklist_lock);

	force_sig_specific(SIGSTOP, task);
	return 0;

bad:
	task_unlock(task);
	return retval;
}

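/*
 * Illustrative user-space counterpart (not kernel code): a debugger
 * attaches and then waits for the forced SIGSTOP above to be reported
 * via wait():
 *
 *	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
 *		perror("PTRACE_ATTACH");
 *	waitpid(pid, &status, 0);
 */
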
int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if ((unsigned long) data > _NSIG)
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	/* .. re-parent .. */
	child->exit_code = data;

	write_lock_irq(&tasklist_lock);
	__ptrace_unlink(child);
	/* .. and wake it up. */
	if (child->state != TASK_ZOMBIE)
		wake_up_process(child);
	write_unlock_irq(&tasklist_lock);

	return 0;
}

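/*
 * Note: @data is a signal number (hence the _NSIG check above); it is
 * left in ->exit_code so the child can take it as it resumes, 0 meaning
 * no signal.  The matching user-space call is
 * ptrace(PTRACE_DETACH, pid, NULL, sig).
 */
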
/*
 * Access another process' address space.
 * Source/target buffer must be kernel space.
 * Do not walk the page table directly, use get_user_pages
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;

		ret = get_user_pages(tsk, mm, addr, 1,
				write, 1, &page, &vma);
		if (ret <= 0)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		flush_cache_page(vma, addr);

		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	return buf - old_buf;
}

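/*
 * The return value is the number of bytes actually copied, which may be
 * short if get_user_pages() fails part way through; ptrace_readdata()
 * and ptrace_writedata() below loop on it for exactly that reason.
 */
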
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

static int ptrace_setoptions(struct task_struct *child, long data)
{
	child->ptrace &= ~PT_TRACE_MASK;

	if (data & PTRACE_O_TRACESYSGOOD)
		child->ptrace |= PT_TRACESYSGOOD;

	if (data & PTRACE_O_TRACEFORK)
		child->ptrace |= PT_TRACE_FORK;

	if (data & PTRACE_O_TRACEVFORK)
		child->ptrace |= PT_TRACE_VFORK;

	if (data & PTRACE_O_TRACECLONE)
		child->ptrace |= PT_TRACE_CLONE;

	if (data & PTRACE_O_TRACEEXEC)
		child->ptrace |= PT_TRACE_EXEC;

	if (data & PTRACE_O_TRACEVFORKDONE)
		child->ptrace |= PT_TRACE_VFORK_DONE;

	if (data & PTRACE_O_TRACEEXIT)
		child->ptrace |= PT_TRACE_EXIT;

	return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}

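/*
 * Illustrative user-space usage (not kernel code): a tracer enables the
 * extended events on an already-stopped tracee, e.g.
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *	       PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK);
 *
 * Option bits outside PTRACE_O_MASK make the request return -EINVAL.
 */
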
static int ptrace_getsiginfo(struct task_struct *child, siginfo_t __user * data)
{
	if (child->last_siginfo == NULL)
		return -EINVAL;
	return copy_siginfo_to_user(data, child->last_siginfo);
}

static int ptrace_setsiginfo(struct task_struct *child, siginfo_t __user * data)
{
	if (child->last_siginfo == NULL)
		return -EINVAL;
	if (copy_from_user(child->last_siginfo, data, sizeof (siginfo_t)) != 0)
		return -EFAULT;
	return 0;
}

int ptrace_request(struct task_struct *child, long request,
		   long addr, long data)
{
	int ret = -EIO;

	switch (request) {
#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, (unsigned long __user *) data);
		break;
	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, (siginfo_t __user *) data);
		break;
	case PTRACE_SETSIGINFO:
		ret = ptrace_setsiginfo(child, (siginfo_t __user *) data);
		break;
	default:
		break;
	}

	return ret;
}

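/*
 * ptrace_request() covers the architecture-independent requests; the
 * arch-specific sys_ptrace() implementations typically fall through to
 * it from their default: case for anything they do not handle locally.
 */
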
void ptrace_notify(int exit_code)
{
	BUG_ON (!(current->ptrace & PT_PTRACED));

	/* Let the debugger run.  */
	current->exit_code = exit_code;
	set_current_state(TASK_STOPPED);
	notify_parent(current, SIGCHLD);
	schedule();

	/*
	 * Signals sent while we were stopped might set TIF_SIGPENDING.
	 */
	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

EXPORT_SYMBOL(ptrace_notify);