/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * x86-64 port 2000-2002 Andi Kleen
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/proto.h>
/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */
/* determines which flags the user has access to. */
/* 1 = access 0 = no access */
#define FLAG_MASK 0x44dd5UL

/* sets the trap flag. */
#define TRAP_FLAG 0x100UL

/*
 * eflags and offset of eflags on child stack..
 */
#define EFLAGS offsetof(struct pt_regs, eflags)
/* Offset of eflags relative to the *top* of pt_regs (negative, hence int). */
#define EFL_OFFSET ((int)(EFLAGS-sizeof(struct pt_regs)))
51 * this routine will get a word off of the processes privileged stack.
52 * the offset is how far from the base addr as stored in the TSS.
53 * this routine assumes that all the privileged stacks are in our
56 static inline unsigned long get_stack_long(struct task_struct *task, int offset)
60 stack = (unsigned char *)task->thread.rsp0;
62 return (*((unsigned long *)stack));
66 * this routine will put a word on the processes privileged stack.
67 * the offset is how far from the base addr as stored in the TSS.
68 * this routine assumes that all the privileged stacks are in our
71 static inline long put_stack_long(struct task_struct *task, int offset,
74 unsigned char * stack;
76 stack = (unsigned char *) task->thread.rsp0;
78 *(unsigned long *) stack = data;
83 * Called by kernel/ptrace.c when detaching..
85 * Make sure the single step bit is not set.
87 void ptrace_disable(struct task_struct *child)
91 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
92 tmp = get_stack_long(child, EFL_OFFSET) & ~TRAP_FLAG;
93 put_stack_long(child, EFL_OFFSET, tmp);
96 static int putreg(struct task_struct *child,
97 unsigned long regno, unsigned long value)
101 /* Some code in the 64bit emulation may not be 64bit clean.
102 Don't take any chances. */
103 if (test_tsk_thread_flag(child, TIF_IA32))
106 case offsetof(struct user_regs_struct,fs):
107 if (value && (value & 3) != 3)
109 child->thread.fsindex = value & 0xffff;
111 case offsetof(struct user_regs_struct,gs):
112 if (value && (value & 3) != 3)
114 child->thread.gsindex = value & 0xffff;
116 case offsetof(struct user_regs_struct,ds):
117 if (value && (value & 3) != 3)
119 child->thread.ds = value & 0xffff;
121 case offsetof(struct user_regs_struct,es):
122 if (value && (value & 3) != 3)
124 child->thread.es = value & 0xffff;
126 case offsetof(struct user_regs_struct,ss):
127 if ((value & 3) != 3)
131 case offsetof(struct user_regs_struct,fs_base):
132 if (value >= TASK_SIZE)
134 child->thread.fs = value;
136 case offsetof(struct user_regs_struct,gs_base):
137 if (value >= TASK_SIZE)
139 child->thread.gs = value;
141 case offsetof(struct user_regs_struct, eflags):
143 tmp = get_stack_long(child, EFL_OFFSET);
147 case offsetof(struct user_regs_struct,cs):
148 if ((value & 3) != 3)
152 case offsetof(struct user_regs_struct, rip):
153 /* Check if the new RIP address is canonical */
154 if (value >= TASK_SIZE)
158 put_stack_long(child, regno - sizeof(struct pt_regs), value);
162 static unsigned long getreg(struct task_struct *child, unsigned long regno)
166 case offsetof(struct user_regs_struct, fs):
167 return child->thread.fsindex;
168 case offsetof(struct user_regs_struct, gs):
169 return child->thread.gsindex;
170 case offsetof(struct user_regs_struct, ds):
171 return child->thread.ds;
172 case offsetof(struct user_regs_struct, es):
173 return child->thread.es;
174 case offsetof(struct user_regs_struct, fs_base):
175 return child->thread.fs;
176 case offsetof(struct user_regs_struct, gs_base):
177 return child->thread.gs;
179 regno = regno - sizeof(struct pt_regs);
180 val = get_stack_long(child, regno);
181 if (test_tsk_thread_flag(child, TIF_IA32))
188 asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, long data)
190 struct task_struct *child;
194 /* This lock_kernel fixes a subtle race with suid exec */
197 if (request == PTRACE_TRACEME) {
198 /* are we already being traced? */
199 if (current->ptrace & PT_PTRACED)
201 ret = security_ptrace(current->parent, current);
204 /* set the ptrace bit in the process flags. */
205 current->ptrace |= PT_PTRACED;
210 read_lock(&tasklist_lock);
211 child = find_task_by_pid(pid);
213 get_task_struct(child);
214 read_unlock(&tasklist_lock);
217 if (!vx_check(vx_task_xid(child), VX_WATCH|VX_IDENT))
221 if (pid == 1) /* you may not mess with init */
224 if (request == PTRACE_ATTACH) {
225 ret = ptrace_attach(child);
228 ret = ptrace_check_attach(child, request == PTRACE_KILL);
233 /* when I and D space are separate, these will need to be fixed. */
234 case PTRACE_PEEKTEXT: /* read word at location addr. */
235 case PTRACE_PEEKDATA: {
239 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
241 if (copied != sizeof(tmp))
243 ret = put_user(tmp,(unsigned long __user *) data);
247 /* read the word at location addr in the USER area. */
248 case PTRACE_PEEKUSR: {
253 addr > sizeof(struct user) - 7)
257 case 0 ... sizeof(struct user_regs_struct):
258 tmp = getreg(child, addr);
260 case offsetof(struct user, u_debugreg[0]):
261 tmp = child->thread.debugreg0;
263 case offsetof(struct user, u_debugreg[1]):
264 tmp = child->thread.debugreg1;
266 case offsetof(struct user, u_debugreg[2]):
267 tmp = child->thread.debugreg2;
269 case offsetof(struct user, u_debugreg[3]):
270 tmp = child->thread.debugreg3;
272 case offsetof(struct user, u_debugreg[6]):
273 tmp = child->thread.debugreg6;
275 case offsetof(struct user, u_debugreg[7]):
276 tmp = child->thread.debugreg7;
282 ret = put_user(tmp,(unsigned long __user *) data);
286 /* when I and D space are separate, this will have to be fixed. */
287 case PTRACE_POKETEXT: /* write the word at location addr. */
288 case PTRACE_POKEDATA:
290 if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
295 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
298 addr > sizeof(struct user) - 7)
302 case 0 ... sizeof(struct user_regs_struct):
303 ret = putreg(child, addr, data);
305 /* Disallows to set a breakpoint into the vsyscall */
306 case offsetof(struct user, u_debugreg[0]):
307 if (data >= TASK_SIZE-7) break;
308 child->thread.debugreg0 = data;
311 case offsetof(struct user, u_debugreg[1]):
312 if (data >= TASK_SIZE-7) break;
313 child->thread.debugreg1 = data;
316 case offsetof(struct user, u_debugreg[2]):
317 if (data >= TASK_SIZE-7) break;
318 child->thread.debugreg2 = data;
321 case offsetof(struct user, u_debugreg[3]):
322 if (data >= TASK_SIZE-7) break;
323 child->thread.debugreg3 = data;
326 case offsetof(struct user, u_debugreg[6]):
329 child->thread.debugreg6 = data;
332 case offsetof(struct user, u_debugreg[7]):
333 /* See arch/i386/kernel/ptrace.c for an explanation of
334 * this awkward check.*/
335 data &= ~DR_CONTROL_RESERVED;
337 if ((0x5454 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
340 child->thread.debugreg7 = data;
346 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
347 case PTRACE_CONT: { /* restart after signal. */
351 if ((unsigned long) data > _NSIG)
353 if (request == PTRACE_SYSCALL)
354 set_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
356 clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
357 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
358 child->exit_code = data;
359 /* make sure the single step bit is not set. */
360 tmp = get_stack_long(child, EFL_OFFSET);
362 put_stack_long(child, EFL_OFFSET,tmp);
363 wake_up_process(child);
368 #ifdef CONFIG_IA32_EMULATION
369 /* This makes only sense with 32bit programs. Allow a
370 64bit debugger to fully examine them too. Better
371 don't use it against 64bit processes, use
372 PTRACE_ARCH_PRCTL instead. */
373 case PTRACE_SET_THREAD_AREA: {
374 struct user_desc __user *p;
376 p = (struct user_desc __user *)data;
377 get_user(old, &p->entry_number);
378 put_user(addr, &p->entry_number);
379 ret = do_set_thread_area(&child->thread, p);
380 put_user(old, &p->entry_number);
382 case PTRACE_GET_THREAD_AREA:
383 p = (struct user_desc __user *)data;
384 get_user(old, &p->entry_number);
385 put_user(addr, &p->entry_number);
386 ret = do_get_thread_area(&child->thread, p);
387 put_user(old, &p->entry_number);
391 /* normal 64bit interface to access TLS data.
392 Works just like arch_prctl, except that the arguments
394 case PTRACE_ARCH_PRCTL:
395 ret = do_arch_prctl(child, data, addr);
399 * make the child exit. Best I can do is send it a sigkill.
400 * perhaps it should be put in the status that it wants to
407 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
409 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
410 child->exit_code = SIGKILL;
411 /* make sure the single step bit is not set. */
412 tmp = get_stack_long(child, EFL_OFFSET) & ~TRAP_FLAG;
413 put_stack_long(child, EFL_OFFSET, tmp);
414 wake_up_process(child);
418 case PTRACE_SINGLESTEP: { /* set the trap flag. */
422 if ((unsigned long) data > _NSIG)
424 clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
425 if ((child->ptrace & PT_DTRACE) == 0) {
426 /* Spurious delayed TF traps may occur */
427 child->ptrace |= PT_DTRACE;
429 tmp = get_stack_long(child, EFL_OFFSET) | TRAP_FLAG;
430 put_stack_long(child, EFL_OFFSET, tmp);
431 set_tsk_thread_flag(child, TIF_SINGLESTEP);
432 child->exit_code = data;
433 /* give it a chance to run. */
434 wake_up_process(child);
440 /* detach a process that was attached. */
441 ret = ptrace_detach(child, data);
444 case PTRACE_GETREGS: { /* Get all gp regs from the child. */
445 if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
446 sizeof(struct user_regs_struct))) {
451 for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
452 ret |= __put_user(getreg(child, ui),(unsigned long __user *) data);
453 data += sizeof(long);
458 case PTRACE_SETREGS: { /* Set all gp regs in the child. */
460 if (!access_ok(VERIFY_READ, (unsigned __user *)data,
461 sizeof(struct user_regs_struct))) {
466 for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
467 ret |= __get_user(tmp, (unsigned long __user *) data);
468 putreg(child, ui, tmp);
469 data += sizeof(long);
474 case PTRACE_GETFPREGS: { /* Get the child extended FPU state. */
475 if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
476 sizeof(struct user_i387_struct))) {
480 ret = get_fpregs((struct user_i387_struct __user *)data, child);
484 case PTRACE_SETFPREGS: { /* Set the child extended FPU state. */
485 if (!access_ok(VERIFY_READ, (unsigned __user *)data,
486 sizeof(struct user_i387_struct))) {
490 set_stopped_child_used_math(child);
491 ret = set_fpregs(child, (struct user_i387_struct __user *)data);
496 ret = ptrace_request(child, request, addr, data);
500 put_task_struct(child);
506 static void syscall_trace(struct pt_regs *regs)
510 printk("trace %s rip %lx rsp %lx rax %d origrax %d caller %lx tiflags %x ptrace %x\n",
512 regs->rip, regs->rsp, regs->rax, regs->orig_rax, __builtin_return_address(0),
513 current_thread_info()->flags, current->ptrace);
516 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
519 * this isn't the same as continuing with a signal, but it will do
520 * for normal use. strace only continues with a signal if the
521 * stopping signal is not SIGTRAP. -brl
523 if (current->exit_code) {
524 send_sig(current->exit_code, current, 1);
525 current->exit_code = 0;
529 asmlinkage void syscall_trace_enter(struct pt_regs *regs)
531 if (unlikely(current->audit_context))
532 audit_syscall_entry(current, regs->orig_rax,
533 regs->rdi, regs->rsi,
534 regs->rdx, regs->r10);
536 if (test_thread_flag(TIF_SYSCALL_TRACE)
537 && (current->ptrace & PT_PTRACED))
541 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
543 if (unlikely(current->audit_context))
544 audit_syscall_exit(current, regs->rax);
546 if ((test_thread_flag(TIF_SYSCALL_TRACE)
547 || test_thread_flag(TIF_SINGLESTEP))
548 && (current->ptrace & PT_PTRACED))