2 /* By Ross Biro 1/23/92 */
4 * Pentium III FXSR, SSE support
5 * Gareth Hughes <gareth@valinux.com>, May 2000
7 * x86-64 port 2000-2002 Andi Kleen
10 #include <linux/kernel.h>
11 #include <linux/sched.h>
13 #include <linux/smp.h>
14 #include <linux/smp_lock.h>
15 #include <linux/errno.h>
16 #include <linux/tracehook.h>
17 #include <linux/ptrace.h>
18 #include <linux/user.h>
19 #include <linux/security.h>
20 #include <linux/audit.h>
21 #include <linux/seccomp.h>
22 #include <linux/signal.h>
23 #include <linux/module.h>
25 #include <asm/uaccess.h>
26 #include <asm/pgtable.h>
27 #include <asm/system.h>
28 #include <asm/processor.h>
30 #include <asm/debugreg.h>
33 #include <asm/proto.h>
35 #include <asm/prctl.h>
38 * Does not yet catch signals sent when the child dies;
39 * that is handled in exit.c or in signal.c.
43 * Determines which flags the user has access to [1 = access, 0 = no access].
44 * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
45 * Also masks reserved bits (63-22, 15, 5, 3, 1).
47 #define FLAG_MASK 0x54dd5UL
49 /* Sets the trap flag (TF, bit 8 of eflags), used for single-stepping. */
50 #define TRAP_FLAG 0x100UL
53 * eflags and offset of eflags on child stack..
/*
 * EFL_OFFSET is negative: pt_regs lives at the top of the kernel stack, so
 * members are addressed relative to thread.rsp0 (see get/put_stack_long).
 */
55 #define EFLAGS offsetof(struct pt_regs, eflags)
56 #define EFL_OFFSET ((int)(EFLAGS-sizeof(struct pt_regs)))
/*
 * Read one word from the child's kernel (privileged) stack.
 * @offset is a (typically negative) byte offset from thread.rsp0, e.g.
 * EFL_OFFSET.  NOTE(review): this listing is missing interior lines; the
 * offset is presumably added to `stack` before the dereference -- confirm
 * against the complete file.
 */
59 * this routine will get a word off of the processes privileged stack.
60 * the offset is how far from the base addr as stored in the TSS.
61 * this routine assumes that all the privileged stacks are in our
64 static inline unsigned long get_stack_long(struct task_struct *task, int offset)
68 stack = (unsigned char *)task->thread.rsp0;
70 return (*((unsigned long *)stack));
/*
 * Write one word onto the child's kernel (privileged) stack; counterpart of
 * get_stack_long().  @offset is a byte offset relative to thread.rsp0.
 * NOTE(review): missing lines in this listing presumably add @offset to
 * `stack` and return a status -- confirm against the complete file.
 */
74 * this routine will put a word on the processes privileged stack.
75 * the offset is how far from the base addr as stored in the TSS.
76 * this routine assumes that all the privileged stacks are in our
79 static inline long put_stack_long(struct task_struct *task, int offset,
82 unsigned char * stack;
84 stack = (unsigned char *) task->thread.rsp0;
86 *(unsigned long *) stack = data;
/*
 * Convert the child's RIP into a linear address: for LDT code segments the
 * segment base (assembled from the descriptor words) must be added, and
 * 16-bit segments mask the address to 16 bits.  GDT code segments are
 * assumed zero-based.  mm->context.sem guards the LDT walk.
 */
92 unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs)
94 unsigned long addr, seg;
97 seg = regs->cs & 0xffff;
100 * We'll assume that the code segments in the GDT
101 * are all zero-based. That is largely true: the
102 * TLS segments are used for data, and the PNPBIOS
103 * and APM bios ones we just ignore here.
105 if (seg & LDT_SEGMENT) {
109 down(&child->mm->context.sem);
/* Descriptor base = bits 16-31 of word0, bits 0-7 and 24-31 of word1. */
110 desc = child->mm->context.ldt + (seg & ~7);
111 base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000);
113 /* 16-bit code segment? */
114 if (!((desc[1] >> 22) & 1))
117 up(&child->mm->context.sem);
/*
 * Decode up to 15 opcode bytes at the child's current RIP to decide whether
 * the instruction about to execute can itself modify TF (popf/iret), in
 * which case single-step setup must not claim ownership of TF
 * (TIF_FORCED_TF).  Prefix bytes are skipped; anything else terminates the
 * scan.
 */
122 static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
125 unsigned char opcode[15];
126 unsigned long addr = convert_rip_to_linear(child, regs);
128 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
129 for (i = 0; i < copied; i++) {
/* 0x9d = popf, 0xcf = iret: both restore eflags, hence may set TF. */
132 case 0x9d: case 0xcf:
137 /* opcode and address size prefixes */
138 case 0x66: case 0x67:
140 /* irrelevant prefixes (segment overrides and repeats) */
141 case 0x26: case 0x2e:
142 case 0x36: case 0x3e:
143 case 0x64: case 0x65:
144 case 0xf2: case 0xf3:
/*
 * 0x40-0x4f bytes: in 32-bit mode these are inc/dec instructions, in
 * 64-bit mode (cs == __USER_CS) they are REX prefixes to skip.
 */
148 if (regs->cs != __USER_CS)
149 /* 32-bit mode: register increment */
151 /* 64-bit mode: REX prefix */
154 /* CHECKME: f2, f3 */
157 * pushf: NOTE! We should probably not let
158 * the user see the TF bit being set. But
159 * it's more pain than it's worth to avoid
160 * it, and a debugger could emulate this
161 * all in user space if it _really_ cares.
/*
 * Arrange for @child to trap after executing one instruction: set
 * TIF_SINGLESTEP and TF in the saved user eflags.  TIF_FORCED_TF records
 * that *we* set TF, so it can later be cleared transparently -- unless the
 * next instruction would set TF itself (see is_setting_trap_flag).
 */
171 void tracehook_enable_single_step(struct task_struct *child)
173 struct pt_regs *regs = task_pt_regs(child);
176 * Always set TIF_SINGLESTEP - this guarantees that
177 * we single-step system calls etc.. This will also
178 * cause us to set TF when returning to user mode.
180 set_tsk_thread_flag(child, TIF_SINGLESTEP);
183 * If TF was already set, don't do anything else
185 if (regs->eflags & TRAP_FLAG)
188 /* Set TF on the kernel stack.. */
189 regs->eflags |= TRAP_FLAG;
192 * ..but if TF is changed by the instruction we will trace,
193 * don't mark it as being "us" that set it, so that we
194 * won't clear it by hand later.
196 if (is_setting_trap_flag(child, regs))
199 set_tsk_thread_flag(child, TIF_FORCED_TF)
/*
 * Undo tracehook_enable_single_step(): always drop TIF_SINGLESTEP, but
 * clear TF in the saved eflags only if TIF_FORCED_TF says we set it --
 * a TF the tracee set itself is left visible to it.
 */
202 void tracehook_disable_single_step(struct task_struct *child)
204 /* Always clear TIF_SINGLESTEP... */
205 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
207 /* But touch TF only if it was set by us.. */
208 if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF)) {
209 struct pt_regs *regs = task_pt_regs(child);
210 regs->eflags &= ~TRAP_FLAG;
215 * Called by kernel/ptrace.c when detaching..
217 * Make sure the single step bit is not set.
/* Detach cleanup: just cancel any pending single-step state. */
219 void ptrace_disable(struct task_struct *child)
221 tracehook_disable_single_step(child);
/*
 * Store one user-visible register value into the stopped @child.
 * @regno is the byte offset into struct user_regs_struct.  Segment
 * selectors are validated to RPL 3 (or 0/NULL for data segments); fs/gs
 * bases must lie below the task's TASK_SIZE.  Everything else lands in the
 * saved pt_regs on the child's kernel stack via put_stack_long().
 */
224 static int putreg(struct task_struct *child,
225 unsigned long regno, unsigned long value)
229 /* Some code in the 64bit emulation may not be 64bit clean.
230 Don't take any chances. */
/* For 32-bit (TIF_IA32) tasks, presumably truncate value to 32 bits here. */
231 if (test_tsk_thread_flag(child, TIF_IA32))
234 case offsetof(struct user_regs_struct,fs):
/* Selector must be NULL or have RPL 3 (user privilege). */
235 if (value && (value & 3) != 3)
237 child->thread.fsindex = value & 0xffff;
239 case offsetof(struct user_regs_struct,gs):
240 if (value && (value & 3) != 3)
242 child->thread.gsindex = value & 0xffff;
244 case offsetof(struct user_regs_struct,ds):
245 if (value && (value & 3) != 3)
247 child->thread.ds = value & 0xffff;
249 case offsetof(struct user_regs_struct,es):
250 if (value && (value & 3) != 3)
252 child->thread.es = value & 0xffff;
/* ss and cs may never be NULL: RPL 3 is mandatory. */
254 case offsetof(struct user_regs_struct,ss):
255 if ((value & 3) != 3)
259 case offsetof(struct user_regs_struct,fs_base):
260 if (value >= TASK_SIZE_OF(child))
262 child->thread.fs = value;
264 case offsetof(struct user_regs_struct,gs_base):
265 if (value >= TASK_SIZE_OF(child))
267 child->thread.gs = value;
269 case offsetof(struct user_regs_struct, eflags):
/*
 * Merge: user-writable bits (FLAG_MASK) come from @value, the rest are
 * preserved from the current saved eflags.  Writing eflags explicitly
 * transfers TF ownership to the user, so drop TIF_FORCED_TF.
 */
271 tmp = get_stack_long(child, EFL_OFFSET);
274 clear_tsk_thread_flag(child, TIF_FORCED_TF);
276 case offsetof(struct user_regs_struct,cs):
277 if ((value & 3) != 3)
/* Default: write straight into the saved pt_regs on the kernel stack. */
282 put_stack_long(child, regno - sizeof(struct pt_regs), value);
/*
 * Fetch one user-visible register from the stopped @child; inverse of
 * putreg().  Segment selectors and fs/gs bases come from thread_struct;
 * everything else is read from the saved pt_regs.  If we forced TF on for
 * single-stepping (TIF_FORCED_TF), hide it from the reported eflags.
 */
286 static unsigned long getreg(struct task_struct *child, unsigned long regno)
290 case offsetof(struct user_regs_struct, fs):
291 return child->thread.fsindex;
292 case offsetof(struct user_regs_struct, gs):
293 return child->thread.gsindex;
294 case offsetof(struct user_regs_struct, ds):
295 return child->thread.ds;
296 case offsetof(struct user_regs_struct, es):
297 return child->thread.es;
298 case offsetof(struct user_regs_struct, fs_base):
299 return child->thread.fs;
300 case offsetof(struct user_regs_struct, gs_base):
301 return child->thread.gs;
303 regno = regno - sizeof(struct pt_regs);
304 val = get_stack_long(child, regno);
/* 32-bit tasks see only the low 32 bits (truncation presumably follows). */
305 if (test_tsk_thread_flag(child, TIF_IA32))
307 if (regno == (offsetof(struct user_regs_struct, eflags)
308 - sizeof(struct pt_regs))
309 && test_tsk_thread_flag(child, TIF_FORCED_TF))
310 val &= ~X86_EFLAGS_TF;
/*
 * utrace regset .get for the general registers: copy @count bytes starting
 * at @pos out of the child, one long at a time via getreg(), into either
 * the kernel buffer or the user buffer (exactly one of kbuf/ubuf is used).
 */
317 genregs_get(struct task_struct *target,
318 const struct utrace_regset *regset,
319 unsigned int pos, unsigned int count,
320 void *kbuf, void __user *ubuf)
323 unsigned long *kp = kbuf;
325 *kp++ = getreg(target, pos);
327 count -= sizeof(long);
331 unsigned long __user *up = ubuf;
/* User-buffer path: fault here means -EFAULT to the caller. */
333 if (__put_user(getreg(target, pos), up++))
336 count -= sizeof(long);
/*
 * utrace regset .set for the general registers: mirror of genregs_get(),
 * feeding each long through putreg() so per-register validation applies.
 * Stops at the first putreg()/__get_user() failure and returns its error.
 */
344 genregs_set(struct task_struct *target,
345 const struct utrace_regset *regset,
346 unsigned int pos, unsigned int count,
347 const void *kbuf, const void __user *ubuf)
352 const unsigned long *kp = kbuf;
353 while (!ret && count > 0) {
354 ret = putreg(target, pos, *kp++);
356 count -= sizeof(long);
361 const unsigned long __user *up = ubuf;
362 while (!ret && count > 0) {
364 ret = __get_user(val, up++);
366 ret = putreg(target, pos, val);
368 count -= sizeof(long);
/*
 * Regset .active hook: the debug-register set is interesting only if the
 * status (dr6) or control (dr7) register is nonzero.
 */
377 dbregs_active(struct task_struct *tsk, const struct utrace_regset *regset)
379 if (tsk->thread.debugreg6 | tsk->thread.debugreg7)
/*
 * utrace regset .get for the debug registers.  pos/count are byte units,
 * shifted down by 3 to index 8-byte registers.  dr4/dr5 are reserved and
 * presumably read as zero via the (elided) default case.
 */
385 dbregs_get(struct task_struct *target,
386 const struct utrace_regset *regset,
387 unsigned int pos, unsigned int count,
388 void *kbuf, void __user *ubuf)
390 for (pos >>= 3, count >>= 3; count > 0; --count, ++pos) {
394 * The hardware updates the status register on a debug trap,
395 * but do_debug (traps.c) saves it for us when that happens.
396 * So whether the target is current or not, debugregN is good.
400 case 0: val = target->thread.debugreg0; break;
401 case 1: val = target->thread.debugreg1; break;
402 case 2: val = target->thread.debugreg2; break;
403 case 3: val = target->thread.debugreg3; break;
404 case 6: val = target->thread.debugreg6; break;
405 case 7: val = target->thread.debugreg7; break;
409 *(unsigned long *) kbuf = val;
410 kbuf += sizeof(unsigned long);
413 if (__put_user(val, (unsigned long __user *) ubuf))
415 ubuf += sizeof(unsigned long);
/*
 * utrace regset .set for the debug registers.  Address registers dr0-dr3
 * must point below the task's address space top, adjusted so that the
 * widest watchpoint (4 bytes for ia32, 8 for 64-bit) stays in range.  dr7
 * writes are validated against reserved bits and the len/type encoding
 * table before being loaded; TIF_DEBUG tracks whether any breakpoint is
 * armed.
 */
423 dbregs_set(struct task_struct *target,
424 const struct utrace_regset *regset,
425 unsigned int pos, unsigned int count,
426 const void *kbuf, const void __user *ubuf)
429 unsigned long maxaddr = TASK_SIZE_OF(target);
430 maxaddr -= test_tsk_thread_flag(target, TIF_IA32) ? 3 : 7;
432 for (pos >>= 3, count >>= 3; count > 0; --count, ++pos) {
437 val = *(const unsigned long *) kbuf;
438 kbuf += sizeof(unsigned long);
441 if (__get_user(val, (unsigned long __user *) ubuf))
443 ubuf += sizeof(unsigned long);
/* Store debugreg N and, for the live task, load the hardware register. */
447 #define SET_DBREG(n) \
448 target->thread.debugreg##n = val; \
449 if (target == current) \
450 set_debugreg(target->thread.debugreg##n, n)
484 * See arch/i386/kernel/ptrace.c for an explanation
485 * of this awkward check.
487 val &= ~DR_CONTROL_RESERVED;
/* 0x5554 encodes which 4-bit len/type nibbles are invalid for each dr. */
488 for (i = 0; i < 4; i++)
489 if ((0x5554 >> ((val >> (16 + 4*i)) & 0xf))
493 set_tsk_thread_flag(target, TIF_DEBUG);
495 clear_tsk_thread_flag(target, TIF_DEBUG);
/* FP regset is active (reports all regset->n slots) only once the task
 * has used the FPU; otherwise there is no state worth reading. */
507 fpregs_active(struct task_struct *target, const struct utrace_regset *regset)
509 return tsk_used_math(target) ? regset->n : 0;
/*
 * utrace regset .get for the FPU/SSE state: flush the live FPU state to
 * thread.i387.fxsave first when reading the current task (unlazy,
 * presumably in the elided lines), then bulk-copy the fxsave image.
 */
513 fpregs_get(struct task_struct *target,
514 const struct utrace_regset *regset,
515 unsigned int pos, unsigned int count,
516 void *kbuf, void __user *ubuf)
518 if (tsk_used_math(target)) {
519 if (target == current)
525 return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
526 &target->thread.i387.fxsave, 0, -1);
/*
 * utrace regset .set for the FPU/SSE state: copy a user-supplied fxsave
 * image into the thread.  A full-image write to a task that has never used
 * the FPU marks it as having FP state.  mxcsr is masked to supported
 * feature bits to avoid a GP fault when the image is later restored.
 */
530 fpregs_set(struct task_struct *target,
531 const struct utrace_regset *regset,
532 unsigned int pos, unsigned int count,
533 const void *kbuf, const void __user *ubuf)
537 if (tsk_used_math(target)) {
538 if (target == current)
541 else if (pos == 0 && count == sizeof(struct user_i387_struct))
542 set_stopped_child_used_math(target);
546 ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
547 &target->thread.i387.fxsave, 0, -1);
549 target->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
/*
 * Regset .active hook for the fs/gs base pair: report how many slots are
 * meaningful.  A nonzero gs base (or TLS-selector gs) implies both slots;
 * a nonzero fs base alone implies only the first.
 */
555 fsgs_active(struct task_struct *tsk, const struct utrace_regset *regset)
557 if (tsk->thread.gsindex == GS_TLS_SEL || tsk->thread.gs)
559 if (tsk->thread.fsindex == FS_TLS_SEL || tsk->thread.fs)
/*
 * Assemble the 32-bit base address of TLS entry @tls from its GDT
 * descriptor fields (base0 | base1<<16 | base2<<24, per the split
 * descriptor layout).  Used when fs/gs point at a TLS selector.
 */
564 static inline u32 read_32bit_tls(struct task_struct *t, int tls)
566 struct desc_struct *desc = (void *)t->thread.tls_array;
569 (((u32)desc->base1) << 16) |
570 (((u32)desc->base2) << 24);
/*
 * utrace regset .get for the fs/gs segment bases.  For each slot: TLS
 * selector -> base read from the GDT TLS entry; current task -> live MSR;
 * otherwise the value cached in thread_struct.
 *
 * NOTE(review): the GS branch below tests thread.fsindex and falls back to
 * thread.fs -- this looks like a copy-paste bug from the FS branch; it
 * should almost certainly be thread.gsindex / thread.gs (compare
 * fsgs_active() and fsgs_set()).  Confirm against the full file before
 * fixing.
 * NOTE(review): __get_user() on uaddr in a *get* routine is unusual; this
 * listing is missing lines, so verify the kbuf/ubuf direction against the
 * utrace regset API.
 */
574 fsgs_get(struct task_struct *target,
575 const struct utrace_regset *regset,
576 unsigned int pos, unsigned int count,
577 void *kbuf, void __user *ubuf)
579 const unsigned long *kaddr = kbuf;
580 const unsigned long __user *uaddr = ubuf;
584 * XXX why the MSR reads here?
585 * Can anything change the MSRs without changing thread.fs first?
587 if (pos == 0) { /* FS */
590 else if (__get_user(addr, uaddr++))
592 if (target->thread.fsindex == FS_TLS_SEL)
593 addr = read_32bit_tls(target, FS_TLS);
594 else if (target == current) {
595 rdmsrl(MSR_FS_BASE, addr);
598 addr = target->thread.fs;
601 if (count > sizeof(unsigned long)) { /* GS */
604 else if (__get_user(addr, uaddr))
/* NOTE(review): suspected bug -- should be gsindex (see header note). */
606 if (target->thread.fsindex == GS_TLS_SEL)
607 addr = read_32bit_tls(target, GS_TLS);
608 else if (target == current) {
609 rdmsrl(MSR_GS_BASE, addr);
/* NOTE(review): suspected bug -- should be thread.gs (see header note). */
612 addr = target->thread.fs;
/*
 * utrace regset .set for the fs/gs segment bases: read each new base from
 * the kernel or user buffer and apply it via do_arch_prctl()
 * (ARCH_SET_FS / ARCH_SET_GS), which performs validation and the MSR
 * update.  The GS write is skipped if the FS write already failed.
 */
619 fsgs_set(struct task_struct *target,
620 const struct utrace_regset *regset,
621 unsigned int pos, unsigned int count,
622 const void *kbuf, const void __user *ubuf)
624 const unsigned long *kaddr = kbuf;
625 const unsigned long __user *uaddr = ubuf;
629 if (pos == 0) { /* FS */
632 else if (__get_user(addr, uaddr++))
634 ret = do_arch_prctl(target, ARCH_SET_FS, addr);
637 if (!ret && count > sizeof(unsigned long)) { /* GS */
640 else if (__get_user(addr, uaddr))
642 ret = do_arch_prctl(target, ARCH_SET_GS, addr);
650 * These are our native regset flavors.
/*
 * Order matters: index 0 = general registers, 1 = FPU/SSE, 2 = fs/gs
 * bases, 3 = debug registers.  arch_ptrace() and the uarea layout below
 * refer to these indices.
 */
653 static const struct utrace_regset native_regsets[] = {
655 .n = sizeof(struct user_regs_struct)/8, .size = 8, .align = 8,
656 .get = genregs_get, .set = genregs_set
659 .n = sizeof(struct user_i387_struct) / sizeof(long),
660 .size = sizeof(long), .align = sizeof(long),
661 .active = fpregs_active,
662 .get = fpregs_get, .set = fpregs_set
665 .n = 2, .size = sizeof(long), .align = sizeof(long),
666 .active = fsgs_active,
667 .get = fsgs_get, .set = fsgs_set
670 .n = 8, .size = sizeof(long), .align = sizeof(long),
671 .active = dbregs_active,
672 .get = dbregs_get, .set = dbregs_set
/* The exported native view ties the regsets to the x86-64 ELF machine. */
676 const struct utrace_regset_view utrace_x86_64_native = {
677 .name = "x86-64", .e_machine = EM_X86_64,
678 .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
680 EXPORT_SYMBOL_GPL(utrace_x86_64_native);
/*
 * Layout of the legacy PTRACE_PEEKUSR/POKEUSR "user area": general
 * registers map to regset 0, the gap up to u_debugreg is a hole (-1),
 * and u_debugreg[0..7] maps to regset 3 (debug registers).
 */
684 static const struct ptrace_layout_segment x86_64_uarea[] = {
685 {0, sizeof(struct user_regs_struct), 0, 0},
686 {sizeof(struct user_regs_struct),
687 offsetof(struct user, u_debugreg[0]), -1, 0},
688 {offsetof(struct user, u_debugreg[0]),
689 offsetof(struct user, u_debugreg[8]), 3, 0},
/*
 * Arch-specific ptrace request dispatcher: maps legacy ptrace requests
 * onto the utrace regset machinery (uarea peek/poke, whole GP/FP regset
 * transfers, ia32 TLS thread-area access, and PTRACE_ARCH_PRCTL, which
 * behaves like arch_prctl with addr/data argument order).  The case
 * labels for the first four branches are in lines elided from this
 * listing.
 */
693 int arch_ptrace(long *req, struct task_struct *child,
694 struct utrace_attached_engine *engine,
695 unsigned long addr, unsigned long data, long *val)
699 return ptrace_peekusr(child, engine, x86_64_uarea, addr, data);
701 return ptrace_pokeusr(child, engine, x86_64_uarea, addr, data);
703 return ptrace_whole_regset(child, engine, data, 0, 0);
705 return ptrace_whole_regset(child, engine, data, 0, 1);
706 case PTRACE_GETFPREGS:
707 return ptrace_whole_regset(child, engine, data, 1, 0);
708 case PTRACE_SETFPREGS:
709 return ptrace_whole_regset(child, engine, data, 1, 1);
710 #ifdef CONFIG_IA32_EMULATION
711 case PTRACE_GET_THREAD_AREA:
712 case PTRACE_SET_THREAD_AREA:
/* Regset 3 of the ia32 view is the TLS thread-area regset. */
713 return ptrace_onereg_access(child, engine,
714 &utrace_ia32_view, 3,
715 addr, (void __user *)data,
716 *req == PTRACE_SET_THREAD_AREA);
718 /* normal 64bit interface to access TLS data.
719 Works just like arch_prctl, except that the arguments
721 case PTRACE_ARCH_PRCTL:
722 return do_arch_prctl(child, data, addr);
726 #endif /* CONFIG_PTRACE */
/*
 * Entry-path syscall tracing hook: run seccomp first (it may kill the
 * task), notify the tracer if TIF_SYSCALL_TRACE is set, then record the
 * audit entry with the arch-appropriate register-to-argument mapping
 * (ia32 uses rbx/rcx/rdx/rsi, x86-64 uses rdi/rsi/rdx/r10).
 */
729 asmlinkage void syscall_trace_enter(struct pt_regs *regs)
731 /* do the secure computing check first */
732 secure_computing(regs->orig_rax);
734 if (test_thread_flag(TIF_SYSCALL_TRACE))
735 tracehook_report_syscall(regs, 0);
737 if (unlikely(current->audit_context)) {
738 if (test_thread_flag(TIF_IA32)) {
739 audit_syscall_entry(AUDIT_ARCH_I386,
741 regs->rbx, regs->rcx,
742 regs->rdx, regs->rsi);
744 audit_syscall_entry(AUDIT_ARCH_X86_64,
746 regs->rdi, regs->rsi,
748 regs->rdx, regs->r10);
752 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
754 if (unlikely(current->audit_context))
755 audit_syscall_exit(AUDITSC_RESULT(regs->rax), regs->rax);
757 if (test_thread_flag(TIF_SYSCALL_TRACE))
758 tracehook_report_syscall(regs, 1);
760 if (test_thread_flag(TIF_SINGLESTEP)) {
761 force_sig(SIGTRAP, current); /* XXX */
762 tracehook_report_syscall_step(regs);