/*
 * 32bit ptrace for x86-64.
 *
 * Copyright 2001,2002 Andi Kleen, SuSE Labs.
 * Some parts copied from arch/i386/kernel/ptrace.c. See that file for earlier
 * copyright.
 *
 * This allows accessing 64bit processes too, but there is no way to see the
 * extended register contents.
 */
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/module.h>
#include <linux/elf.h>
#include <asm/ptrace.h>
#include <asm/tracehook.h>
#include <asm/compat.h>
#include <asm/uaccess.h>
#include <asm/user32.h>
#include <asm/user.h>
#include <asm/errno.h>
#include <asm/debugreg.h>
#include <asm/i387.h>
#include <asm/fpu32.h>
#include <asm/ldt.h>
#include <asm/desc.h>
/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
 * Also masks reserved bits (31-22, 15, 5, 3, 1).
 */
#define FLAG_MASK 0x54dd5UL
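
/*
 * putreg32() below applies this mask when a tracer writes eflags: the new
 * value is ANDed with FLAG_MASK and the remaining bits keep their current
 * contents, so a 32-bit debugger can toggle the arithmetic flags and TF
 * but never IOPL, IF or the VM/VIF/VIP bits.
 */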
#define R32(l,q) \
	case offsetof(struct user_regs_struct32, l): stack[offsetof(struct pt_regs, q)/8] = val; break
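
/*
 * R32(l, q) pairs a 32-bit register name with the 64-bit pt_regs slot
 * that holds it.  R32(eax, rax), for example, expands to:
 *
 *	case offsetof(struct user_regs_struct32, eax):
 *		stack[offsetof(struct pt_regs, rax)/8] = val; break;
 */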
static int putreg32(struct task_struct *child, unsigned regno, u32 val)
{
	__u64 *stack = (__u64 *)task_pt_regs(child);

	switch (regno) {
	case offsetof(struct user_regs_struct32, fs):
		if (val && (val & 3) != 3) return -EIO;
		child->thread.fsindex = val & 0xffff;
		break;
	case offsetof(struct user_regs_struct32, gs):
		if (val && (val & 3) != 3) return -EIO;
		child->thread.gsindex = val & 0xffff;
		break;
	case offsetof(struct user_regs_struct32, ds):
		if (val && (val & 3) != 3) return -EIO;
		child->thread.ds = val & 0xffff;
		break;
	case offsetof(struct user_regs_struct32, es):
		child->thread.es = val & 0xffff;
		break;
	case offsetof(struct user_regs_struct32, ss):
		if ((val & 3) != 3) return -EIO;
		stack[offsetof(struct pt_regs, ss)/8] = val & 0xffff;
		break;
	case offsetof(struct user_regs_struct32, cs):
		if ((val & 3) != 3) return -EIO;
		stack[offsetof(struct pt_regs, cs)/8] = val & 0xffff;
		break;

	R32(orig_eax, orig_rax);

	case offsetof(struct user_regs_struct32, eflags): {
		__u64 *flags = &stack[offsetof(struct pt_regs, eflags)/8];
		val &= FLAG_MASK;
		*flags = val | (*flags & ~FLAG_MASK);
		clear_tsk_thread_flag(child, TIF_FORCED_TF);
		break;
	}
	}
	return 0;
}
#undef R32
#define R32(l,q) \
	case offsetof(struct user_regs_struct32, l): val = stack[offsetof(struct pt_regs, q)/8]; break
static int getreg32(struct task_struct *child, unsigned regno)
{
	__u64 *stack = (__u64 *)task_pt_regs(child);
	u32 val;

	switch (regno) {
	case offsetof(struct user_regs_struct32, fs):
		val = child->thread.fsindex;
		break;
	case offsetof(struct user_regs_struct32, gs):
		val = child->thread.gsindex;
		break;
	case offsetof(struct user_regs_struct32, ds):
		val = child->thread.ds;
		break;
	case offsetof(struct user_regs_struct32, es):
		val = child->thread.es;
		break;

	R32(orig_eax, orig_rax);

	case offsetof(struct user_regs_struct32, eflags):
		val = stack[offsetof(struct pt_regs, eflags) / 8];
		if (test_tsk_thread_flag(child, TIF_FORCED_TF))
			val &= ~X86_EFLAGS_TF;
		break;
	}
	return val;
}
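
/*
 * Note on the trap flag: TIF_FORCED_TF records that TF was set by the
 * kernel to implement single-stepping rather than by the program itself.
 * getreg32() therefore strips TF from the value it reports, and
 * putreg32() clears the flag once the tracer writes eflags explicitly.
 */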
ia32_genregs_get(struct task_struct *target,
		 const struct utrace_regset *regset,
		 unsigned int pos, unsigned int count,
		 void *kbuf, void __user *ubuf)
		*kp++ = getreg32(target, pos);
	u32 __user *up = ubuf;
		if (__put_user(getreg32(target, pos), up++))
			return -EFAULT;

ia32_genregs_set(struct task_struct *target,
		 const struct utrace_regset *regset,
		 unsigned int pos, unsigned int count,
		 const void *kbuf, const void __user *ubuf)
	const u32 *kp = kbuf;
	while (!ret && count > 0) {
		ret = putreg32(target, pos, *kp++);
	const u32 __user *up = ubuf;
	while (!ret && count > 0) {
		ret = __get_user(val, up++);
		if (!ret)
			ret = putreg32(target, pos, val);
static int
ia32_fpregs_active(struct task_struct *target,
		   const struct utrace_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}
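
/*
 * A regset's ->active hook reports how many of its members currently
 * carry meaningful state for the task: here it is all-or-nothing, so a
 * task that has never touched the FPU reports 0 and callers (core dump
 * writing, for instance) can skip the set entirely.
 */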
ia32_fpregs_get(struct task_struct *target,
		const struct utrace_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
	struct user_i387_ia32_struct fp;

	if (tsk_used_math(target)) {
		if (target == current)
	ret = get_fpregs32(&fp, target);
	ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
ia32_fpregs_set(struct task_struct *target,
		const struct utrace_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
	struct user_i387_ia32_struct fp;

	if (tsk_used_math(target)) {
		if (target == current)
	else if (pos == 0 && count == sizeof(fp))
		set_stopped_child_used_math(target);

	if (pos > 0 || count < sizeof(fp)) {
		ret = get_fpregs32(&fp, target);
		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
	else if (kbuf == NULL) {
		if (__copy_from_user(&fp, ubuf, sizeof(fp)))
			return -EFAULT;

	return set_fpregs32(target, kbuf);
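
/*
 * The get_fpregs32()/set_fpregs32() helpers convert between the i386
 * user_i387_ia32_struct layout seen by 32-bit debuggers and the task's
 * native fxsave image.  A partial write is therefore merged into a
 * freshly converted copy of the current state before being handed back
 * to set_fpregs32().
 */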
static int
ia32_fpxregs_active(struct task_struct *target,
		    const struct utrace_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}
ia32_fpxregs_get(struct task_struct *target,
		 const struct utrace_regset *regset,
		 unsigned int pos, unsigned int count,
		 void *kbuf, void __user *ubuf)
	if (tsk_used_math(target)) {
		if (target == current)
	return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
				     &target->thread.i387.fxsave, 0, -1);
ia32_fpxregs_set(struct task_struct *target,
		 const struct utrace_regset *regset,
		 unsigned int pos, unsigned int count,
		 const void *kbuf, const void __user *ubuf)
	if (tsk_used_math(target)) {
		if (target == current)
	else if (pos == 0 && count == sizeof(struct i387_fxsave_struct))
		set_stopped_child_used_math(target);

	ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
				   &target->thread.i387.fxsave, 0, -1);
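	/*
	 * Mask mxcsr against the features the CPU actually supports:
	 * reserved MXCSR bits set here would otherwise fault the next
	 * fxrstor of this context.
	 */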
	target->thread.i387.fxsave.mxcsr &= mxcsr_feature_mask;
ia32_dbregs_active(struct task_struct *tsk, const struct utrace_regset *regset)
	if (tsk->thread.debugreg6 | tsk->thread.debugreg7)
ia32_dbregs_get(struct task_struct *target,
		const struct utrace_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
	for (pos >>= 2, count >>= 2; count > 0; --count, ++pos) {
		/*
		 * The hardware updates the status register on a debug trap,
		 * but do_debug (traps.c) saves it for us when that happens.
		 * So whether the target is current or not, debugregN is good.
		 */
		case 0: val = target->thread.debugreg0; break;
		case 1: val = target->thread.debugreg1; break;
		case 2: val = target->thread.debugreg2; break;
		case 3: val = target->thread.debugreg3; break;
		case 6: val = target->thread.debugreg6; break;
		case 7: val = target->thread.debugreg7; break;
		if (__put_user(val, (u32 __user *) ubuf))
			return -EFAULT;
ia32_dbregs_set(struct task_struct *target,
		const struct utrace_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
	/*
	 * We'll just hijack the native setter to do the real work for us.
	 */
	const struct utrace_regset *dbregset = &utrace_x86_64_native.regsets[2];

	for (pos >>= 2, count >>= 2; count > 0; --count, ++pos) {
			val = *(const u32 *) kbuf;
			if (__get_user(val, (u32 __user *) ubuf))
				return -EFAULT;
		ret = (*dbregset->set)(target, dbregset, pos * sizeof(long),
				       sizeof(val), &val, NULL);
/*
 * Perform get_thread_area on behalf of the traced child.
 */
ia32_tls_get(struct task_struct *target,
	     const struct utrace_regset *regset,
	     unsigned int pos, unsigned int count,
	     void *kbuf, void __user *ubuf)
	struct user_desc info, *ip;
	const struct n_desc_struct *desc;
	const struct n_desc_struct *tls;

	/*
	 * Get the current Thread-Local Storage area:
	 */

#define GET_BASE(desc) ( \
	(((desc)->a >> 16) & 0x0000ffff) | \
	(((desc)->b << 16) & 0x00ff0000) | \
	( (desc)->b & 0xff000000) )

#define GET_LIMIT(desc) ( \
	((desc)->a & 0x0ffff) | \
	((desc)->b & 0xf0000) )

#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)	(((desc)->b >> 9) & 1)
#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)
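
/*
 * Each descriptor is two 32-bit words (a, b) in the packed hardware
 * format; the macros above pull the user_desc fields back out.  For
 * instance, GET_BASE() reassembles the base address from base[15:0] in
 * a[31:16], base[23:16] in b[7:0] and base[31:24] in b[31:24].
 */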
	tls = (struct n_desc_struct *) target->thread.tls_array;

	memset(ip, 0, sizeof *ip);
	for (; count > 0; count -= sizeof(struct user_desc), ++desc) {
		ip->entry_number = desc - tls + GDT_ENTRY_TLS_MIN;
		ip->base_addr = GET_BASE(desc);
		ip->limit = GET_LIMIT(desc);
		ip->seg_32bit = GET_32BIT(desc);
		ip->contents = GET_CONTENTS(desc);
		ip->read_exec_only = !GET_WRITABLE(desc);
		ip->limit_in_pages = GET_LIMIT_PAGES(desc);
		ip->seg_not_present = !GET_PRESENT(desc);
		ip->useable = GET_USEABLE(desc);

		if (__copy_to_user(ubuf, &info, sizeof(info)))
			return -EFAULT;
		ubuf += sizeof(info);
/*
 * Perform set_thread_area on behalf of the traced child.
 */
ia32_tls_set(struct task_struct *target,
	     const struct utrace_regset *regset,
	     unsigned int pos, unsigned int count,
	     const void *kbuf, const void __user *ubuf)
	struct user_desc info;
	struct n_desc_struct *desc;
	struct n_desc_struct newtls[GDT_ENTRY_TLS_ENTRIES];

	pos /= sizeof(struct user_desc);
	count /= sizeof(struct user_desc);

	for (i = 0; i < count; ++i, ++desc) {
		const struct user_desc *ip;
			kbuf += sizeof(struct user_desc);
		if (__copy_from_user(&info, ubuf, sizeof(info)))
			return -EFAULT;
			ubuf += sizeof(struct user_desc);
		desc->a = LDT_entry_a(ip);
		desc->b = LDT_entry_b(ip);
	/*
	 * We must not get preempted while modifying the TLS.
	 */

	memcpy(&target->thread.tls_array[pos], newtls,
	       count * sizeof(newtls[0]));
	if (target == current)
		load_TLS(&target->thread, cpu);
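
	/*
	 * The no-preemption requirement noted above exists because
	 * load_TLS() copies the descriptors into the per-CPU GDT: if the
	 * task were migrated between the memcpy and the reload, the update
	 * would land in the GDT of the wrong CPU.
	 */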
/*
 * Determine how many TLS slots are in use.
 */
ia32_tls_active(struct task_struct *target, const struct utrace_regset *regset)
	for (i = GDT_ENTRY_TLS_ENTRIES; i > 0; --i) {
		struct n_desc_struct *desc = (struct n_desc_struct *)
			&target->thread.tls_array[i - 1];
		if ((desc->a | desc->b) != 0)
/*
 * This should match arch/i386/kernel/ptrace.c:native_regsets.
 */
static const struct utrace_regset ia32_regsets[] = {
	{
		.n = sizeof(struct user_regs_struct32)/4,
		.size = 4, .align = 4,
		.get = ia32_genregs_get, .set = ia32_genregs_set
	},
	{
		.n = sizeof(struct user_i387_ia32_struct) / 4,
		.size = 4, .align = 4,
		.active = ia32_fpregs_active,
		.get = ia32_fpregs_get, .set = ia32_fpregs_set
	},
	{
		.n = sizeof(struct user32_fxsr_struct) / 4,
		.size = 4, .align = 4,
		.active = ia32_fpxregs_active,
		.get = ia32_fpxregs_get, .set = ia32_fpxregs_set
	},
	{
		.n = GDT_ENTRY_TLS_ENTRIES,
		.bias = GDT_ENTRY_TLS_MIN,
		.size = sizeof(struct user_desc),
		.align = sizeof(struct user_desc),
		.active = ia32_tls_active,
		.get = ia32_tls_get, .set = ia32_tls_set
	},
	{
		.n = 8, .size = 4, .align = 4,
		.active = ia32_dbregs_active,
		.get = ia32_dbregs_get, .set = ia32_dbregs_set
	},
};
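
/*
 * arch_compat_ptrace() and ia32_uarea below refer to these regsets by
 * index: 0 is the general registers, 1 the i387 state, 2 the fxsr state,
 * 3 the TLS descriptors and 4 the debug registers.
 */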
const struct utrace_regset_view utrace_ia32_view = {
	.name = "i386", .e_machine = EM_386,
	.regsets = ia32_regsets,
	.n = sizeof ia32_regsets / sizeof ia32_regsets[0],
};
EXPORT_SYMBOL_GPL(utrace_ia32_view);
#ifdef CONFIG_PTRACE
/*
 * This matches the arch/i386/kernel/ptrace.c definitions.
 */
static const struct ptrace_layout_segment ia32_uarea[] = {
	{0, sizeof(struct user_regs_struct32), 0, 0},
	{offsetof(struct user32, u_debugreg[0]),
	 offsetof(struct user32, u_debugreg[8]), 4, 0},
};
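
/*
 * Example: a PTRACE_PEEKUSR at offsetof(struct user32, u_debugreg[7])
 * falls in the second segment and so is serviced by regset 4 (the debug
 * registers), at the corresponding 4-byte slot within that set.
 */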
fastcall int arch_compat_ptrace(compat_long_t *req, struct task_struct *child,
				struct utrace_attached_engine *engine,
				compat_ulong_t addr, compat_ulong_t data,
				compat_long_t *val)
{
	switch (*req) {
	case PTRACE_PEEKUSR:
		return ptrace_compat_peekusr(child, engine, ia32_uarea,
					     addr, data);
	case PTRACE_POKEUSR:
		return ptrace_compat_pokeusr(child, engine, ia32_uarea,
					     addr, data);
	case PTRACE_GETREGS:
		return ptrace_whole_regset(child, engine, data, 0, 0);
	case PTRACE_SETREGS:
		return ptrace_whole_regset(child, engine, data, 0, 1);
	case PTRACE_GETFPREGS:
		return ptrace_whole_regset(child, engine, data, 1, 0);
	case PTRACE_SETFPREGS:
		return ptrace_whole_regset(child, engine, data, 1, 1);
	case PTRACE_GETFPXREGS:
		return ptrace_whole_regset(child, engine, data, 2, 0);
	case PTRACE_SETFPXREGS:
		return ptrace_whole_regset(child, engine, data, 2, 1);
	case PTRACE_GET_THREAD_AREA:
	case PTRACE_SET_THREAD_AREA:
		return ptrace_onereg_access(child, engine,
					    &utrace_ia32_view, 3,
					    addr,
					    (void __user *)(unsigned long)data,
					    *req == PTRACE_SET_THREAD_AREA);
	}
	return -ENOSYS;
}
#endif /* CONFIG_PTRACE */