2 /* By Ross Biro 1/23/92 */
4 * Pentium III FXSR, SSE support
5 * Gareth Hughes <gareth@valinux.com>, May 2000
8 #include <linux/kernel.h>
9 #include <linux/sched.h>
11 #include <linux/smp.h>
12 #include <linux/smp_lock.h>
13 #include <linux/errno.h>
14 #include <linux/ptrace.h>
15 #include <linux/tracehook.h>
16 #include <linux/user.h>
17 #include <linux/security.h>
18 #include <linux/audit.h>
19 #include <linux/seccomp.h>
20 #include <linux/signal.h>
21 #include <linux/module.h>
23 #include <asm/tracehook.h>
24 #include <asm/uaccess.h>
25 #include <asm/pgtable.h>
26 #include <asm/system.h>
27 #include <asm/processor.h>
29 #include <asm/debugreg.h>
35 * Determines which flags the user has access to [1 = access, 0 = no access].
36 * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), NT(14), IOPL(12-13), IF(9).
37 * Also masks reserved bits (31-22, 15, 5, 3, 1).
39 #define FLAG_MASK 0x00050dd5
42 * Offset of eflags on child stack..
/* Byte offset of the saved eflags within the child's pt_regs frame;
 * used by putreg/getreg via get_stack_long/put_stack_long. */
44 #define EFL_OFFSET offsetof(struct pt_regs, eflags)
/*
 * Return a pointer to the user-mode register frame saved at the top of
 * the child's kernel stack: thread.esp0 points just past the saved
 * pt_regs, so backing up sizeof(struct pt_regs) lands on the frame.
 */
46 static inline struct pt_regs *get_child_regs(struct task_struct *task)
48 void *stack_top = (void *)task->thread.esp0;
49 return stack_top - sizeof(struct pt_regs);
53 * This routine will get a word off of the processes privileged stack.
54 * the offset is bytes into the pt_regs structure on the stack.
55 * This routine assumes that all the privileged stacks are in our
58 static inline int get_stack_long(struct task_struct *task, int offset)
/* Base of the saved pt_regs frame on the child's kernel stack.
 * NOTE(review): the line adding 'offset' to 'stack' is not visible in
 * this fragment -- presumably stack += offset before the load; confirm
 * against the full source. */
62 stack = (unsigned char *)task->thread.esp0 - sizeof(struct pt_regs);
64 return (*((int *)stack));
68 * This routine will put a word on the processes privileged stack.
69 * the offset is bytes into the pt_regs structure on the stack.
70 * This routine assumes that all the privileged stacks are in our
73 static inline int put_stack_long(struct task_struct *task, int offset,
76 unsigned char * stack;
/* Base of the saved pt_regs frame; NOTE(review): as with
 * get_stack_long, the 'stack += offset' line is not visible here. */
78 stack = (unsigned char *)task->thread.esp0 - sizeof(struct pt_regs);
80 *(unsigned long *) stack = data;
/*
 * Write one saved user register of a stopped child. 'regno' is the
 * byte offset into pt_regs (interior switch/case lines are missing
 * from this fragment).
 */
84 static int putreg(struct task_struct *child,
85 unsigned long regno, unsigned long value)
/* Data segment selectors (e.g. fs): must be a user selector (RPL 3)
 * or the null selector 0. */
89 if (value && (value & 3) != 3)
91 child->thread.fs = value;
96 if (value && (value & 3) != 3)
/* Code/stack selectors must always carry RPL 3 -- null not allowed. */
102 if ((value & 3) != 3)
/* eflags: keep the kernel-controlled bits from the saved frame and
 * take only FLAG_MASK bits from the caller-supplied value. A user
 * write to eflags also drops any kernel-forced trap flag marker. */
108 value |= get_stack_long(child, EFL_OFFSET) & ~FLAG_MASK;
109 clear_tsk_thread_flag(child, TIF_FORCED_TF);
/* Common exit: store into the child's saved pt_regs. */
114 put_stack_long(child, regno, value);
/*
 * Read one saved user register of a stopped child; 'regno' is a byte
 * offset into pt_regs (divided by 4 to index the switch).
 */
118 static unsigned long getreg(struct task_struct *child,
121 unsigned long retval = ~0UL;
123 switch (regno >> 2) {
125 retval = child->thread.fs;
/* If the kernel set TF for single-stepping, hide it from the
 * debugger's view of eflags. */
128 if (test_tsk_thread_flag(child, TIF_FORCED_TF))
129 retval &= ~X86_EFLAGS_TF;
/* retval starts as all-ones, so this mask-read yields the raw
 * value for plain registers. */
142 retval &= get_stack_long(child, regno);
/* TI bit of a selector: set when the segment lives in the LDT. */
148 #define LDT_SEGMENT 4
/*
 * Translate the child's saved EIP into a linear address, accounting
 * for vm86 mode and for LDT code segments with non-zero bases.
 * NOTE(review): the lines initializing 'addr' from regs->eip and the
 * LDT bounds checks are missing from this fragment.
 */
150 static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_regs *regs)
152 unsigned long addr, seg;
155 seg = regs->xcs & 0xffff;
/* vm86 mode: real-mode style segment arithmetic. */
156 if (regs->eflags & VM_MASK) {
157 addr = (addr & 0xffff) + (seg << 4);
162 * We'll assume that the code segments in the GDT
163 * are all zero-based. That is largely true: the
164 * TLS segments are used for data, and the PNPBIOS
165 * and APM bios ones we just ignore here.
167 if (seg & LDT_SEGMENT) {
/* Walk the child's LDT under its context semaphore and decode the
 * 32-bit segment base from the descriptor words. */
171 down(&child->mm->context.sem);
172 desc = child->mm->context.ldt + (seg & ~7);
173 base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000);
175 /* 16-bit code segment? */
176 if (!((desc[1] >> 22) & 1))
179 up(&child->mm->context.sem);
/*
 * Decide whether the instruction the child is about to execute will
 * itself modify eflags.TF (popf/iret, possibly behind prefixes), so
 * single-step setup knows not to claim ownership of the TF bit.
 */
184 static inline int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
187 unsigned char opcode[15];
188 unsigned long addr = convert_eip_to_linear(child, regs);
/* Peek at up to 15 opcode bytes at the child's EIP (read-only). */
190 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
191 for (i = 0; i < copied; i++) {
/* popf (0x9d) and iret (0xcf) both reload eflags, hence TF. */
194 case 0x9d: case 0xcf:
196 /* opcode and address size prefixes */
197 case 0x66: case 0x67:
199 /* irrelevant prefixes (segment overrides and repeats) */
200 case 0x26: case 0x2e:
201 case 0x36: case 0x3e:
202 case 0x64: case 0x65:
203 case 0xf0: case 0xf2: case 0xf3:
207 * pushf: NOTE! We should probably not let
208 * the user see the TF bit being set. But
209 * it's more pain than it's worth to avoid
210 * it, and a debugger could emulate this
211 * all in user space if it _really_ cares.
/*
 * Arm hardware single-step for a stopped child: set TIF_SINGLESTEP,
 * set TF in the saved eflags, and remember (via TIF_FORCED_TF)
 * whether it was the kernel that set TF so it can be cleared later.
 */
221 void tracehook_enable_single_step(struct task_struct *child)
223 struct pt_regs *regs = get_child_regs(child);
226 * Always set TIF_SINGLESTEP - this guarantees that
227 * we single-step system calls etc.. This will also
228 * cause us to set TF when returning to user mode.
230 set_tsk_thread_flag(child, TIF_SINGLESTEP);
233 * If TF was already set, don't do anything else
235 if (regs->eflags & X86_EFLAGS_TF)
238 /* Set TF on the kernel stack.. */
239 regs->eflags |= X86_EFLAGS_TF;
242 * ..but if TF is changed by the instruction we will trace,
243 * don't mark it as being "us" that set it, so that we
244 * won't clear it by hand later.
246 if (is_setting_trap_flag(child, regs))
249 set_tsk_thread_flag(child, TIF_FORCED_TF);
/*
 * Disarm single-step: drop TIF_SINGLESTEP unconditionally, but only
 * clear TF in the saved eflags if the kernel was the one that set it
 * (TIF_FORCED_TF), preserving a user-set trap flag.
 */
252 void tracehook_disable_single_step(struct task_struct *child)
254 /* Always clear TIF_SINGLESTEP... */
255 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
257 /* But touch TF only if it was set by us.. */
258 if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF)) {
259 struct pt_regs *regs = get_child_regs(child);
260 regs->eflags &= ~X86_EFLAGS_TF;
/*
 * utrace regset .get for general registers: copy words from the
 * child's saved pt_regs (via getreg) into a kernel or user buffer.
 * NOTE(review): the loop headers and pos/count bookkeeping lines are
 * missing from this fragment.
 */
266 genregs_get(struct task_struct *target,
267 const struct utrace_regset *regset,
268 unsigned int pos, unsigned int count,
269 void *kbuf, void __user *ubuf)
/* Kernel-buffer path. */
272 unsigned long *kp = kbuf;
274 *kp++ = getreg(target, pos);
/* User-buffer path: __put_user may fault, hence the error check. */
280 unsigned long __user *up = ubuf;
282 if (__put_user(getreg(target, pos), up++))
/*
 * utrace regset .set for general registers: feed each word through
 * putreg so segment/eflags restrictions are enforced per register.
 */
293 genregs_set(struct task_struct *target,
294 const struct utrace_regset *regset,
295 unsigned int pos, unsigned int count,
296 const void *kbuf, const void __user *ubuf)
/* Kernel-buffer path: stop at the first putreg error. */
301 const unsigned long *kp = kbuf;
302 while (!ret && count > 0) {
303 ret = putreg(target, pos, *kp++);
/* User-buffer path: __get_user may fault; only store on success. */
310 const unsigned long __user *up = ubuf;
311 while (!ret && count > 0) {
313 ret = __get_user(val, up++);
315 ret = putreg(target, pos, val);
/* Report the i387 regset as fully active only if the task has ever
 * used the FPU; otherwise there is no state to read. */
325 fpregs_active(struct task_struct *target, const struct utrace_regset *regset)
327 return tsk_used_math(target) ? regset->n : 0;
/*
 * utrace regset .get for classic i387 state: snapshot the FP state
 * into a user_i387_struct and copy the requested slice out.
 * NOTE(review): the unlazy_fpu/else branches for target == current
 * are missing from this fragment.
 */
331 fpregs_get(struct task_struct *target,
332 const struct utrace_regset *regset,
333 unsigned int pos, unsigned int count,
334 void *kbuf, void __user *ubuf)
336 struct user_i387_struct fp;
339 if (tsk_used_math(target)) {
/* Live state must be flushed before reading when tracing self. */
340 if (target == current)
346 ret = get_fpregs(&fp, target);
348 ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
/*
 * utrace regset .set for classic i387 state. A full-size write to a
 * task that never used the FPU marks it as having FP state; a partial
 * write must first read the existing state so untouched bytes are
 * preserved.
 */
355 fpregs_set(struct task_struct *target,
356 const struct utrace_regset *regset,
357 unsigned int pos, unsigned int count,
358 const void *kbuf, const void __user *ubuf)
360 struct user_i387_struct fp;
363 if (tsk_used_math(target)) {
364 if (target == current)
/* Whole-struct write: safe to declare the task an FPU user. */
367 else if (pos == 0 && count == sizeof(fp))
368 set_stopped_child_used_math(target);
/* Partial update: merge into the current state. */
372 if (pos > 0 || count < sizeof(fp)) {
373 ret = get_fpregs(&fp, target);
375 ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
/* Full-size user-buffer write can bypass the merge entirely. */
381 else if (kbuf == NULL) {
382 if (__copy_from_user(&fp, ubuf, sizeof(fp)))
387 return set_fpregs(target, kbuf);
/* FXSR regset: unavailable on CPUs without fxsave/fxrstor; otherwise
 * active only if the task has FP state. */
391 fpxregs_active(struct task_struct *target, const struct utrace_regset *regset)
393 return !cpu_has_fxsr ? -ENODEV : tsk_used_math(target) ? regset->n : 0;
/*
 * utrace regset .get for FXSR state: copy straight from the task's
 * saved i387.fxsave image. NOTE(review): the cpu_has_fxsr check and
 * the unlazy path for the used-math case are missing from this view.
 */
397 fpxregs_get(struct task_struct *target,
398 const struct utrace_regset *regset,
399 unsigned int pos, unsigned int count,
400 void *kbuf, void __user *ubuf)
405 if (tsk_used_math(target))
410 return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
411 &target->thread.i387.fxsave, 0, -1);
/*
 * utrace regset .set for FXSR state: copy into the saved fxsave image
 * and then fix up derived state (updated_fpxregs). A full-size write
 * marks a previously FPU-less task as having FP state.
 */
415 fpxregs_set(struct task_struct *target,
416 const struct utrace_regset *regset,
417 unsigned int pos, unsigned int count,
418 const void *kbuf, const void __user *ubuf)
425 if (tsk_used_math(target))
427 else if (pos == 0 && count == sizeof(target->thread.i387.fxsave))
428 set_stopped_child_used_math(target)
432 ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
433 &target->thread.i387.fxsave, 0, -1);
435 updated_fpxregs(target);
/* Debug regset is interesting only if DR7 (control) or DR6 (status)
 * is nonzero, i.e. some breakpoint is set up or has fired. */
442 dbregs_active(struct task_struct *tsk, const struct utrace_regset *regset)
444 if (tsk->thread.debugreg[DR_CONTROL] | tsk->thread.debugreg[DR_STATUS])
/* utrace regset .get for debug registers: the shadow copy in
 * thread.debugreg is always current, so just copy it out. */
450 dbregs_get(struct task_struct *target,
451 const struct utrace_regset *regset,
452 unsigned int pos, unsigned int count,
453 void *kbuf, void __user *ubuf)
456 * The hardware updates the status register on a debug trap,
457 * but do_debug (traps.c) save it for us when that happens.
458 * So whether the target is current or not, thread.debugreg is good.
461 return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
462 target->thread.debugreg, 0, -1);
/*
 * utrace regset .set for debug registers: validate each value before
 * storing it in the thread's shadow copy, and load it into the real
 * debug register when the target is the current task.
 * NOTE(review): several case labels and the error-return lines are
 * missing from this fragment.
 */
466 dbregs_set(struct task_struct *target,
467 const struct utrace_regset *regset,
468 unsigned int pos, unsigned int count,
469 const void *kbuf, const void __user *ubuf)
/* Iterate per 32-bit register index rather than byte offset. */
471 for (pos >>= 2, count >>= 2; count > 0; --count, ++pos) {
476 val = *(const unsigned long *) kbuf;
477 kbuf += sizeof(unsigned long);
480 if (__get_user(val, (unsigned long __user *) ubuf))
482 ubuf += sizeof(unsigned long);
/* Address registers DR0-DR3 must point into user space; -3 keeps
 * a 4-byte watchpoint from straddling the TASK_SIZE boundary. */
486 if (val >= TASK_SIZE - 3)
498 /* Sanity-check data. Take one half-byte at once with
499 * check = (val >> (16 + 4*i)) & 0xf. It contains the
500 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
501 * 2 and 3 are LENi. Given a list of invalid values,
502 * we do mask |= 1 << invalid_value, so that
503 * (mask >> check) & 1 is a correct test for invalid
506 * R/Wi contains the type of the breakpoint /
507 * watchpoint, LENi contains the length of the watched
508 * data in the watchpoint case.
510 * The invalid values are:
511 * - LENi == 0x10 (undefined), so mask |= 0x0f00.
512 * - R/Wi == 0x10 (break on I/O reads or writes), so
514 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
517 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
519 * See the Intel Manual "System Programming Guide",
522 * Note that LENi == 0x10 is defined on x86_64 in long
523 * mode (i.e. even for 32-bit userspace software, but
524 * 64-bit kernel), so the x86_64 mask value is 0x5454.
525 * See the AMD manual no. 24593 (AMD64 System
527 val &= ~DR_CONTROL_RESERVED;
528 for (i = 0; i < 4; i++)
529 if ((0x5f54 >> ((val >> (16 + 4*i)) & 0xf)) & 1)
/* Track whether any breakpoint remains enabled via TIF_DEBUG. */
532 set_tsk_thread_flag(target, TIF_DEBUG);
534 clear_tsk_thread_flag(target, TIF_DEBUG);
537 target->thread.debugreg[pos] = val;
/* Tracing self: also update the live hardware register. */
538 if (target == current)
540 #define DBREG(n) case n: set_debugreg(target->thread.debugreg[n], n); break
556 * Perform get_thread_area on behalf of the traced child.
/*
 * utrace regset .get for TLS: decode each GDT TLS descriptor in
 * thread.tls_array into the user_desc layout user space expects.
 * NOTE(review): the kbuf-vs-ubuf selection of 'ip' and some loop
 * bookkeeping lines are missing from this fragment.
 */
559 tls_get(struct task_struct *target,
560 const struct utrace_regset *regset,
561 unsigned int pos, unsigned int count,
562 void *kbuf, void __user *ubuf)
564 struct user_desc info, *ip;
565 const struct desc_struct *desc;
568 * Get the current Thread-Local Storage area:
/* Field extractors for the two 32-bit words of a GDT descriptor. */
571 #define GET_BASE(desc) ( \
572 (((desc)->a >> 16) & 0x0000ffff) | \
573 (((desc)->b << 16) & 0x00ff0000) | \
574 ( (desc)->b & 0xff000000) )
576 #define GET_LIMIT(desc) ( \
577 ((desc)->a & 0x0ffff) | \
578 ((desc)->b & 0xf0000) )
580 #define GET_32BIT(desc) (((desc)->b >> 22) & 1)
581 #define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
582 #define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
583 #define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
584 #define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
585 #define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
/* 'pos' is a byte offset in user_desc units into the TLS array. */
587 desc = &target->thread.tls_array[pos / sizeof(struct user_desc)];
589 memset(ip, 0, sizeof *ip);
590 for (; count > 0; count -= sizeof(struct user_desc), ++desc) {
/* Report the GDT entry number user space would pass to
 * set_thread_area for this slot. */
591 ip->entry_number = (desc - &target->thread.tls_array[0]
592 + GDT_ENTRY_TLS_MIN);
593 ip->base_addr = GET_BASE(desc);
594 ip->limit = GET_LIMIT(desc);
595 ip->seg_32bit = GET_32BIT(desc);
596 ip->contents = GET_CONTENTS(desc);
597 ip->read_exec_only = !GET_WRITABLE(desc);
598 ip->limit_in_pages = GET_LIMIT_PAGES(desc);
599 ip->seg_not_present = !GET_PRESENT(desc);
600 ip->useable = GET_USEABLE(desc);
/* User-buffer path copies the decoded struct out per entry. */
605 if (__copy_to_user(ubuf, &info, sizeof(info)))
607 ubuf += sizeof(info);
615 * Perform set_thread_area on behalf of the traced child.
/*
 * utrace regset .set for TLS: validate and encode each user_desc into
 * a staging array, then install the whole batch into thread.tls_array
 * (and the live GDT when tracing self). NOTE(review): the validation
 * of entry_number and the preemption-disable lines are missing here.
 */
618 tls_set(struct task_struct *target,
619 const struct utrace_regset *regset,
620 unsigned int pos, unsigned int count,
621 const void *kbuf, const void __user *ubuf)
623 struct user_desc info;
624 struct desc_struct *desc;
/* Staged copy so a mid-loop fault leaves the task's TLS untouched. */
625 struct desc_struct newtls[GDT_ENTRY_TLS_ENTRIES];
629 pos /= sizeof(struct user_desc);
630 count /= sizeof(struct user_desc);
633 for (i = 0; i < count; ++i, ++desc) {
634 const struct user_desc *ip;
637 kbuf += sizeof(struct user_desc);
641 if (__copy_from_user(&info, ubuf, sizeof(info)))
643 ubuf += sizeof(struct user_desc);
/* Encode user_desc fields into the hardware descriptor words. */
650 desc->a = LDT_entry_a(ip);
651 desc->b = LDT_entry_b(ip);
656 * We must not get preempted while modifying the TLS.
659 memcpy(&target->thread.tls_array[pos], newtls,
660 count * sizeof(newtls[0]));
661 if (target == current)
662 load_TLS(&target->thread, cpu);
670 * Determine how many TLS slots are in use.
/* Scan from the highest TLS slot downward; a slot with any nonzero
 * descriptor word counts as in use. */
673 tls_active(struct task_struct *target, const struct utrace_regset *regset)
676 for (i = GDT_ENTRY_TLS_ENTRIES; i > 0; --i) {
677 struct desc_struct *desc = &target->thread.tls_array[i - 1];
678 if ((desc->a | desc->b) != 0)
686 * These are our native regset flavors.
/*
 * Index order matters: arch_ptrace below addresses these by number
 * (0 = general regs, 1 = i387 FP, 2 = FXSR, 3 = TLS, 4 = debug regs).
 */
689 static const struct utrace_regset native_regsets[] = {
691 .n = FRAME_SIZE, .size = sizeof(long), .align = sizeof(long),
692 .get = genregs_get, .set = genregs_set
695 .n = sizeof(struct user_i387_struct) / sizeof(long),
696 .size = sizeof(long), .align = sizeof(long),
697 .active = fpregs_active,
698 .get = fpregs_get, .set = fpregs_set
701 .n = sizeof(struct user_fxsr_struct) / sizeof(long),
702 .size = sizeof(long), .align = sizeof(long),
703 .active = fpxregs_active,
704 .get = fpxregs_get, .set = fpxregs_set
/* TLS regset is indexed by GDT entry number, hence the bias. */
707 .n = GDT_ENTRY_TLS_ENTRIES,
708 .bias = GDT_ENTRY_TLS_MIN,
709 .size = sizeof(struct user_desc),
710 .align = sizeof(struct user_desc),
711 .active = tls_active, .get = tls_get, .set = tls_set
714 .n = 8, .size = sizeof(long), .align = sizeof(long),
715 .active = dbregs_active,
716 .get = dbregs_get, .set = dbregs_set
/* Native i386 regset view, exported for utrace clients. */
720 const struct utrace_regset_view utrace_i386_native = {
721 .name = "i386", .e_machine = EM_386,
722 .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
724 EXPORT_SYMBOL_GPL(utrace_i386_native);
/*
 * Layout of the legacy PTRACE_PEEKUSR/POKEUSR 'struct user' area:
 * general regs first, then a hole, then the eight u_debugreg slots
 * (mapped to regset 4 per the third segment).
 */
727 static const struct ptrace_layout_segment i386_uarea[] = {
728 {0, FRAME_SIZE*4, 0, 0},
729 {FRAME_SIZE*4, offsetof(struct user, u_debugreg[0]), -1, 0},
730 {offsetof(struct user, u_debugreg[0]),
731 offsetof(struct user, u_debugreg[8]), 4, 0},
/*
 * i386-specific ptrace requests, dispatched via utrace helpers.
 * Regset numbers match native_regsets: 0 = GP, 1 = FP, 2 = FPX,
 * 3 = TLS. NOTE(review): the switch statement and some case labels
 * (PEEKUSR/POKEUSR/GETREGS/SETREGS) are missing from this fragment.
 */
735 int arch_ptrace(long *req, struct task_struct *child,
736 struct utrace_attached_engine *engine,
737 unsigned long addr, unsigned long data, long *val)
741 return ptrace_peekusr(child, engine, i386_uarea, addr, data);
743 return ptrace_pokeusr(child, engine, i386_uarea, addr, data);
745 return ptrace_whole_regset(child, engine, data, 0, 0);
747 return ptrace_whole_regset(child, engine, data, 0, 1);
748 case PTRACE_GETFPREGS:
749 return ptrace_whole_regset(child, engine, data, 1, 0);
750 case PTRACE_SETFPREGS:
751 return ptrace_whole_regset(child, engine, data, 1, 1);
752 case PTRACE_GETFPXREGS:
753 return ptrace_whole_regset(child, engine, data, 2, 0);
754 case PTRACE_SETFPXREGS:
755 return ptrace_whole_regset(child, engine, data, 2, 1);
756 case PTRACE_GET_THREAD_AREA:
757 case PTRACE_SET_THREAD_AREA:
758 return ptrace_onereg_access(child, engine,
759 utrace_native_view(current), 3,
760 addr, (void __user *)data,
761 *req == PTRACE_SET_THREAD_AREA);
/*
 * Deliver a synthetic SIGTRAP (TRAP_BRKPT) to a traced task, filling
 * in trap_no/error_code so the handler sees a debug-trap frame.
 */
767 void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
771 tsk->thread.trap_no = 1;
772 tsk->thread.error_code = error_code;
774 memset(&info, 0, sizeof(info));
775 info.si_signo = SIGTRAP;
776 info.si_code = TRAP_BRKPT;
/* Only report a fault address for genuine user-mode frames. */
779 info.si_addr = user_mode_vm(regs) ? (void __user *) regs->eip : NULL;
781 /* Send us the fakey SIGTRAP */
782 force_sig_info(SIGTRAP, &info, tsk);
785 /* notification of system call entry/exit
786 * - triggered by current->work.syscall_trace
/*
 * Called from the syscall entry/exit assembly stubs; 'entryexit' is
 * nonzero on the exit side. Ordering: seccomp first, then audit exit,
 * then tracehook notifications, audit entry last.
 * NOTE(review): this block may continue past the end of this view.
 */
788 __attribute__((regparm(3)))
789 void do_syscall_trace(struct pt_regs *regs, int entryexit)
791 /* do the secure computing check first */
793 secure_computing(regs->orig_eax);
795 if (unlikely(current->audit_context) && entryexit)
796 audit_syscall_exit(AUDITSC_RESULT(regs->eax), regs->eax);
798 if (test_thread_flag(TIF_SYSCALL_TRACE))
799 tracehook_report_syscall(regs, entryexit);
/* Single-stepping across a syscall reports a trap on the exit side. */
801 if (test_thread_flag(TIF_SINGLESTEP) && entryexit) {
802 send_sigtrap(current, regs, 0); /* XXX */
803 tracehook_report_syscall_step(regs);
806 if (unlikely(current->audit_context) && !entryexit)
807 audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_eax,
808 regs->ebx, regs->ecx, regs->edx, regs->esi);