/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
8 #include <linux/kernel.h>
9 #include <linux/sched.h>
11 #include <linux/smp.h>
12 #include <linux/smp_lock.h>
13 #include <linux/errno.h>
14 #include <linux/ptrace.h>
15 #include <linux/tracehook.h>
16 #include <linux/user.h>
17 #include <linux/security.h>
18 #include <linux/audit.h>
19 #include <linux/seccomp.h>
20 #include <linux/signal.h>
21 #include <linux/module.h>
23 #include <asm/tracehook.h>
24 #include <asm/uaccess.h>
25 #include <asm/pgtable.h>
26 #include <asm/system.h>
27 #include <asm/processor.h>
29 #include <asm/debugreg.h>
/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), NT(14), IOPL(12-13), IF(9).
 * Also masks reserved bits (31-22, 15, 5, 3, 1).
 */
39 #define FLAG_MASK 0x00050dd5
/*
 * Offset of eflags on child stack..
 */
44 #define EFL_OFFSET ((EFL-2)*4-sizeof(struct pt_regs))
46 static inline struct pt_regs *get_child_regs(struct task_struct *task)
48 void *stack_top = (void *)task->thread.esp0;
49 return stack_top - sizeof(struct pt_regs);
53 * this routine will get a word off of the processes privileged stack.
54 * the offset is how far from the base addr as stored in the TSS.
55 * this routine assumes that all the privileged stacks are in our
58 static inline int get_stack_long(struct task_struct *task, int offset)
62 stack = (unsigned char *)task->thread.esp0;
64 return (*((int *)stack));
68 * this routine will put a word on the processes privileged stack.
69 * the offset is how far from the base addr as stored in the TSS.
70 * this routine assumes that all the privileged stacks are in our
73 static inline int put_stack_long(struct task_struct *task, int offset,
76 unsigned char * stack;
78 stack = (unsigned char *) task->thread.esp0;
80 *(unsigned long *) stack = data;
84 static int putreg(struct task_struct *child,
85 unsigned long regno, unsigned long value)
89 if (value && (value & 3) != 3)
91 child->thread.fs = value;
94 if (value && (value & 3) != 3)
96 child->thread.gs = value;
100 if (value && (value & 3) != 3)
106 if ((value & 3) != 3)
112 value |= get_stack_long(child, EFL_OFFSET) & ~FLAG_MASK;
113 clear_tsk_thread_flag(child, TIF_FORCED_TF);
118 put_stack_long(child, regno - sizeof(struct pt_regs), value);
122 static unsigned long getreg(struct task_struct *child,
125 unsigned long retval = ~0UL;
127 switch (regno >> 2) {
129 retval = child->thread.fs;
132 retval = child->thread.gs;
135 if (test_tsk_thread_flag(child, TIF_FORCED_TF))
136 retval &= ~X86_EFLAGS_TF;
148 regno = regno - sizeof(struct pt_regs);
149 retval &= get_stack_long(child, regno);
155 #define LDT_SEGMENT 4
157 static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_regs *regs)
159 unsigned long addr, seg;
162 seg = regs->xcs & 0xffff;
163 if (regs->eflags & VM_MASK) {
164 addr = (addr & 0xffff) + (seg << 4);
169 * We'll assume that the code segments in the GDT
170 * are all zero-based. That is largely true: the
171 * TLS segments are used for data, and the PNPBIOS
172 * and APM bios ones we just ignore here.
174 if (seg & LDT_SEGMENT) {
178 down(&child->mm->context.sem);
179 desc = child->mm->context.ldt + (seg & ~7);
180 base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000);
182 /* 16-bit code segment? */
183 if (!((desc[1] >> 22) & 1))
186 up(&child->mm->context.sem);
/*
 * Return non-zero if the child's next instruction is popf (possibly
 * behind prefixes), i.e. an instruction that will itself rewrite
 * eflags - in which case the TF bit we set is not "ours" to clear.
 */
static inline int is_at_popf(struct task_struct *child, struct pt_regs *regs)
{
	int i, copied;
	unsigned char opcode[16];
	unsigned long addr = convert_eip_to_linear(child, regs);

	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
	for (i = 0; i < copied; i++) {
		switch (opcode[i]) {
		/* popf */
		case 0x9d:
			return 1;

		/* opcode and address size prefixes */
		case 0x66: case 0x67:
			continue;
		/* irrelevant prefixes (segment overrides and repeats) */
		case 0x26: case 0x2e:
		case 0x36: case 0x3e:
		case 0x64: case 0x65:
		case 0xf0: case 0xf2: case 0xf3:
			continue;

		/*
		 * pushf: NOTE! We should probably not let
		 * the user see the TF bit being set. But
		 * it's more pain than it's worth to avoid
		 * it, and a debugger could emulate this
		 * all in user space if it _really_ cares.
		 */
		case 0x9c:
		default:
			return 0;
		}
	}
	return 0;
}
228 void tracehook_enable_single_step(struct task_struct *child)
230 struct pt_regs *regs = get_child_regs(child);
233 * Always set TIF_SINGLESTEP - this guarantees that
234 * we single-step system calls etc.. This will also
235 * cause us to set TF when returning to user mode.
237 set_tsk_thread_flag(child, TIF_SINGLESTEP);
240 * If TF was already set, don't do anything else
242 if (regs->eflags & X86_EFLAGS_TF)
245 /* Set TF on the kernel stack.. */
246 regs->eflags |= X86_EFLAGS_TF;
249 * ..but if TF is changed by the instruction we will trace,
250 * don't mark it as being "us" that set it, so that we
251 * won't clear it by hand later.
253 if (is_at_popf(child, regs))
256 set_tsk_thread_flag(child, TIF_FORCED_TF);
259 void tracehook_disable_single_step(struct task_struct *child)
261 /* Always clear TIF_SINGLESTEP... */
262 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
264 /* But touch TF only if it was set by us.. */
265 if (test_and_clear_tsk_thread_flag(child, TIF_FORCED_TF)) {
266 struct pt_regs *regs = get_child_regs(child);
267 regs->eflags &= ~X86_EFLAGS_TF;
273 genregs_get(struct task_struct *target,
274 const struct utrace_regset *regset,
275 unsigned int pos, unsigned int count,
276 void *kbuf, void __user *ubuf)
279 unsigned long *kp = kbuf;
281 *kp++ = getreg(target, pos);
287 unsigned long __user *up = ubuf;
289 if (__put_user(getreg(target, pos), up++))
300 genregs_set(struct task_struct *target,
301 const struct utrace_regset *regset,
302 unsigned int pos, unsigned int count,
303 const void *kbuf, const void __user *ubuf)
308 const unsigned long *kp = kbuf;
309 while (!ret && count > 0) {
310 ret = putreg(target, pos, *kp++);
317 const unsigned long __user *up = ubuf;
318 while (!ret && count > 0) {
320 ret = __get_user(val, up++);
322 ret = putreg(target, pos, val);
332 fpregs_active(struct task_struct *target, const struct utrace_regset *regset)
334 return tsk_used_math(target) ? regset->n : 0;
338 fpregs_get(struct task_struct *target,
339 const struct utrace_regset *regset,
340 unsigned int pos, unsigned int count,
341 void *kbuf, void __user *ubuf)
343 struct user_i387_struct fp;
346 if (tsk_used_math(target)) {
347 if (target == current)
353 ret = get_fpregs(&fp, target);
355 ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
362 fpregs_set(struct task_struct *target,
363 const struct utrace_regset *regset,
364 unsigned int pos, unsigned int count,
365 const void *kbuf, const void __user *ubuf)
367 struct user_i387_struct fp;
370 if (tsk_used_math(target)) {
371 if (target == current)
374 else if (pos == 0 && count == sizeof(fp))
375 set_stopped_child_used_math(target);
379 if (pos > 0 || count < sizeof(fp)) {
380 ret = get_fpregs(&fp, target);
382 ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
388 else if (kbuf == NULL) {
389 if (__copy_from_user(&fp, ubuf, sizeof(fp)))
394 return set_fpregs(target, kbuf);
398 fpxregs_active(struct task_struct *target, const struct utrace_regset *regset)
400 return !cpu_has_fxsr ? -ENODEV : tsk_used_math(target) ? regset->n : 0;
404 fpxregs_get(struct task_struct *target,
405 const struct utrace_regset *regset,
406 unsigned int pos, unsigned int count,
407 void *kbuf, void __user *ubuf)
412 if (tsk_used_math(target))
417 return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
418 &target->thread.i387.fxsave, 0, -1);
422 fpxregs_set(struct task_struct *target,
423 const struct utrace_regset *regset,
424 unsigned int pos, unsigned int count,
425 const void *kbuf, const void __user *ubuf)
432 if (tsk_used_math(target))
434 else if (pos == 0 && count == sizeof(target->thread.i387.fxsave))
435 set_stopped_child_used_math(target);
439 ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
440 &target->thread.i387.fxsave, 0, -1);
442 updated_fpxregs(target);
449 dbregs_active(struct task_struct *tsk, const struct utrace_regset *regset)
451 if (tsk->thread.debugreg[DR_CONTROL] | tsk->thread.debugreg[DR_STATUS])
457 dbregs_get(struct task_struct *target,
458 const struct utrace_regset *regset,
459 unsigned int pos, unsigned int count,
460 void *kbuf, void __user *ubuf)
463 * The hardware updates the status register on a debug trap,
464 * but do_debug (traps.c) save it for us when that happens.
465 * So whether the target is current or not, thread.debugreg is good.
468 return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
469 target->thread.debugreg, 0, -1);
473 dbregs_set(struct task_struct *target,
474 const struct utrace_regset *regset,
475 unsigned int pos, unsigned int count,
476 const void *kbuf, const void __user *ubuf)
478 for (pos >>= 2, count >>= 2; count > 0; --count, ++pos) {
483 val = *(const unsigned long *) kbuf;
484 kbuf += sizeof(unsigned long);
487 if (__get_user(val, (unsigned long __user *) ubuf))
489 ubuf += sizeof(unsigned long);
493 if (val >= TASK_SIZE - 3)
505 /* Sanity-check data. Take one half-byte at once with
506 * check = (val >> (16 + 4*i)) & 0xf. It contains the
507 * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits
508 * 2 and 3 are LENi. Given a list of invalid values,
509 * we do mask |= 1 << invalid_value, so that
510 * (mask >> check) & 1 is a correct test for invalid
513 * R/Wi contains the type of the breakpoint /
514 * watchpoint, LENi contains the length of the watched
515 * data in the watchpoint case.
517 * The invalid values are:
518 * - LENi == 0x10 (undefined), so mask |= 0x0f00.
519 * - R/Wi == 0x10 (break on I/O reads or writes), so
521 * - R/Wi == 0x00 && LENi != 0x00, so we have mask |=
524 * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54.
526 * See the Intel Manual "System Programming Guide",
529 * Note that LENi == 0x10 is defined on x86_64 in long
530 * mode (i.e. even for 32-bit userspace software, but
531 * 64-bit kernel), so the x86_64 mask value is 0x5454.
532 * See the AMD manual no. 24593 (AMD64 System
534 val &= ~DR_CONTROL_RESERVED;
535 for (i = 0; i < 4; i++)
536 if ((0x5f54 >> ((val >> (16 + 4*i)) & 0xf)) & 1)
539 set_tsk_thread_flag(target, TIF_DEBUG);
541 clear_tsk_thread_flag(target, TIF_DEBUG);
544 target->thread.debugreg[pos] = val;
545 if (target == current)
547 #define DBREG(n) case n: set_debugreg(target->thread.debugreg[n], n); break
563 * Perform get_thread_area on behalf of the traced child.
566 tls_get(struct task_struct *target,
567 const struct utrace_regset *regset,
568 unsigned int pos, unsigned int count,
569 void *kbuf, void __user *ubuf)
571 struct user_desc info, *ip;
572 const struct desc_struct *desc;
575 * Get the current Thread-Local Storage area:
578 #define GET_BASE(desc) ( \
579 (((desc)->a >> 16) & 0x0000ffff) | \
580 (((desc)->b << 16) & 0x00ff0000) | \
581 ( (desc)->b & 0xff000000) )
583 #define GET_LIMIT(desc) ( \
584 ((desc)->a & 0x0ffff) | \
585 ((desc)->b & 0xf0000) )
587 #define GET_32BIT(desc) (((desc)->b >> 22) & 1)
588 #define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
589 #define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
590 #define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
591 #define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
592 #define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
594 desc = &target->thread.tls_array[pos / sizeof(struct user_desc)];
596 memset(ip, 0, sizeof *ip);
597 for (; count > 0; count -= sizeof(struct user_desc), ++desc) {
598 ip->entry_number = (desc - &target->thread.tls_array[0]
599 + GDT_ENTRY_TLS_MIN);
600 ip->base_addr = GET_BASE(desc);
601 ip->limit = GET_LIMIT(desc);
602 ip->seg_32bit = GET_32BIT(desc);
603 ip->contents = GET_CONTENTS(desc);
604 ip->read_exec_only = !GET_WRITABLE(desc);
605 ip->limit_in_pages = GET_LIMIT_PAGES(desc);
606 ip->seg_not_present = !GET_PRESENT(desc);
607 ip->useable = GET_USEABLE(desc);
612 if (__copy_to_user(ubuf, &info, sizeof(info)))
614 ubuf += sizeof(info);
622 * Perform set_thread_area on behalf of the traced child.
625 tls_set(struct task_struct *target,
626 const struct utrace_regset *regset,
627 unsigned int pos, unsigned int count,
628 const void *kbuf, const void __user *ubuf)
630 struct user_desc info;
631 struct desc_struct *desc;
632 struct desc_struct newtls[GDT_ENTRY_TLS_ENTRIES];
636 pos /= sizeof(struct user_desc);
637 count /= sizeof(struct user_desc);
640 for (i = 0; i < count; ++i, ++desc) {
641 const struct user_desc *ip;
644 kbuf += sizeof(struct user_desc);
648 if (__copy_from_user(&info, ubuf, sizeof(info)))
650 ubuf += sizeof(struct user_desc);
657 desc->a = LDT_entry_a(ip);
658 desc->b = LDT_entry_b(ip);
663 * We must not get preempted while modifying the TLS.
666 memcpy(&target->thread.tls_array[pos], newtls,
667 count * sizeof(newtls[0]));
668 if (target == current)
669 load_TLS(&target->thread, cpu);
677 * Determine how many TLS slots are in use.
680 tls_active(struct task_struct *target, const struct utrace_regset *regset)
683 for (i = GDT_ENTRY_TLS_ENTRIES; i > 0; --i) {
684 struct desc_struct *desc = &target->thread.tls_array[i - 1];
685 if ((desc->a | desc->b) != 0)
693 * These are our native regset flavors.
696 static const struct utrace_regset native_regsets[] = {
698 .n = FRAME_SIZE, .size = sizeof(long), .align = sizeof(long),
699 .get = genregs_get, .set = genregs_set
702 .n = sizeof(struct user_i387_struct) / sizeof(long),
703 .size = sizeof(long), .align = sizeof(long),
704 .active = fpregs_active,
705 .get = fpregs_get, .set = fpregs_set
708 .n = sizeof(struct user_fxsr_struct) / sizeof(long),
709 .size = sizeof(long), .align = sizeof(long),
710 .active = fpxregs_active,
711 .get = fpxregs_get, .set = fpxregs_set
714 .n = GDT_ENTRY_TLS_ENTRIES,
715 .bias = GDT_ENTRY_TLS_MIN,
716 .size = sizeof(struct user_desc),
717 .align = sizeof(struct user_desc),
718 .active = tls_active, .get = tls_get, .set = tls_set
721 .n = 8, .size = sizeof(long), .align = sizeof(long),
722 .active = dbregs_active,
723 .get = dbregs_get, .set = dbregs_set
727 const struct utrace_regset_view utrace_i386_native = {
728 .name = "i386", .e_machine = EM_386,
729 .regsets = native_regsets,
730 .n = sizeof native_regsets / sizeof native_regsets[0],
732 EXPORT_SYMBOL_GPL(utrace_i386_native);
735 static const struct ptrace_layout_segment i386_uarea[] = {
736 {0, FRAME_SIZE*4, 0, 0},
737 {offsetof(struct user, u_debugreg[0]),
738 offsetof(struct user, u_debugreg[8]), 4, 0},
742 fastcall int arch_ptrace(long *req, struct task_struct *child,
743 struct utrace_attached_engine *engine,
744 unsigned long addr, unsigned long data, long *val)
748 return ptrace_peekusr(child, engine, i386_uarea, addr, data);
750 return ptrace_pokeusr(child, engine, i386_uarea, addr, data);
752 return ptrace_whole_regset(child, engine, data, 0, 0);
754 return ptrace_whole_regset(child, engine, data, 0, 1);
755 case PTRACE_GETFPREGS:
756 return ptrace_whole_regset(child, engine, data, 1, 0);
757 case PTRACE_SETFPREGS:
758 return ptrace_whole_regset(child, engine, data, 1, 1);
759 case PTRACE_GETFPXREGS:
760 return ptrace_whole_regset(child, engine, data, 2, 0);
761 case PTRACE_SETFPXREGS:
762 return ptrace_whole_regset(child, engine, data, 2, 1);
763 case PTRACE_GET_THREAD_AREA:
764 case PTRACE_SET_THREAD_AREA:
765 return ptrace_onereg_access(child, engine,
766 utrace_native_view(current), 3,
767 addr, (void __user *)data,
768 *req == PTRACE_SET_THREAD_AREA);
774 void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
778 tsk->thread.trap_no = 1;
779 tsk->thread.error_code = error_code;
781 memset(&info, 0, sizeof(info));
782 info.si_signo = SIGTRAP;
783 info.si_code = TRAP_BRKPT;
786 info.si_addr = user_mode_vm(regs) ? (void __user *) regs->eip : NULL;
788 /* Send us the fakey SIGTRAP */
789 force_sig_info(SIGTRAP, &info, tsk);
792 /* notification of system call entry/exit
793 * - triggered by current->work.syscall_trace
795 __attribute__((regparm(3)))
796 void do_syscall_trace(struct pt_regs *regs, int entryexit)
798 /* do the secure computing check first */
800 secure_computing(regs->orig_eax);
802 if (unlikely(current->audit_context) && entryexit)
803 audit_syscall_exit(AUDITSC_RESULT(regs->eax), regs->eax);
805 if (test_thread_flag(TIF_SYSCALL_TRACE))
806 tracehook_report_syscall(regs, entryexit);
808 if (test_thread_flag(TIF_SINGLESTEP) && entryexit) {
809 send_sigtrap(current, regs, 0); /* XXX */
810 tracehook_report_syscall_step(regs);
813 if (unlikely(current->audit_context) && !entryexit)
814 audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_eax,
815 regs->ebx, regs->ecx, regs->edx, regs->esi);