/*
 *  arch/s390/kernel/ptrace.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Based on PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/m68k/kernel/ptrace.c"
 *  Copyright (C) 1994 by Hamish Macdonald
 *  Taken from linux/kernel/ptrace.c and modified for M680x0.
 *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
 *
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *
 *  This file is subject to the terms and conditions of the GNU General
 *  Public License.  See the file README.legal in the main directory of
 *  this archive for more details.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/module.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/vs_base.h>

#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif
54 FixPerRegisters(struct task_struct *task)
59 regs = task_pt_regs(task);
60 per_info = (per_struct *) &task->thread.per_info;
61 per_info->control_regs.bits.em_instruction_fetch =
62 per_info->single_step | per_info->instruction_fetch;
64 if (per_info->single_step) {
65 per_info->control_regs.bits.starting_addr = 0;
67 if (test_thread_flag(TIF_31BIT))
68 per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
71 per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
73 per_info->control_regs.bits.starting_addr =
74 per_info->starting_addr;
75 per_info->control_regs.bits.ending_addr =
76 per_info->ending_addr;
79 * if any of the control reg tracing bits are on
80 * we switch on per in the psw
82 if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
83 regs->psw.mask |= PSW_MASK_PER;
85 regs->psw.mask &= ~PSW_MASK_PER;
87 if (per_info->control_regs.bits.em_storage_alteration)
88 per_info->control_regs.bits.storage_alt_space_ctl = 1;
90 per_info->control_regs.bits.storage_alt_space_ctl = 0;
94 * These registers are loaded in __switch_to on
95 * context switch. We must load them now if
96 * touching the current thread.
98 __ctl_load(per_info->control_regs.words.cr, 9, 11);
102 tracehook_enable_single_step(struct task_struct *task)
104 task->thread.per_info.single_step = 1;
105 FixPerRegisters(task);
109 tracehook_disable_single_step(struct task_struct *task)
111 task->thread.per_info.single_step = 0;
112 FixPerRegisters(task);
113 clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
117 tracehook_single_step_enabled(struct task_struct *task)
119 return task->thread.per_info.single_step;
124 genregs_get(struct task_struct *target,
125 const struct utrace_regset *regset,
126 unsigned int pos, unsigned int count,
127 void *kbuf, void __user *ubuf)
129 struct pt_regs *regs = task_pt_regs(target);
130 unsigned long pswmask;
133 /* Remove per bit from user psw. */
134 pswmask = regs->psw.mask & ~PSW_MASK_PER;
135 ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
136 &pswmask, PT_PSWMASK, PT_PSWADDR);
138 /* The rest of the PSW and the GPRs are directly on the stack. */
140 ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
141 ®s->psw.addr, PT_PSWADDR,
144 /* The ACRs are kept in the thread_struct. */
145 if (ret == 0 && count > 0 && pos < PT_ORIGGPR2) {
146 if (target == current)
147 save_access_regs(target->thread.acrs);
149 ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
151 PT_ACR0, PT_ORIGGPR2);
154 /* Finally, the ORIG_GPR2 value. */
156 ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
157 ®s->orig_gpr2, PT_ORIGGPR2, -1);
163 genregs_set(struct task_struct *target,
164 const struct utrace_regset *regset,
165 unsigned int pos, unsigned int count,
166 const void *kbuf, const void __user *ubuf)
168 struct pt_regs *regs = task_pt_regs(target);
171 /* Check for an invalid PSW mask. */
172 if (count > 0 && pos == PT_PSWMASK) {
173 unsigned long pswmask = regs->psw.mask;
174 ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
175 &pswmask, PT_PSWMASK, PT_PSWADDR);
176 if (pswmask != PSW_MASK_MERGE(PSW_USER_BITS, pswmask)
178 && pswmask != PSW_MASK_MERGE(PSW_USER32_BITS, pswmask)
181 /* Invalid psw mask. */
183 regs->psw.mask = pswmask;
184 FixPerRegisters(target);
187 /* The rest of the PSW and the GPRs are directly on the stack. */
189 ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
190 ®s->psw.addr, PT_PSWADDR,
193 /* I'd like to reject addresses without the
194 high order bit but older gdb's rely on it */
195 regs->psw.addr |= PSW_ADDR_AMODE;
199 /* The ACRs are kept in the thread_struct. */
200 if (ret == 0 && count > 0 && pos < PT_ORIGGPR2) {
201 if (target == current
202 && (pos != PT_ACR0 || count < sizeof(target->thread.acrs)))
203 save_access_regs(target->thread.acrs);
205 ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
207 PT_ACR0, PT_ORIGGPR2);
208 if (ret == 0 && target == current)
209 restore_access_regs(target->thread.acrs);
212 /* Finally, the ORIG_GPR2 value. */
214 ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
215 ®s->orig_gpr2, PT_ORIGGPR2, -1);
221 fpregs_get(struct task_struct *target,
222 const struct utrace_regset *regset,
223 unsigned int pos, unsigned int count,
224 void *kbuf, void __user *ubuf)
226 if (target == current)
227 save_fp_regs(&target->thread.fp_regs);
229 return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
230 &target->thread.fp_regs, 0, -1);
234 fpregs_set(struct task_struct *target,
235 const struct utrace_regset *regset,
236 unsigned int pos, unsigned int count,
237 const void *kbuf, const void __user *ubuf)
241 if (target == current && (pos != 0 || count != sizeof(s390_fp_regs)))
242 save_fp_regs(&target->thread.fp_regs);
244 /* If setting FPC, must validate it first. */
245 if (count > 0 && pos == 0) {
247 ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
248 &fpc, 0, sizeof(fpc));
252 if ((fpc & ~((unsigned long) FPC_VALID_MASK
253 << (BITS_PER_LONG - 32))) != 0)
256 memcpy(&target->thread.fp_regs, &fpc, sizeof(fpc));
260 ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
261 &target->thread.fp_regs, 0, -1);
263 if (ret == 0 && target == current)
264 restore_fp_regs(&target->thread.fp_regs);
270 per_info_get(struct task_struct *target,
271 const struct utrace_regset *regset,
272 unsigned int pos, unsigned int count,
273 void *kbuf, void __user *ubuf)
275 return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
276 &target->thread.per_info, 0, -1);
280 per_info_set(struct task_struct *target,
281 const struct utrace_regset *regset,
282 unsigned int pos, unsigned int count,
283 const void *kbuf, const void __user *ubuf)
285 int ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
286 &target->thread.per_info, 0, -1);
288 FixPerRegisters(target);
295 * These are our native regset flavors.
297 static const struct utrace_regset native_regsets[] = {
299 .size = sizeof(long), .align = sizeof(long),
300 .n = sizeof(s390_regs) / sizeof(long),
301 .get = genregs_get, .set = genregs_set
304 .size = sizeof(long), .align = sizeof(long),
305 .n = sizeof(s390_fp_regs) / sizeof(long),
306 .get = fpregs_get, .set = fpregs_set
309 .size = sizeof(long), .align = sizeof(long),
310 .n = sizeof(per_struct) / sizeof(long),
311 .get = per_info_get, .set = per_info_set
315 const struct utrace_regset_view utrace_s390_native_view = {
316 .name = UTS_MACHINE, .e_machine = ELF_ARCH,
317 .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
319 EXPORT_SYMBOL_GPL(utrace_s390_native_view);
324 s390_genregs_get(struct task_struct *target,
325 const struct utrace_regset *regset,
326 unsigned int pos, unsigned int count,
327 void *kbuf, void __user *ubuf)
329 struct pt_regs *regs = task_pt_regs(target);
332 /* Fake a 31 bit psw mask. */
333 if (count > 0 && pos == PT_PSWMASK / 2) {
334 u32 pswmask = PSW32_MASK_MERGE(PSW32_USER_BITS,
335 (u32) (regs->psw.mask >> 32));
336 ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
337 &pswmask, PT_PSWMASK / 2,
341 /* Fake a 31 bit psw address. */
342 if (ret == 0 && count > 0 && pos == PT_PSWADDR / 2) {
343 u32 pswaddr = (u32) regs->psw.addr | PSW32_ADDR_AMODE31;
344 ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
345 &pswaddr, PT_PSWADDR / 2,
349 /* The GPRs are directly on the stack. Just truncate them. */
350 while (ret == 0 && count > 0 && pos < PT_ACR0 / 2) {
351 u32 value = regs->gprs[(pos - PT_GPR0 / 2) / sizeof(u32)];
353 *(u32 *) kbuf = value;
356 else if (put_user(value, (u32 __user *) ubuf))
361 count -= sizeof(u32);
364 /* The ACRs are kept in the thread_struct. */
365 if (ret == 0 && count > 0 && pos < PT_ACR0 / 2 + NUM_ACRS * ACR_SIZE) {
366 if (target == current)
367 save_access_regs(target->thread.acrs);
369 ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
372 PT_ACR0 / 2 + NUM_ACRS * ACR_SIZE);
375 /* Finally, the ORIG_GPR2 value. */
378 *(u32 *) kbuf = regs->orig_gpr2;
379 else if (put_user((u32) regs->orig_gpr2,
380 (u32 __user *) ubuf))
388 s390_genregs_set(struct task_struct *target,
389 const struct utrace_regset *regset,
390 unsigned int pos, unsigned int count,
391 const void *kbuf, const void __user *ubuf)
393 struct pt_regs *regs = task_pt_regs(target);
396 /* Check for an invalid PSW mask. */
397 if (count > 0 && pos == PT_PSWMASK / 2) {
399 ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
400 &pswmask, PT_PSWMASK / 2,
405 if (pswmask != PSW_MASK_MERGE(PSW_USER32_BITS, pswmask))
406 /* Invalid psw mask. */
409 /* Build a 64 bit psw mask from 31 bit mask. */
410 regs->psw.mask = PSW_MASK_MERGE(PSW_USER32_BITS,
411 (u64) pswmask << 32);
412 FixPerRegisters(target);
415 /* Build a 64 bit psw address from 31 bit address. */
416 if (count > 0 && pos == PT_PSWADDR / 2) {
418 ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
419 &pswaddr, PT_PSWADDR / 2,
422 /* Build a 64 bit psw mask from 31 bit mask. */
423 regs->psw.addr = pswaddr & PSW32_ADDR_INSN;
426 /* The GPRs are directly onto the stack. */
427 while (ret == 0 && count > 0 && pos < PT_ACR0 / 2) {
431 value = *(const u32 *) kbuf;
434 else if (get_user(value, (const u32 __user *) ubuf))
439 count -= sizeof(u32);
441 regs->gprs[(pos - PT_GPR0 / 2) / sizeof(u32)] = value;
444 /* The ACRs are kept in the thread_struct. */
445 if (count > 0 && pos < PT_ORIGGPR2 / 2) {
446 if (target == current
447 && (pos != PT_ACR0 / 2
448 || count < sizeof(target->thread.acrs)))
449 save_access_regs(target->thread.acrs);
451 ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
454 PT_ACR0 / 2 + NUM_ACRS * ACR_SIZE);
456 if (ret == 0 && target == current)
457 restore_access_regs(target->thread.acrs);
460 /* Finally, the ORIG_GPR2 value. */
461 if (ret == 0 && count > 0) {
464 value = *(const u32 *) kbuf;
465 else if (get_user(value, (const u32 __user *) ubuf))
467 regs->orig_gpr2 = value;
475 * This is magic. See per_struct and per_struct32.
476 * By incident the offsets in per_struct are exactly
477 * twice the offsets in per_struct32 for all fields.
478 * The 8 byte fields need special handling though,
479 * because the second half (bytes 4-7) is needed and
480 * not the first half.
483 offset_from_per32(unsigned int offset)
485 BUILD_BUG_ON(offsetof(per_struct32, control_regs) != 0);
486 if (offset - offsetof(per_struct32, control_regs) < 3*sizeof(u32)
487 || (offset >= offsetof(per_struct32, starting_addr) &&
488 offset <= offsetof(per_struct32, ending_addr))
489 || offset == offsetof(per_struct32, lowcore.words.address))
490 offset = offset*2 + 4;
497 s390_per_info_get(struct task_struct *target,
498 const struct utrace_regset *regset,
499 unsigned int pos, unsigned int count,
500 void *kbuf, void __user *ubuf)
503 u32 val = *(u32 *) ((char *) &target->thread.per_info
504 + offset_from_per32 (pos));
509 else if (put_user(val, (u32 __user *) ubuf))
514 count -= sizeof(u32);
520 s390_per_info_set(struct task_struct *target,
521 const struct utrace_regset *regset,
522 unsigned int pos, unsigned int count,
523 const void *kbuf, const void __user *ubuf)
529 val = *(const u32 *) kbuf;
532 else if (get_user(val, (const u32 __user *) ubuf))
537 count -= sizeof(u32);
539 *(u32 *) ((char *) &target->thread.per_info
540 + offset_from_per32 (pos)) = val;
546 static const struct utrace_regset s390_compat_regsets[] = {
548 .size = sizeof(u32), .align = sizeof(u32),
549 .n = sizeof(s390_regs) / sizeof(long),
550 .get = s390_genregs_get, .set = s390_genregs_set
553 .size = sizeof(u32), .align = sizeof(u32),
554 .n = sizeof(s390_fp_regs) / sizeof(u32),
555 .get = fpregs_get, .set = fpregs_set
558 .size = sizeof(u32), .align = sizeof(u32),
559 .n = sizeof(per_struct) / sizeof(u32),
560 .get = s390_per_info_get, .set = s390_per_info_set
564 const struct utrace_regset_view utrace_s390_compat_view = {
565 .name = "s390", .e_machine = EM_S390,
566 .regsets = s390_compat_regsets, .n = ARRAY_SIZE(s390_compat_regsets)
568 EXPORT_SYMBOL_GPL(utrace_s390_compat_view);
569 #endif /* CONFIG_COMPAT */
573 static const struct ptrace_layout_segment s390_uarea[] = {
574 {PT_PSWMASK, PT_FPC, 0, 0},
575 {PT_FPC, PT_CR_9, 1, 0},
576 {PT_CR_9, PT_IEEE_IP, 2, 0},
577 {PT_IEEE_IP, sizeof(struct user), -1, -1},
581 int arch_ptrace(long *request, struct task_struct *child,
582 struct utrace_attached_engine *engine,
583 unsigned long addr, unsigned long data, long *val)
593 * Stupid gdb peeks/pokes the access registers in 64 bit with
594 * an alignment of 4. Programmers from hell...
596 if (addr >= PT_ACR0 && addr < PT_ACR15) {
599 tmp = *(unsigned long *)
600 ((char *) child->thread.acrs + addr - PT_ACR0);
601 return put_user(tmp, (unsigned long __user *) data);
603 else if (addr == PT_ACR15) {
605 * Very special case: old & broken 64 bit gdb reading
606 * from acrs[15]. Result is a 64 bit value. Read the
607 * 32 bit acrs[15] value and shift it by 32. Sick...
609 tmp = ((unsigned long) child->thread.acrs[15]) << 32;
610 return put_user(tmp, (unsigned long __user *) data);
613 return ptrace_peekusr(child, engine, s390_uarea, addr, data);
616 if (addr >= PT_ACR0 && addr < PT_ACR15) {
619 *(unsigned long *) ((char *) child->thread.acrs
620 + addr - PT_ACR0) = data;
623 else if (addr == PT_ACR15) {
625 * Very special case: old & broken 64 bit gdb writing
626 * to acrs[15] with a 64 bit value. Ignore the lower
627 * half of the value and write the upper 32 bit to
630 child->thread.acrs[15] = data >> 32;
634 return ptrace_pokeusr(child, engine, s390_uarea, addr, data);
636 case PTRACE_PEEKUSR_AREA:
637 case PTRACE_POKEUSR_AREA:
638 if (copy_from_user(&parea, (ptrace_area __user *) addr,
641 if ((parea.kernel_addr | parea.len) & (sizeof(data) - 1))
643 return ptrace_layout_access(child, engine,
644 utrace_native_view(current),
646 parea.kernel_addr, parea.len,
647 (void __user *) parea.process_addr,
649 *request == PTRACE_POKEUSR_AREA);
651 case PTRACE_PEEKTEXT:
652 case PTRACE_PEEKDATA:
653 /* Remove high order bit from address (only for 31 bit). */
654 addr &= PSW_ADDR_INSN;
655 /* read word at location addr. */
656 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
657 if (copied != sizeof(tmp))
659 return put_user(tmp, (unsigned long __user *) data);
661 case PTRACE_POKETEXT:
662 case PTRACE_POKEDATA:
663 /* Remove high order bit from address (only for 31 bit). */
664 addr &= PSW_ADDR_INSN;
665 /* write the word at location addr. */
666 copied = access_process_vm(child, addr, &data, sizeof(data),1);
667 if (copied != sizeof(data))
676 static const struct ptrace_layout_segment s390_compat_uarea[] = {
677 {PT_PSWMASK / 2, PT_FPC / 2, 0, 0},
678 {PT_FPC / 2, PT_CR_9 / 2, 1, 0},
679 {PT_CR_9 / 2, PT_IEEE_IP / 2, 2, 0},
680 {PT_IEEE_IP / 2, sizeof(struct user32), -1, -1},
684 int arch_compat_ptrace(compat_long_t *request,
685 struct task_struct *child,
686 struct utrace_attached_engine *engine,
687 compat_ulong_t addr, compat_ulong_t data,
690 ptrace_area_emu31 parea;
694 return ptrace_compat_peekusr(child, engine, s390_compat_uarea,
697 return ptrace_compat_pokeusr(child, engine, s390_compat_uarea,
699 case PTRACE_PEEKUSR_AREA:
700 case PTRACE_POKEUSR_AREA:
701 if (copy_from_user(&parea, ((ptrace_area_emu31 __user *)
702 (unsigned long) addr),
705 if ((parea.kernel_addr | parea.len) & (sizeof(data) - 1))
707 return ptrace_layout_access(child, engine,
708 utrace_native_view(current),
710 parea.kernel_addr, parea.len,
712 (unsigned long) parea.process_addr,
714 *request == PTRACE_POKEUSR_AREA);
719 #endif /* CONFIG_COMPAT */
720 #endif /* CONFIG_PTRACE */
724 syscall_trace(struct pt_regs *regs, int entryexit)
726 if (unlikely(current->audit_context) && entryexit)
727 audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), regs->gprs[2]);
729 if (test_thread_flag(TIF_SYSCALL_TRACE)) {
730 tracehook_report_syscall(regs, entryexit);
733 * If the debugger has set an invalid system call number,
734 * we prepare to skip the system call restart handling.
736 if (!entryexit && regs->gprs[2] >= NR_syscalls)
740 if (unlikely(current->audit_context) && !entryexit)
741 audit_syscall_entry(test_thread_flag(TIF_31BIT)?AUDIT_ARCH_S390:AUDIT_ARCH_S390X,
742 regs->gprs[2], regs->orig_gpr2, regs->gprs[3],
743 regs->gprs[4], regs->gprs[5]);