2 * arch/s390/kernel/ptrace.c
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
9 * Based on PowerPC version
10 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
12 * Derived from "arch/m68k/kernel/ptrace.c"
13 * Copyright (C) 1994 by Hamish Macdonald
14 * Taken from linux/kernel/ptrace.c and modified for M680x0.
15 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
17 * Modified by Cort Dougan (cort@cs.nmt.edu)
20 * This file is subject to the terms and conditions of the GNU General
21 * Public License. See the file README.legal in the main directory of
22 * this archive for more details.
25 #include <linux/kernel.h>
26 #include <linux/sched.h>
28 #include <linux/smp.h>
29 #include <linux/smp_lock.h>
30 #include <linux/errno.h>
31 #include <linux/ptrace.h>
32 #include <linux/tracehook.h>
33 #include <linux/module.h>
34 #include <linux/user.h>
35 #include <linux/security.h>
36 #include <linux/audit.h>
37 #include <linux/signal.h>
39 #include <asm/segment.h>
41 #include <asm/pgtable.h>
42 #include <asm/pgalloc.h>
43 #include <asm/system.h>
44 #include <asm/uaccess.h>
45 #include <asm/unistd.h>
49 #include "compat_ptrace.h"
/*
 * FixPerRegisters - recompute the PER (Program Event Recording) control
 * register shadow in task->thread.per_info from the user-visible PER
 * settings, and switch the PSW PER mask bit on/off to match.
 * NOTE(review): source extraction is missing lines here (braces/else
 * branches not visible); comments describe only the visible logic.
 */
53 FixPerRegisters(struct task_struct *task)
58 	regs = task_pt_regs(task);
59 	per_info = (per_struct *) &task->thread.per_info;
/* Instruction-fetch events are needed for both explicit tracing and
 * single-step emulation. */
60 	per_info->control_regs.bits.em_instruction_fetch =
61 		per_info->single_step | per_info->instruction_fetch;
63 	if (per_info->single_step) {
/* Single step: watch the whole address space. */
64 		per_info->control_regs.bits.starting_addr = 0;
/* 31-bit tasks only address up to 2G-1; 64-bit uses PSW_ADDR_INSN. */
66 		if (test_thread_flag(TIF_31BIT))
67 			per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
70 			per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
/* Not single-stepping: use the user-supplied PER address range. */
72 		per_info->control_regs.bits.starting_addr =
73 			per_info->starting_addr;
74 		per_info->control_regs.bits.ending_addr =
75 			per_info->ending_addr;
78 	 * if any of the control reg tracing bits are on
79 	 * we switch on per in the psw
81 	if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
82 		regs->psw.mask |= PSW_MASK_PER;
84 		regs->psw.mask &= ~PSW_MASK_PER;
/* Storage-alteration events need the alteration-space control set too. */
86 	if (per_info->control_regs.bits.em_storage_alteration)
87 		per_info->control_regs.bits.storage_alt_space_ctl = 1;
89 		per_info->control_regs.bits.storage_alt_space_ctl = 0;
93 	 * These registers are loaded in __switch_to on
94 	 * context switch. We must load them now if
95 	 * touching the current thread.
97 		__ctl_load(per_info->control_regs.words.cr, 9, 11);
/*
 * Enable hardware single-stepping for @task by flagging it in per_info
 * and recomputing the PER control registers.
 */
101 tracehook_enable_single_step(struct task_struct *task)
103 	task->thread.per_info.single_step = 1;
104 	FixPerRegisters(task);
/*
 * Disable single-stepping for @task: clear the per_info flag, refresh
 * the PER control registers, and drop the pending TIF_SINGLE_STEP flag.
 */
108 tracehook_disable_single_step(struct task_struct *task)
110 	task->thread.per_info.single_step = 0;
111 	FixPerRegisters(task);
112 	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
/* Report whether single-stepping is currently enabled for @task. */
116 tracehook_single_step_enabled(struct task_struct *task)
118 	return task->thread.per_info.single_step;
/*
 * genregs_get - utrace regset read of the native general registers.
 * Copies, in layout order: the PSW mask (with the PER bit masked out so
 * userspace never sees it), the PSW address and GPRs straight from the
 * kernel stack frame, the access registers from thread_struct, and
 * finally orig_gpr2. Returns 0 or a copyout error.
 */
123 genregs_get(struct task_struct *target,
124 	    const struct utrace_regset *regset,
125 	    unsigned int pos, unsigned int count,
126 	    void *kbuf, void __user *ubuf)
128 	struct pt_regs *regs = task_pt_regs(target);
129 	unsigned long pswmask;
132 	/* Remove per bit from user psw. */
133 	pswmask = regs->psw.mask & ~PSW_MASK_PER;
134 	ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
135 				    &pswmask, PT_PSWMASK, PT_PSWADDR);
137 	/* The rest of the PSW and the GPRs are directly on the stack. */
139 		ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
140 					    &regs->psw.addr, PT_PSWADDR,
143 	/* The ACRs are kept in the thread_struct. */
144 	if (ret == 0 && count > 0 && pos < PT_ORIGGPR2) {
/* For the running task the live ACRs must be saved first. */
145 		if (target == current)
146 			save_access_regs(target->thread.acrs);
148 		ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
150 					    PT_ACR0, PT_ORIGGPR2);
153 	/* Finally, the ORIG_GPR2 value. */
155 		ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
156 					    &regs->orig_gpr2, PT_ORIGGPR2, -1);
/*
 * genregs_set - utrace regset write of the native general registers.
 * Validates the PSW mask against the allowed user bit patterns before
 * storing it, forces the addressing-mode bit on the PSW address (old
 * gdb compatibility), writes GPRs to the stack frame, ACRs to
 * thread_struct (reloading them if @target is current), then orig_gpr2.
 */
162 genregs_set(struct task_struct *target,
163 	    const struct utrace_regset *regset,
164 	    unsigned int pos, unsigned int count,
165 	    const void *kbuf, const void __user *ubuf)
167 	struct pt_regs *regs = task_pt_regs(target);
170 	/* Check for an invalid PSW mask. */
171 	if (count > 0 && pos == PT_PSWMASK) {
172 		unsigned long pswmask = regs->psw.mask;
173 		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
174 					   &pswmask, PT_PSWMASK, PT_PSWADDR);
/* Only the 64-bit and 31-bit user PSW bit patterns are acceptable. */
175 		if (pswmask != PSW_MASK_MERGE(PSW_USER_BITS, pswmask)
177 		    && pswmask != PSW_MASK_MERGE(PSW_USER32_BITS, pswmask)
180 			/* Invalid psw mask. */
182 		regs->psw.mask = pswmask;
/* PER bit may have changed; resync the control registers. */
183 		FixPerRegisters(target);
186 	/* The rest of the PSW and the GPRs are directly on the stack. */
188 		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
189 					   &regs->psw.addr, PT_PSWADDR,
192 		/* I'd like to reject addresses without the
193 		   high order bit but older gdb's rely on it */
194 		regs->psw.addr |= PSW_ADDR_AMODE;
198 	/* The ACRs are kept in the thread_struct. */
199 	if (ret == 0 && count > 0 && pos < PT_ORIGGPR2) {
/* Partial ACR writes to the running task need the live values saved
 * first so untouched registers survive the copyin. */
200 		if (target == current
201 		    && (pos != PT_ACR0 || count < sizeof(target->thread.acrs)))
202 			save_access_regs(target->thread.acrs);
204 		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
206 					   PT_ACR0, PT_ORIGGPR2);
207 		if (ret == 0 && target == current)
208 			restore_access_regs(target->thread.acrs);
211 	/* Finally, the ORIG_GPR2 value. */
213 		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
214 					   &regs->orig_gpr2, PT_ORIGGPR2, -1);
/*
 * fpregs_get - utrace regset read of the floating point registers.
 * The FP state lives in thread_struct; for the running task the live
 * registers are saved there first, then copied out wholesale.
 */
220 fpregs_get(struct task_struct *target,
221 	   const struct utrace_regset *regset,
222 	   unsigned int pos, unsigned int count,
223 	   void *kbuf, void __user *ubuf)
225 	if (target == current)
226 		save_fp_regs(&target->thread.fp_regs);
228 	return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
229 				     &target->thread.fp_regs, 0, -1);
/*
 * fpregs_set - utrace regset write of the floating point registers.
 * A write that includes the FPC (pos == 0) is validated against
 * FPC_VALID_MASK before being stored; partial writes to the running
 * task save the live FP state first and reload it afterwards.
 */
233 fpregs_set(struct task_struct *target,
234 	   const struct utrace_regset *regset,
235 	   unsigned int pos, unsigned int count,
236 	   const void *kbuf, const void __user *ubuf)
240 	if (target == current && (pos != 0 || count != sizeof(s390_fp_regs)))
241 		save_fp_regs(&target->thread.fp_regs);
243 	/* If setting FPC, must validate it first. */
244 	if (count > 0 && pos == 0) {
246 		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
247 					   &fpc, 0, sizeof(fpc));
/* FPC_VALID_MASK sits in the high 32 bits of the long on 64-bit. */
251 		if ((fpc & ~((unsigned long) FPC_VALID_MASK
252 			     << (BITS_PER_LONG - 32))) != 0)
255 		memcpy(&target->thread.fp_regs, &fpc, sizeof(fpc));
259 		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
260 					   &target->thread.fp_regs, 0, -1);
262 	if (ret == 0 && target == current)
263 		restore_fp_regs(&target->thread.fp_regs);
/*
 * per_info_get - utrace regset read of the PER tracing control block,
 * copied directly out of thread_struct.
 */
269 per_info_get(struct task_struct *target,
270 	     const struct utrace_regset *regset,
271 	     unsigned int pos, unsigned int count,
272 	     void *kbuf, void __user *ubuf)
274 	return utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
275 				     &target->thread.per_info, 0, -1);
/*
 * per_info_set - utrace regset write of the PER tracing control block.
 * After copying the new settings in, FixPerRegisters() is called
 * unconditionally to derive the real control-register contents.
 */
279 per_info_set(struct task_struct *target,
280 	     const struct utrace_regset *regset,
281 	     unsigned int pos, unsigned int count,
282 	     const void *kbuf, const void __user *ubuf)
284 	int ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
285 				       &target->thread.per_info, 0, -1);
287 	FixPerRegisters(target);
294 * These are our native regset flavors.
/*
 * Native regset table: general registers, floating point registers,
 * and the PER info block, in that order. Element sizes are in longs
 * (native word size).
 */
296 static const struct utrace_regset native_regsets[] = {
298 		.size = sizeof(long), .align = sizeof(long),
299 		.n = sizeof(s390_regs) / sizeof(long),
300 		.get = genregs_get, .set = genregs_set
303 		.size = sizeof(long), .align = sizeof(long),
304 		.n = sizeof(s390_fp_regs) / sizeof(long),
305 		.get = fpregs_get, .set = fpregs_set
308 		.size = sizeof(long), .align = sizeof(long),
309 		.n = sizeof(per_struct) / sizeof(long),
310 		.get = per_info_get, .set = per_info_set
/* The exported native regset view for this architecture. */
314 const struct utrace_regset_view utrace_s390_native_view = {
315 	.name = UTS_MACHINE, .e_machine = ELF_ARCH,
316 	.regsets = native_regsets,
317 	.n = sizeof native_regsets / sizeof native_regsets[0],
319 EXPORT_SYMBOL_GPL(utrace_s390_native_view);
/*
 * s390_genregs_get - compat (31-bit view) read of the general registers
 * of a 64-bit kernel task. PSW mask and address are synthesized as
 * 31-bit values, GPRs are truncated to 32 bits, ACRs come from
 * thread_struct, and orig_gpr2 is truncated last. All compat offsets
 * are the native PT_* offsets halved.
 */
324 s390_genregs_get(struct task_struct *target,
325 		 const struct utrace_regset *regset,
326 		 unsigned int pos, unsigned int count,
327 		 void *kbuf, void __user *ubuf)
329 	struct pt_regs *regs = task_pt_regs(target);
332 	/* Fake a 31 bit psw mask. */
333 	if (count > 0 && pos == PT_PSWMASK / 2) {
334 		u32 pswmask = PSW32_MASK_MERGE(PSW32_USER_BITS,
335 					       (u32) (regs->psw.mask >> 32));
336 		ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
337 					    &pswmask, PT_PSWMASK / 2,
341 	/* Fake a 31 bit psw address. */
342 	if (ret == 0 && count > 0 && pos == PT_PSWADDR / 2) {
343 		u32 pswaddr = (u32) regs->psw.addr | PSW32_ADDR_AMODE31;
344 		ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
345 					    &pswaddr, PT_PSWADDR / 2,
349 	/* The GPRs are directly on the stack. Just truncate them. */
350 	while (ret == 0 && count > 0 && pos < PT_ACR0 / 2) {
351 		u32 value = regs->gprs[(pos - PT_GPR0 / 2) / sizeof(u32)];
/* Copy one 32-bit register at a time to kernel or user buffer. */
353 			*(u32 *) kbuf = value;
356 		else if (put_user(value, (u32 __user *) ubuf))
361 		count -= sizeof(u32);
364 	/* The ACRs are kept in the thread_struct. */
365 	if (ret == 0 && count > 0 && pos < PT_ACR0 / 2 + NUM_ACRS * ACR_SIZE) {
366 		if (target == current)
367 			save_access_regs(target->thread.acrs);
369 		ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
372 					    PT_ACR0 / 2 + NUM_ACRS * ACR_SIZE);
375 	/* Finally, the ORIG_GPR2 value. */
378 			*(u32 *) kbuf = regs->orig_gpr2;
379 		else if (put_user((u32) regs->orig_gpr2,
380 				  (u32 __user *) ubuf))
/*
 * s390_genregs_set - compat (31-bit view) write of the general
 * registers. Widens the 31-bit PSW mask/address into the 64-bit PSW
 * (validating the mask against PSW_USER32_BITS first), stores GPRs,
 * ACRs and orig_gpr2 from 32-bit values.
 */
388 s390_genregs_set(struct task_struct *target,
389 		 const struct utrace_regset *regset,
390 		 unsigned int pos, unsigned int count,
391 		 const void *kbuf, const void __user *ubuf)
393 	struct pt_regs *regs = task_pt_regs(target);
396 	/* Check for an invalid PSW mask. */
397 	if (count > 0 && pos == PT_PSWMASK / 2) {
399 		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
400 					   &pswmask, PT_PSWMASK / 2,
405 		if (pswmask != PSW_MASK_MERGE(PSW_USER32_BITS, pswmask))
406 			/* Invalid psw mask. */
409 		/* Build a 64 bit psw mask from 31 bit mask. */
410 		regs->psw.mask = PSW_MASK_MERGE(PSW_USER32_BITS,
411 						(u64) pswmask << 32);
/* PER bit may have changed; resync the control registers. */
412 		FixPerRegisters(target);
415 	/* Build a 64 bit psw address from 31 bit address. */
416 	if (count > 0 && pos == PT_PSWADDR / 2) {
418 		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
419 					   &pswaddr, PT_PSWADDR / 2,
422 		/* Build a 64 bit psw mask from 31 bit mask. */
423 		regs->psw.addr = pswaddr & PSW32_ADDR_INSN;
426 	/* The GPRs are directly onto the stack. */
427 	while (ret == 0 && count > 0 && pos < PT_ACR0 / 2) {
/* Fetch one 32-bit register value from kernel or user buffer. */
431 			value = *(const u32 *) kbuf;
434 		else if (get_user(value, (const u32 __user *) ubuf))
439 		count -= sizeof(u32);
441 		regs->gprs[(pos - PT_GPR0 / 2) / sizeof(u32)] = value;
444 	/* The ACRs are kept in the thread_struct. */
445 	if (count > 0 && pos < PT_ORIGGPR2 / 2) {
/* Partial ACR writes to the running task need the live values saved
 * first so untouched registers survive the copyin. */
446 		if (target == current
447 		    && (pos != PT_ACR0 / 2
448 			|| count < sizeof(target->thread.acrs)))
449 			save_access_regs(target->thread.acrs);
451 		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
454 					   PT_ACR0 / 2 + NUM_ACRS * ACR_SIZE);
456 		if (ret == 0 && target == current)
457 			restore_access_regs(target->thread.acrs);
460 	/* Finally, the ORIG_GPR2 value. */
461 	if (ret == 0 && count > 0) {
464 			value = *(const u32 *) kbuf;
465 		else if (get_user(value, (const u32 __user *) ubuf))
467 		regs->orig_gpr2 = value;
475 * This is magic. See per_struct and per_struct32.
476 * By incident the offsets in per_struct are exactly
477 * twice the offsets in per_struct32 for all fields.
478 * The 8 byte fields need special handling though,
479 * because the second half (bytes 4-7) is needed and
480 * not the first half.
/*
 * offset_from_per32 - map a byte offset within per_struct32 to the
 * corresponding offset within the native 64-bit per_struct. Plain
 * fields map to offset*2+4 (second half of the widened 8-byte field);
 * see the block comment above for the layout invariant this relies on.
 */
483 offset_from_per32(unsigned int offset)
485 	BUILD_BUG_ON(offsetof(per_struct32, control_regs) != 0);
486 	if (offset - offsetof(per_struct32, control_regs) < 3*sizeof(u32)
487 	    || (offset >= offsetof(per_struct32, starting_addr) &&
488 		offset <= offsetof(per_struct32, ending_addr))
489 	    || offset == offsetof(per_struct32, lowcore.words.address))
490 		offset = offset*2 + 4;
/*
 * s390_per_info_get - compat read of the PER info block. Each 32-bit
 * word is fetched from the native per_struct at the remapped offset
 * and copied to the kernel or user buffer.
 */
497 s390_per_info_get(struct task_struct *target,
498 		  const struct utrace_regset *regset,
499 		  unsigned int pos, unsigned int count,
500 		  void *kbuf, void __user *ubuf)
503 		u32 val = *(u32 *) ((char *) &target->thread.per_info
504 				    + offset_from_per32 (pos));
509 		else if (put_user(val, (u32 __user *) ubuf))
514 		count -= sizeof(u32);
/*
 * s390_per_info_set - compat write of the PER info block. Each 32-bit
 * word from the kernel or user buffer is stored into the native
 * per_struct at the remapped offset.
 */
520 s390_per_info_set(struct task_struct *target,
521 		  const struct utrace_regset *regset,
522 		  unsigned int pos, unsigned int count,
523 		  const void *kbuf, const void __user *ubuf)
529 			val = *(const u32 *) kbuf;
532 		else if (get_user(val, (const u32 __user *) ubuf))
537 		count -= sizeof(u32);
539 		*(u32 *) ((char *) &target->thread.per_info
540 			  + offset_from_per32 (pos)) = val;
/*
 * Compat regset table (31-bit view): general registers via the
 * s390_genregs_* translators, FP registers shared with the native
 * handlers (layout is identical), and the PER block via the
 * s390_per_info_* translators. Element size is u32.
 */
546 static const struct utrace_regset s390_compat_regsets[] = {
548 		.size = sizeof(u32), .align = sizeof(u32),
549 		.n = sizeof(s390_regs) / sizeof(long),
550 		.get = s390_genregs_get, .set = s390_genregs_set
553 		.size = sizeof(u32), .align = sizeof(u32),
554 		.n = sizeof(s390_fp_regs) / sizeof(u32),
555 		.get = fpregs_get, .set = fpregs_set
558 		.size = sizeof(u32), .align = sizeof(u32),
559 		.n = sizeof(per_struct) / sizeof(u32),
560 		.get = s390_per_info_get, .set = s390_per_info_set
/* The exported compat (31-bit s390) regset view. */
564 const struct utrace_regset_view utrace_s390_compat_view = {
565 	.name = "s390", .e_machine = EM_S390,
566 	.regsets = s390_compat_regsets,
567 	.n = sizeof s390_compat_regsets / sizeof s390_compat_regsets[0],
569 EXPORT_SYMBOL_GPL(utrace_s390_compat_view);
570 #endif /* CONFIG_COMPAT */
/*
 * Layout of the native struct user area for PTRACE_PEEKUSR/POKEUSR:
 * regset index per segment (-1 = hole), covering PSW+GPRs+ACRs (0),
 * FP registers (1) and PER info (2).
 */
574 static const struct ptrace_layout_segment s390_uarea[] = {
575 	{PT_PSWMASK, PT_FPC, 0, 0},
576 	{PT_FPC, PT_CR_9, 1, 0},
577 	{PT_CR_9, PT_IEEE_IP, 2, 0},
578 	{PT_IEEE_IP, sizeof(struct user), -1, -1},
/*
 * arch_ptrace - architecture hook for native ptrace requests under
 * utrace. Handles PEEKUSR/POKEUSR via the s390_uarea layout, the
 * *USR_AREA block requests via ptrace_layout_access, and PEEK/POKE
 * TEXT/DATA via access_process_vm (with the 31-bit high address bit
 * stripped). NOTE(review): source extraction is missing lines here;
 * the switch skeleton and some locals are not visible.
 */
582 fastcall int arch_ptrace(long *request, struct task_struct *child,
583 			 struct utrace_attached_engine *engine,
584 			 unsigned long addr, unsigned long data, long *val)
592 		return ptrace_peekusr(child, engine, s390_uarea, addr, data);
594 		return ptrace_pokeusr(child, engine, s390_uarea, addr, data);
596 	case PTRACE_PEEKUSR_AREA:
597 	case PTRACE_POKEUSR_AREA:
598 		if (copy_from_user(&parea, (ptrace_area __user *) addr,
/* Kernel offset and length must be word-aligned. */
601 		if ((parea.kernel_addr | parea.len) & (sizeof(data) - 1))
603 		return ptrace_layout_access(child, engine,
604 					    utrace_native_view(current),
606 					    parea.kernel_addr, parea.len,
607 					    (void __user *) parea.process_addr,
609 					    *request == PTRACE_POKEUSR_AREA);
611 	case PTRACE_PEEKTEXT:
612 	case PTRACE_PEEKDATA:
613 		/* Remove high order bit from address (only for 31 bit). */
614 		addr &= PSW_ADDR_INSN;
615 		/* read word at location addr. */
616 		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
617 		if (copied != sizeof(tmp))
619 		return put_user(tmp, (unsigned long __user *) data);
621 	case PTRACE_POKETEXT:
622 	case PTRACE_POKEDATA:
623 		/* Remove high order bit from address (only for 31 bit). */
624 		addr &= PSW_ADDR_INSN;
625 		/* write the word at location addr. */
626 		copied = access_process_vm(child, addr, &data, sizeof(data),1);
627 		if (copied != sizeof(data))
/*
 * Compat struct user32 layout for PTRACE_PEEKUSR/POKEUSR: the same
 * three regsets as s390_uarea but at halved offsets.
 */
636 static const struct ptrace_layout_segment s390_compat_uarea[] = {
637 	{PT_PSWMASK / 2, PT_FPC / 2, 0, 0},
638 	{PT_FPC / 2, PT_CR_9 / 2, 1, 0},
639 	{PT_CR_9 / 2, PT_IEEE_IP / 2, 2, 0},
640 	{PT_IEEE_IP / 2, sizeof(struct user32), -1, -1},
/*
 * arch_compat_ptrace - architecture hook for ptrace requests issued by
 * a 31-bit tracer. Mirrors arch_ptrace using the compat user-area
 * layout and the 31-bit ptrace_area_emu31 descriptor.
 * NOTE(review): source extraction is missing lines here; the switch
 * skeleton is not fully visible.
 */
644 fastcall int arch_compat_ptrace(compat_long_t *request,
645 				struct task_struct *child,
646 				struct utrace_attached_engine *engine,
647 				compat_ulong_t addr, compat_ulong_t data,
650 	ptrace_area_emu31 parea;
654 		return ptrace_compat_peekusr(child, engine, s390_compat_uarea,
657 		return ptrace_compat_pokeusr(child, engine, s390_compat_uarea,
659 	case PTRACE_PEEKUSR_AREA:
660 	case PTRACE_POKEUSR_AREA:
661 		if (copy_from_user(&parea, ((ptrace_area_emu31 __user *)
662 					    (unsigned long) addr),
/* Kernel offset and length must be 32-bit aligned. */
665 		if ((parea.kernel_addr | parea.len) & (sizeof(data) - 1))
667 		return ptrace_layout_access(child, engine,
668 					    utrace_native_view(current),
670 					    parea.kernel_addr, parea.len,
672 					    (unsigned long) parea.process_addr,
674 					    *request == PTRACE_POKEUSR_AREA);
679 #endif /* CONFIG_COMPAT */
680 #endif /* CONFIG_PTRACE */
686 # define __ADDR_MASK 3
688 # define __ADDR_MASK 7
692 * Read the word at offset addr from the user area of a process. The
693 * trouble here is that the information is littered over different
694 * locations. The process registers are found on the kernel stack,
695 * the floating point stuff and the trace settings are stored in
696 * the task structure. In addition the different structures in
697 * struct user contain pad bytes that should be read as zeroes.
/*
 * peek_user - read one word from the struct user area of @child and
 * put_user() it to @data. The word is gathered from wherever it really
 * lives: PSW/GPRs on the kernel stack, ACRs/FP/PER info in
 * thread_struct; the PER bit is hidden from the PSW mask and the FPC
 * is masked to its valid bits.
 */
701 peek_user(struct task_struct *child, addr_t addr, addr_t data)
703 	struct user *dummy = NULL;
704 	addr_t offset, tmp, mask;
707 	 * Stupid gdb peeks/pokes the access registers in 64 bit with
708 	 * an alignment of 4. Programmers from hell...
/* Inside the ACR range only 4-byte alignment is required. */
712 	if (addr >= (addr_t) &dummy->regs.acrs &&
713 	    addr < (addr_t) &dummy->regs.orig_gpr2)
716 	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
719 	if (addr < (addr_t) &dummy->regs.acrs) {
721 		 * psw and gprs are stored on the stack
723 		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
724 		if (addr == (addr_t) &dummy->regs.psw.mask)
725 			/* Remove per bit from user psw. */
726 			tmp &= ~PSW_MASK_PER;
728 	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
730 		 * access registers are stored in the thread structure
732 		offset = addr - (addr_t) &dummy->regs.acrs;
735 		 * Very special case: old & broken 64 bit gdb reading
736 		 * from acrs[15]. Result is a 64 bit value. Read the
737 		 * 32 bit acrs[15] value and shift it by 32. Sick...
739 		if (addr == (addr_t) &dummy->regs.acrs[15])
740 			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
743 			tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);
745 	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
747 		 * orig_gpr2 is stored on the kernel stack
749 		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
751 	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
753 		 * floating point regs. are stored in the thread structure
755 		offset = addr - (addr_t) &dummy->regs.fp_regs;
756 		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
/* Hide reserved FPC bits from userspace. */
757 		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
758 			tmp &= (unsigned long) FPC_VALID_MASK
759 				<< (BITS_PER_LONG - 32);
761 	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
763 		 * per_info is found in the thread structure
765 		offset = addr - (addr_t) &dummy->regs.per_info;
766 		tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset);
771 	return put_user(tmp, (addr_t __user *) data);
775 * Write a word to the user area of a process at location addr. This
776 * operation does have an additional problem compared to peek_user.
777 * Stores to the program status word and on the floating point
778 * control register needs to get checked for validity.
/*
 * poke_user - write one word @data into the struct user area of
 * @child, scattering it to the real storage location (kernel stack or
 * thread_struct). PSW mask and FPC writes are validated first; PER
 * control registers are recomputed afterwards via FixPerRegisters().
 */
781 poke_user(struct task_struct *child, addr_t addr, addr_t data)
783 	struct user *dummy = NULL;
787 	 * Stupid gdb peeks/pokes the access registers in 64 bit with
788 	 * an alignment of 4. Programmers from hell indeed...
/* Inside the ACR range only 4-byte alignment is required. */
792 	if (addr >= (addr_t) &dummy->regs.acrs &&
793 	    addr < (addr_t) &dummy->regs.orig_gpr2)
796 	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
799 	if (addr < (addr_t) &dummy->regs.acrs) {
801 		 * psw and gprs are stored on the stack
/* Reject any PSW mask that is not a valid 64-bit or 31-bit user mask. */
803 		if (addr == (addr_t) &dummy->regs.psw.mask &&
805 		    data != PSW_MASK_MERGE(PSW_USER32_BITS, data) &&
807 		    data != PSW_MASK_MERGE(PSW_USER_BITS, data))
808 			/* Invalid psw mask. */
811 		if (addr == (addr_t) &dummy->regs.psw.addr)
812 			/* I'd like to reject addresses without the
813 			   high order bit but older gdb's rely on it */
814 			data |= PSW_ADDR_AMODE;
816 		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
818 	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
820 		 * access registers are stored in the thread structure
822 		offset = addr - (addr_t) &dummy->regs.acrs;
825 		 * Very special case: old & broken 64 bit gdb writing
826 		 * to acrs[15] with a 64 bit value. Ignore the lower
827 		 * half of the value and write the upper 32 bit to
830 		if (addr == (addr_t) &dummy->regs.acrs[15])
831 			child->thread.acrs[15] = (unsigned int) (data >> 32);
834 			*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;
836 	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
838 		 * orig_gpr2 is stored on the kernel stack
840 		task_pt_regs(child)->orig_gpr2 = data;
842 	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
844 		 * floating point regs. are stored in the thread structure
/* Reject FPC values with reserved bits set. */
846 		if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
847 		    (data & ~((unsigned long) FPC_VALID_MASK
848 			      << (BITS_PER_LONG - 32))) != 0)
850 		offset = addr - (addr_t) &dummy->regs.fp_regs;
851 		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
853 	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
855 		 * per_info is found in the thread structure
857 		offset = addr - (addr_t) &dummy->regs.per_info;
858 		*(addr_t *)((addr_t) &child->thread.per_info + offset) = data;
/* Settings may affect PER; rebuild the control registers. */
862 	FixPerRegisters(child);
/*
 * do_ptrace_normal - handle ptrace requests for a 64-bit tracee:
 * PEEK/POKE TEXT/DATA via access_process_vm, PEEKUSR/POKEUSR via
 * peek_user/poke_user, the *USR_AREA block forms by looping word-wise,
 * and everything else via generic ptrace_request().
 * NOTE(review): source extraction is missing lines here; the switch
 * skeleton and some locals are not visible.
 */
867 do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
874 	case PTRACE_PEEKTEXT:
875 	case PTRACE_PEEKDATA:
876 		/* Remove high order bit from address (only for 31 bit). */
877 		addr &= PSW_ADDR_INSN;
878 		/* read word at location addr. */
879 		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
880 		if (copied != sizeof(tmp))
882 		return put_user(tmp, (unsigned long __user *) data);
885 		/* read the word at location addr in the USER area. */
886 		return peek_user(child, addr, data);
888 	case PTRACE_POKETEXT:
889 	case PTRACE_POKEDATA:
890 		/* Remove high order bit from address (only for 31 bit). */
891 		addr &= PSW_ADDR_INSN;
892 		/* write the word at location addr. */
893 		copied = access_process_vm(child, addr, &data, sizeof(data),1);
894 		if (copied != sizeof(data))
899 		/* write the word at location addr in the USER area */
900 		return poke_user(child, addr, data);
902 	case PTRACE_PEEKUSR_AREA:
903 	case PTRACE_POKEUSR_AREA:
904 		if (copy_from_user(&parea, (void __user *) addr,
907 		addr = parea.kernel_addr;
908 		data = parea.process_addr;
/* Transfer the area one native word at a time. */
910 		while (copied < parea.len) {
911 			if (request == PTRACE_PEEKUSR_AREA)
912 				ret = peek_user(child, addr, data);
915 				if (get_user (tmp, (addr_t __user *) data))
917 				ret = poke_user(child, addr, tmp);
921 			addr += sizeof(unsigned long);
922 			data += sizeof(unsigned long);
923 			copied += sizeof(unsigned long);
927 	return ptrace_request(child, request, addr, data);
932 * Now the fun part starts... a 31 bit program running in the
933 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
934 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
935 * to handle, the difference to the 64 bit versions of the requests
936 * is that the access is done in multiples of 4 byte instead of
937 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
938 * The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
939 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
940 * is a 31 bit program too, the content of struct user can be
941 * emulated. A 31 bit program peeking into the struct user of
942 * a 64 bit program is a no-no.
946 * Same as peek_user but for a 31 bit program.
/*
 * peek_user_emu31 - peek_user for a 31-bit tracee traced by a 31-bit
 * tracer. Reads one 32-bit word from the emulated struct user32,
 * faking 31-bit PSW values and remapping per_struct32 offsets into the
 * native per_struct (second half of widened 8-byte fields).
 * Only valid when the tracer itself runs with TIF_31BIT.
 */
949 peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
951 	struct user32 *dummy32 = NULL;
952 	per_struct32 *dummy_per32 = NULL;
/* Tracer must be 31-bit and addr 4-byte aligned and in range. */
956 	if (!test_thread_flag(TIF_31BIT) ||
957 	    (addr & 3) || addr > sizeof(struct user) - 3)
960 	if (addr < (addr_t) &dummy32->regs.acrs) {
962 		 * psw and gprs are stored on the stack
964 		if (addr == (addr_t) &dummy32->regs.psw.mask) {
965 			/* Fake a 31 bit psw mask. */
966 			tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
967 			tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp);
968 		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
969 			/* Fake a 31 bit psw address. */
970 			tmp = (__u32) task_pt_regs(child)->psw.addr |
974 			tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw +
977 	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
979 		 * access registers are stored in the thread structure
981 		offset = addr - (addr_t) &dummy32->regs.acrs;
982 		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
984 	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
986 		 * orig_gpr2 is stored on the kernel stack
/* +4: low half of the 64-bit orig_gpr2. */
988 		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
990 	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
992 		 * floating point regs. are stored in the thread structure
994 		offset = addr - (addr_t) &dummy32->regs.fp_regs;
995 		tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);
997 	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
999 		 * per_info is found in the thread structure
1001 		offset = addr - (addr_t) &dummy32->regs.per_info;
1002 		/* This is magic. See per_struct and per_struct32. */
1003 		if ((offset >= (addr_t) &dummy_per32->control_regs &&
1004 		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
1005 		    (offset >= (addr_t) &dummy_per32->starting_addr &&
1006 		     offset <= (addr_t) &dummy_per32->ending_addr) ||
1007 		    offset == (addr_t) &dummy_per32->lowcore.words.address)
1008 			offset = offset*2 + 4;
1011 		tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);
1016 	return put_user(tmp, (__u32 __user *) data);
1020 * Same as poke_user but for a 31 bit program.
/*
 * poke_user_emu31 - poke_user for a 31-bit tracee traced by a 31-bit
 * tracer. Writes one 32-bit word into the emulated struct user32,
 * widening PSW values to 64 bits (with mask validation), remapping
 * per_struct32 offsets, and finishing with FixPerRegisters().
 */
1023 poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
1025 	struct user32 *dummy32 = NULL;
1026 	per_struct32 *dummy_per32 = NULL;
/* Tracer must be 31-bit and addr 4-byte aligned and in range. */
1030 	if (!test_thread_flag(TIF_31BIT) ||
1031 	    (addr & 3) || addr > sizeof(struct user32) - 3)
1036 	if (addr < (addr_t) &dummy32->regs.acrs) {
1038 		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
1040 		if (addr == (addr_t) &dummy32->regs.psw.mask) {
1041 			/* Build a 64 bit psw mask from 31 bit mask. */
1042 			if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp))
1043 				/* Invalid psw mask. */
1045 			task_pt_regs(child)->psw.mask =
1046 				PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32);
1047 		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
1048 			/* Build a 64 bit psw address from 31 bit address. */
1049 			task_pt_regs(child)->psw.addr =
1050 				(__u64) tmp & PSW32_ADDR_INSN;
/* Other stack-resident fields: write the low half of the widened
 * 64-bit slot (addr*2 + 4). */
1053 			*(__u32*)((addr_t) &task_pt_regs(child)->psw
1054 				  + addr*2 + 4) = tmp;
1056 	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
1058 		 * access registers are stored in the thread structure
1060 		offset = addr - (addr_t) &dummy32->regs.acrs;
1061 		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;
1063 	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
1065 		 * orig_gpr2 is stored on the kernel stack
1067 		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
1069 	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
1071 		 * floating point regs. are stored in the thread structure
1073 		if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
1074 		    (tmp & ~FPC_VALID_MASK) != 0)
1075 			/* Invalid floating point control. */
1077 		offset = addr - (addr_t) &dummy32->regs.fp_regs;
1078 		*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
1080 	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
1082 		 * per_info is found in the thread structure.
1084 		offset = addr - (addr_t) &dummy32->regs.per_info;
1086 		 * This is magic. See per_struct and per_struct32.
1087 		 * By incident the offsets in per_struct are exactly
1088 		 * twice the offsets in per_struct32 for all fields.
1089 		 * The 8 byte fields need special handling though,
1090 		 * because the second half (bytes 4-7) is needed and
1091 		 * not the first half.
1093 		if ((offset >= (addr_t) &dummy_per32->control_regs &&
1094 		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
1095 		    (offset >= (addr_t) &dummy_per32->starting_addr &&
1096 		     offset <= (addr_t) &dummy_per32->ending_addr) ||
1097 		    offset == (addr_t) &dummy_per32->lowcore.words.address)
1098 			offset = offset*2 + 4;
1101 		*(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;
/* Settings may affect PER; rebuild the control registers. */
1105 	FixPerRegisters(child);
/*
 * do_ptrace_emu31 - ptrace request dispatch for a 31-bit tracer:
 * like do_ptrace_normal but transfers in 4-byte units, uses the
 * *_emu31 user-area accessors, and converts event messages and
 * siginfo to/from the compat layouts.
 * NOTE(review): source extraction is missing lines here; the switch
 * skeleton is not fully visible.
 */
1110 do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
1112 	unsigned int tmp;  /* 4 bytes !! */
1113 	ptrace_area_emu31 parea;
1117 	case PTRACE_PEEKTEXT:
1118 	case PTRACE_PEEKDATA:
1119 		/* read word at location addr. */
1120 		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
1121 		if (copied != sizeof(tmp))
1123 		return put_user(tmp, (unsigned int __user *) data);
1125 	case PTRACE_PEEKUSR:
1126 		/* read the word at location addr in the USER area. */
1127 		return peek_user_emu31(child, addr, data);
1129 	case PTRACE_POKETEXT:
1130 	case PTRACE_POKEDATA:
1131 		/* write the word at location addr. */
1133 		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 1);
1134 		if (copied != sizeof(tmp))
1138 	case PTRACE_POKEUSR:
1139 		/* write the word at location addr in the USER area */
1140 		return poke_user_emu31(child, addr, data);
1142 	case PTRACE_PEEKUSR_AREA:
1143 	case PTRACE_POKEUSR_AREA:
1144 		if (copy_from_user(&parea, (void __user *) addr,
1147 		addr = parea.kernel_addr;
1148 		data = parea.process_addr;
/* Transfer the area one 32-bit word at a time. */
1150 		while (copied < parea.len) {
1151 			if (request == PTRACE_PEEKUSR_AREA)
1152 				ret = peek_user_emu31(child, addr, data);
1155 				if (get_user (tmp, (__u32 __user *) data))
1157 				ret = poke_user_emu31(child, addr, tmp);
1161 			addr += sizeof(unsigned int);
1162 			data += sizeof(unsigned int);
1163 			copied += sizeof(unsigned int);
1167 	case PTRACE_GETEVENTMSG:
1168 		return put_user((__u32) child->ptrace_message,
1169 				(unsigned int __user *) data);
1170 	case PTRACE_GETSIGINFO:
1171 		if (child->last_siginfo == NULL)
1173 		return copy_siginfo_to_user32((compat_siginfo_t __user *) data,
1174 					      child->last_siginfo);
1175 	case PTRACE_SETSIGINFO:
1176 		if (child->last_siginfo == NULL)
1178 		return copy_siginfo_from_user32(child->last_siginfo,
1179 						(compat_siginfo_t __user *) data);
1182 	return ptrace_request(child, request, addr, data);
1186 #define PT32_IEEE_IP 0x13c
/*
 * do_ptrace - top-level ptrace dispatch. Handles ATTACH, the
 * self-tracing IEEE-IP fast paths, CONT/SYSCALL, KILL, SINGLESTEP and
 * DETACH directly, then farms the rest out to do_ptrace_emu31 or
 * do_ptrace_normal depending on the tracer's address mode.
 * NOTE(review): source extraction is missing lines here; the switch
 * skeleton and some cases are not fully visible.
 */
1189 do_ptrace(struct task_struct *child, long request, long addr, long data)
1193 	if (request == PTRACE_ATTACH)
1194 		return ptrace_attach(child);
1197 	 * Special cases to get/store the ieee instructions pointer.
1199 	if (child == current) {
1200 		if (request == PTRACE_PEEKUSR && addr == PT_IEEE_IP)
1201 			return peek_user(child, addr, data);
1202 		if (request == PTRACE_POKEUSR && addr == PT_IEEE_IP)
1203 			return poke_user(child, addr, data);
1204 #ifdef CONFIG_COMPAT
1205 		if (request == PTRACE_PEEKUSR &&
1206 		    addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
1207 			return peek_user_emu31(child, addr, data);
1208 		if (request == PTRACE_POKEUSR &&
1209 		    addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
1210 			return poke_user_emu31(child, addr, data);
1214 	ret = ptrace_check_attach(child, request == PTRACE_KILL);
1219 	case PTRACE_SYSCALL:
1220 		/* continue and stop at next (return from) syscall */
1222 		/* restart after signal. */
1223 		if (!valid_signal(data))
1225 		if (request == PTRACE_SYSCALL)
1226 			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
1228 			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
1229 		child->exit_code = data;
1230 		/* make sure the single step bit is not set. */
1231 		tracehook_disable_single_step(child);
1232 		wake_up_process(child);
1237 	 * make the child exit. Best I can do is send it a sigkill.
1238 	 * perhaps it should be put in the status that it wants to
1241 		if (child->exit_state == EXIT_ZOMBIE) /* already dead */
1243 		child->exit_code = SIGKILL;
1244 		/* make sure the single step bit is not set. */
1245 		tracehook_disable_single_step(child);
1246 		wake_up_process(child);
1249 	case PTRACE_SINGLESTEP:
1250 		/* set the trap flag. */
1251 		if (!valid_signal(data))
1253 		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
1254 		child->exit_code = data;
1256 			set_tsk_thread_flag(child, TIF_SINGLE_STEP);
1258 			tracehook_enable_single_step(child);
1259 		/* give it a chance to run. */
1260 		wake_up_process(child);
1264 		/* detach a process that was attached. */
1265 		return ptrace_detach(child, data);
1268 	/* Do requests that differ for 31/64 bit */
1270 #ifdef CONFIG_COMPAT
1271 	if (test_thread_flag(TIF_31BIT))
1272 		return do_ptrace_emu31(child, request, addr, data);
1274 	return do_ptrace_normal(child, request, addr, data);
/*
 * syscall_trace - called on syscall entry (@entryexit == 0) and exit
 * (@entryexit != 0). Reports to the tracehook layer when
 * TIF_SYSCALL_TRACE is set and feeds the audit subsystem with the
 * syscall number/arguments (entry) or result (exit).
 */
1284 syscall_trace(struct pt_regs *regs, int entryexit)
1286 	if (unlikely(current->audit_context) && entryexit)
1287 		audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), regs->gprs[2]);
1289 	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
1290 		tracehook_report_syscall(regs, entryexit);
1293 		 * If the debugger has set an invalid system call number,
1294 		 * we prepare to skip the system call restart handling.
1296 		if (!entryexit && regs->gprs[2] >= NR_syscalls)
1300 	if (unlikely(current->audit_context) && !entryexit)
/* Audit arch depends on the task's current addressing mode. */
1301 		audit_syscall_entry(test_thread_flag(TIF_31BIT)?AUDIT_ARCH_S390:AUDIT_ARCH_S390X,
1302 				    regs->gprs[2], regs->orig_gpr2, regs->gprs[3],
1303 				    regs->gprs[4], regs->gprs[5]);