+/*
+ * Set the 31-bit (compat) general registers of @target from a
+ * caller-supplied buffer (@kbuf in kernel space, else @ubuf in user
+ * space).  @pos/@count select a 4-byte-aligned slice of the 31-bit
+ * register layout; compat offsets are the 64-bit PT_* offsets / 2.
+ * Returns 0 on success, -EINVAL for an invalid PSW mask, -EFAULT on
+ * a failed user-space access, or an utrace_regset_copyin() error.
+ */
+static int
+s390_genregs_set(struct task_struct *target,
+		 const struct utrace_regset *regset,
+		 unsigned int pos, unsigned int count,
+		 const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs *regs = task_pt_regs(target);
+	int ret = 0;
+
+	/* Check for an invalid PSW mask. */
+	if (count > 0 && pos == PT_PSWMASK / 2) {
+		u32 pswmask;
+		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					   &pswmask, PT_PSWMASK / 2,
+					   PT_PSWADDR / 2);
+		if (ret)
+			return ret;
+
+		if (pswmask != PSW_MASK_MERGE(PSW_USER32_BITS, pswmask))
+			/* Invalid psw mask. */
+			return -EINVAL;
+
+		/* Build a 64 bit psw mask from 31 bit mask. */
+		regs->psw.mask = PSW_MASK_MERGE(PSW_USER32_BITS,
+						(u64) pswmask << 32);
+		FixPerRegisters(target);
+	}
+
+	/* Build a 64 bit psw address from 31 bit address. */
+	if (count > 0 && pos == PT_PSWADDR / 2) {
+		u32 pswaddr;
+		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					   &pswaddr, PT_PSWADDR / 2,
+					   PT_GPR0 / 2);
+		if (ret == 0)
+			/* Build a 64 bit psw address from 31 bit address. */
+			regs->psw.addr = pswaddr & PSW32_ADDR_INSN;
+	}
+
+	/* The GPRs are directly on the stack. */
+	while (ret == 0 && count > 0 && pos < PT_ACR0 / 2) {
+		u32 value;
+
+		if (kbuf) {
+			value = *(const u32 *) kbuf;
+			kbuf += sizeof(u32);
+		}
+		else if (get_user(value, (const u32 __user *) ubuf))
+			return -EFAULT;
+		else
+			ubuf += sizeof(u32);
+
+		/*
+		 * Store before advancing @pos: the slot index must be
+		 * derived from the position just consumed.  Advancing
+		 * first put gpr0 into gprs[1] and made the final
+		 * iteration write one element past gprs[15].
+		 */
+		regs->gprs[(pos - PT_GPR0 / 2) / sizeof(u32)] = value;
+
+		pos += sizeof(u32);
+		count -= sizeof(u32);
+	}
+
+	/* The ACRs are kept in the thread_struct. */
+	if (count > 0 && pos < PT_ORIGGPR2 / 2) {
+		/*
+		 * On a partial update of the current task, fetch the
+		 * live access registers first so the untouched ones
+		 * survive the copy-in below.
+		 */
+		if (target == current
+		    && (pos != PT_ACR0 / 2
+			|| count < sizeof(target->thread.acrs)))
+			save_access_regs(target->thread.acrs);
+
+		ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					   target->thread.acrs,
+					   PT_ACR0 / 2,
+					   PT_ACR0 / 2 + NUM_ACRS * ACR_SIZE);
+
+		/* Make the new values take effect immediately. */
+		if (ret == 0 && target == current)
+			restore_access_regs(target->thread.acrs);
+	}
+
+	/* Finally, the ORIG_GPR2 value. */
+	if (ret == 0 && count > 0) {
+		u32 value;
+		if (kbuf)
+			value = *(const u32 *) kbuf;
+		else if (get_user(value, (const u32 __user *) ubuf))
+			return -EFAULT;
+		regs->orig_gpr2 = value;
+	}
+
+	return ret;
+}
+
+
+/*
+ * This is magic. See per_struct and per_struct32.
+ * By coincidence the offsets in per_struct are exactly
+ * twice the offsets in per_struct32 for all fields.
+ * The 8 byte fields need special handling though,
+ * because the second half (bytes 4-7) is needed and
+ * not the first half.
+ */
+static unsigned int
+offset_from_per32(unsigned int offset)
+{
+	int wants_second_half;
+
+	/* The doubling trick below relies on control_regs being first. */
+	BUILD_BUG_ON(offsetof(per_struct32, control_regs) != 0);
+
+	/*
+	 * Doubling a 32-bit offset addresses the first half of the
+	 * corresponding 64-bit field; these fields need the second
+	 * half (bytes 4-7), so add 4 for them.
+	 */
+	wants_second_half =
+		(offset - offsetof(per_struct32, control_regs)
+		 < 3 * sizeof(u32))
+		|| (offset >= offsetof(per_struct32, starting_addr)
+		    && offset <= offsetof(per_struct32, ending_addr))
+		|| offset == offsetof(per_struct32, lowcore.words.address);
+
+	return wants_second_half ? offset * 2 + 4 : offset * 2;
+}
+
+/*
+ * Read 32-bit words of the compat per_struct32 view out of @target's
+ * 64-bit per_info, translating each position with offset_from_per32().
+ * Returns 0 on success or -EFAULT on a failed user-space write.
+ */
+static int
+s390_per_info_get(struct task_struct *target,
+		  const struct utrace_regset *regset,
+		  unsigned int pos, unsigned int count,
+		  void *kbuf, void __user *ubuf)
+{
+	const char *per = (const char *) &target->thread.per_info;
+
+	for (; count > 0; pos += sizeof(u32), count -= sizeof(u32)) {
+		u32 word = *(const u32 *) (per + offset_from_per32(pos));
+
+		if (kbuf) {
+			*(u32 *) kbuf = word;
+			kbuf += sizeof(u32);
+		} else if (put_user(word, (u32 __user *) ubuf)) {
+			return -EFAULT;
+		} else {
+			ubuf += sizeof(u32);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Write 32-bit words of the compat per_struct32 view into @target's
+ * 64-bit per_info.  Mirrors s390_per_info_get: each 32-bit position
+ * is translated by offset_from_per32() before the store.
+ * Returns 0 on success or -EFAULT on a failed user-space read.
+ */
+static int
+s390_per_info_set(struct task_struct *target,
+		  const struct utrace_regset *regset,
+		  unsigned int pos, unsigned int count,
+		  const void *kbuf, const void __user *ubuf)
+{
+	while (count > 0) {
+		u32 val;
+
+		if (kbuf) {
+			val = *(const u32 *) kbuf;
+			kbuf += sizeof(u32);
+		}
+		else if (get_user(val, (const u32 __user *) ubuf))
+			return -EFAULT;
+		else
+			ubuf += sizeof(u32);
+
+		/*
+		 * Store at the position just consumed, BEFORE advancing
+		 * @pos -- matching the read side above.  Advancing first
+		 * made every word land one 32-bit slot too high.
+		 */
+		*(u32 *) ((char *) &target->thread.per_info
+			  + offset_from_per32 (pos)) = val;
+
+		pos += sizeof(u32);
+		count -= sizeof(u32);
+	}
+	return 0;
+}
+
+
+/*
+ * Register sets exposed to 31-bit (compat) debuggers; every set is
+ * presented as an array of 32-bit words.
+ */
+static const struct utrace_regset s390_compat_regsets[] = {
+ {
+ /* General registers: PSW, GPRs, ACRs, orig_gpr2 as 32-bit slices. */
+ .size = sizeof(u32), .align = sizeof(u32),
+ .n = sizeof(s390_regs) / sizeof(long),
+ .get = s390_genregs_get, .set = s390_genregs_set
+ },
+ {
+ /* Floating-point registers (s390_fp_regs layout). */
+ .size = sizeof(u32), .align = sizeof(u32),
+ .n = sizeof(s390_fp_regs) / sizeof(u32),
+ .get = fpregs_get, .set = fpregs_set
+ },
+ {
+ /* PER (program-event-recording) debug state, translated from
+ the per_struct32 layout by s390_per_info_get/set. */
+ .size = sizeof(u32), .align = sizeof(u32),
+ .n = sizeof(per_struct) / sizeof(u32),
+ .get = s390_per_info_get, .set = s390_per_info_set
+ },
+};
+
+/*
+ * Register view a 31-bit debugger sees when tracing on a 64-bit
+ * kernel; exported for use by utrace clients (EXPORT_SYMBOL_GPL).
+ */
+const struct utrace_regset_view utrace_s390_compat_view = {
+ .name = "s390", .e_machine = EM_S390,
+ .regsets = s390_compat_regsets,
+ .n = sizeof s390_compat_regsets / sizeof s390_compat_regsets[0],
+};
+EXPORT_SYMBOL_GPL(utrace_s390_compat_view);
+#endif /* CONFIG_COMPAT */
+
+
+#ifdef CONFIG_PTRACE
+/*
+ * Layout of the native user area for PTRACE_PEEKUSR/POKEUSR and the
+ * *_AREA requests: each segment maps a byte range of struct user to
+ * a regset index.
+ * NOTE(review): field semantics (-1 entries = unmapped hole) inferred
+ * from use in arch_ptrace; confirm against ptrace_layout_segment.
+ */
+static const struct ptrace_layout_segment s390_uarea[] = {
+ {PT_PSWMASK, PT_FPC, 0, 0},
+ {PT_FPC, PT_CR_9, 1, 0},
+ {PT_CR_9, PT_IEEE_IP, 2, 0},
+ {PT_IEEE_IP, sizeof(struct user), -1, -1},
+ {0, 0, -1, 0}
+};
+
+/*
+ * Arch-specific handler for native (64-bit) ptrace requests: user-area
+ * access and text/data peek/poke.  Unrecognized requests return
+ * -ENOSYS so generic handling can take over.
+ */
+fastcall int arch_ptrace(long *request, struct task_struct *child,
+			 struct utrace_attached_engine *engine,
+			 unsigned long addr, unsigned long data, long *val)
+{
+	ptrace_area area;
+	unsigned long word;
+	int nbytes;
+
+	switch (*request) {
+	case PTRACE_PEEKUSR:
+		return ptrace_peekusr(child, engine, s390_uarea, addr, data);
+
+	case PTRACE_POKEUSR:
+		return ptrace_pokeusr(child, engine, s390_uarea, addr, data);
+
+	case PTRACE_PEEKUSR_AREA:
+	case PTRACE_POKEUSR_AREA:
+		if (copy_from_user(&area, (ptrace_area __user *) addr,
+				   sizeof(area)))
+			return -EFAULT;
+		/* Both the kernel offset and the length must be
+		   word-aligned. */
+		if ((area.kernel_addr | area.len) & (sizeof(data) - 1))
+			return -EIO;
+		return ptrace_layout_access(child, engine,
+					    utrace_native_view(current),
+					    s390_uarea,
+					    area.kernel_addr, area.len,
+					    (void __user *) area.process_addr,
+					    NULL,
+					    *request == PTRACE_POKEUSR_AREA);
+
+	case PTRACE_PEEKTEXT:
+	case PTRACE_PEEKDATA:
+		/* Remove high order bit from address (only for 31 bit). */
+		addr &= PSW_ADDR_INSN;
+		/* Read one word at location addr. */
+		nbytes = access_process_vm(child, addr, &word,
+					   sizeof(word), 0);
+		if (nbytes != sizeof(word))
+			return -EIO;
+		return put_user(word, (unsigned long __user *) data);
+
+	case PTRACE_POKETEXT:
+	case PTRACE_POKEDATA:
+		/* Remove high order bit from address (only for 31 bit). */
+		addr &= PSW_ADDR_INSN;
+		/* Write one word at location addr. */
+		nbytes = access_process_vm(child, addr, &data,
+					   sizeof(data), 1);
+		return nbytes == sizeof(data) ? 0 : -EIO;
+	}
+
+	return -ENOSYS;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * Layout of the 31-bit (compat) user area; offsets are the native
+ * PT_* offsets halved, since the compat view packs every register
+ * into a 32-bit slot.
+ * NOTE(review): field semantics (-1 entries = unmapped hole) inferred
+ * from use in arch_compat_ptrace; confirm against
+ * ptrace_layout_segment.
+ */
+static const struct ptrace_layout_segment s390_compat_uarea[] = {
+ {PT_PSWMASK / 2, PT_FPC / 2, 0, 0},
+ {PT_FPC / 2, PT_CR_9 / 2, 1, 0},
+ {PT_CR_9 / 2, PT_IEEE_IP / 2, 2, 0},
+ {PT_IEEE_IP / 2, sizeof(struct user32), -1, -1},
+ {0, 0, -1, 0}
+};
+
+/*
+ * Arch-specific handler for ptrace requests issued by a 31-bit
+ * tracer: user-area access against the compat layout.  Unrecognized
+ * requests return -ENOSYS so generic handling can take over.
+ */
+fastcall int arch_compat_ptrace(compat_long_t *request,
+				struct task_struct *child,
+				struct utrace_attached_engine *engine,
+				compat_ulong_t addr, compat_ulong_t data,
+				compat_long_t *val)
+{
+	ptrace_area_emu31 area;
+
+	switch (*request) {
+	case PTRACE_PEEKUSR:
+		return ptrace_compat_peekusr(child, engine, s390_compat_uarea,
+					     addr, data);
+
+	case PTRACE_POKEUSR:
+		return ptrace_compat_pokeusr(child, engine, s390_compat_uarea,
+					     addr, data);
+
+	case PTRACE_PEEKUSR_AREA:
+	case PTRACE_POKEUSR_AREA:
+		if (copy_from_user(&area, ((ptrace_area_emu31 __user *)
+					   (unsigned long) addr),
+				   sizeof(area)))
+			return -EFAULT;
+		/* Both the kernel offset and the length must be
+		   word-aligned (4 bytes for a compat tracer). */
+		if ((area.kernel_addr | area.len) & (sizeof(data) - 1))
+			return -EIO;
+		return ptrace_layout_access(child, engine,
+					    utrace_native_view(current),
+					    s390_compat_uarea,
+					    area.kernel_addr, area.len,
+					    (void __user *)
+					    (unsigned long) area.process_addr,
+					    NULL,
+					    *request == PTRACE_POKEUSR_AREA);
+	}
+
+	return -ENOSYS;
+}
+#endif /* CONFIG_COMPAT */
+#endif /* CONFIG_PTRACE */
+
+
+#if 0 /* XXX */
+
+#ifndef CONFIG_64BIT