/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2006 Intel Co
 * 2006-08-12 - IA64 Native Utrace implementation support added by
 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/smp_lock.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/module.h>

#include <asm/tracehook.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif
/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
#define PFM_MASK	MASK(38)
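/*
 * Worked example (illustrative note, not from the original source):
 * MASK(3) == (1UL << 3) - 1 == 0x7, i.e. the three low-order bits set.
 * PFM_MASK == MASK(38) thus covers bits 0-37 of cr.ifs, the portion
 * that holds the previous frame marker.
 */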
#define PTRACE_DEBUG 0

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
#else
# define dprintk(format...)
#endif
/* Return TRUE if PT was created due to kernel-entry via a system-call. */

static inline int
in_syscall (struct pt_regs *pt)
{
	return (long) pt->cr_ifs >= 0;
}
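/*
 * Illustrative note (an assumption added for clarity, not from the
 * original source): bit 63 of cr.ifs is its "valid" bit.  Interruption
 * entry saves cr.ifs with bit 63 set (compare the (1UL << 63) | cfm
 * assignment in convert_to_non_syscall() below), while syscall entry
 * leaves it clear, so the signed comparison above reads as "entered
 * via syscall".
 */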
/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct_pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;
#	undef GET_BITS
}
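/*
 * Worked example (illustrative assumption, not in the original file):
 * suppose ia64_unat_pos(&pt->r8) == 18, i.e. the NaT bit for the
 * spilled r8 sits at bit 18 of the unat word.  GET_BITS(8, 11, unat)
 * then computes dist = 18 - 8 = 10 and rotates unat right by 10, so
 * the NaT bits for r8-r11 land at bit positions 8-11 of the result.
 */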
/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
#	define PUT_BITS(first, last, nat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct_pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);
	return scratch_unat;
#	undef PUT_BITS
}
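/*
 * Illustrative note (assumption, not in the original file): PUT_BITS
 * is the exact inverse of GET_BITS -- where GET_BITS rotates right by
 * dist to bring NaT bits from their spill-address-derived unat
 * positions down to register-number positions, PUT_BITS masks out
 * bits first..last and rotates left by the same dist to put them
 * back.
 */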
#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6

void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}
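/*
 * Illustrative note (assumption, not from the original source): an
 * IA-64 bundle is 16 bytes holding three instruction slots, so psr.ri
 * cycles through 0..2 and stepping past slot 2 moves to slot 0 of the
 * next bundle (cr_iip += 16).  The 5-bit template field occupies the
 * low bits of the bundle; templates come in pairs, so ((w0 >> 1) & 0xf)
 * yields the pair number, and pair IA64_MLX_TEMPLATE (0x2) is MLX,
 * whose long instruction spans slots 1 and 2 -- which is why slot 2
 * of an MLX bundle is never a valid restart point.
 */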
void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}
/*
 * This routine is used to read the RNaT bits that are stored on the
 * kernel backing store.  Since, in general, the alignment of the user
 * and kernel are different, this is not completely trivial.  In
 * essence, we need to construct the user RNAT based on up to two
 * kernel RNAT values and/or the RNAT value saved in the child's
 * pt_regs.
 *
 * user rbs
 *
 * +--------+ <-- lowest address
 * | slot62 |
 * +--------+
 * | rnat   | 0x....1f8
 * +--------+
 * | slot00 |   \
 * +--------+   |
 * | slot01 |   > child_regs->ar_rnat
 * +--------+   |
 * | slot02 |   /				kernel rbs
 * +--------+					+--------+
 *	     <- child_regs->ar_bspstore	| slot61 | <-- krbs
 * +- - - - +					+--------+
 * | slot62 |					| slot62 |
 * +- - - - +					+--------+
 * | rnat   |					| rnat   |
 * +- - - - +					+--------+
 * | slot00 |					| slot00 |   \
 * +- - - - +					+--------+   |
 * | slot01 |					| slot01 |   > child_stack->ar_rnat
 * +- - - - +					+--------+   |
 * | slot02 |					| slot02 |   /
 * +- - - - +					+--------+
 *						<--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 *   merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
 */
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}
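/*
 * Worked example (illustrative assumption, not from the original
 * file): if slot0_kaddr falls on kernel slot number 5, then shift == 5
 * and user rnat bit 0 corresponds to kernel rnat bit 5; user bits
 * 0-57 come from the first collection word as (rnat0 & m) >> 5 and
 * the remaining high bits from the following one as (rnat1 & m) << 58.
 */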
/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}
static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}
/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_BSP gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end)) {
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill (e.g., see EAS 2.6,
			 * 4.4.4.6 Register Spill and Fill).  To get
			 * consistent behavior across all possible
			 * IA-64 implementations, we return zero in
			 * this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}
long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end)) {
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
		   != sizeof(val))
		return -EIO;
	return 0;
}
/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame mask" that was active at the time the kernel was
 * entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}
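/*
 * Illustrative note (assumption, not in the original source): the low
 * seven bits of the CFM hold "sof", the size of the current register
 * frame.  E.g. cfm & 0x7f == 6 after a syscall entry means a
 * six-register frame (such as the syscall's output registers) is
 * still sitting on the kernel RBS, so it is counted into ndirty and
 * hence into the user-visible ar.bsp.
 */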
/*
 * Synchronize (i.e., write) the RSE backing store living in kernel
 * space to the VM of the CHILD task.  SW and PT are the pointers to
 * the switch_stack and pt_regs structures, respectively.
 * USER_RBS_END is the user-level address at which the backing store
 * ends.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val), 1)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}
/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}
/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}
#if 0
static int
access_fr (struct unw_frame_info *info, int regnum, int hi,
	   unsigned long *data, int write_access)
{
	struct ia64_fpreg fpval;
	int ret;

	ret = unw_get_fr(info, regnum, &fpval);
	if (ret < 0)
		return ret;

	if (write_access) {
		fpval.u.bits[hi] = *data;
		ret = unw_set_fr(info, regnum, fpval);
	} else
		*data = fpval.u.bits[hi];
	return ret;
}
#endif /* access_fr() */
/*
 * Change the machine-state of CHILD such that it will return via the normal
 * kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __FUNCTION__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__FUNCTION__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |= (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}
static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
		 struct unw_frame_info *info,
		 unsigned long *data, int write_access)
{
	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
	char nat = 0;

	if (write_access) {
		nat_bits = *data;
		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
			dprintk("ptrace: failed to set ar.unat\n");
			return -1;
		}
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			unw_set_gr(info, regnum, dummy,
				   (nat_bits >> regnum) & 1);
		}
	} else {
		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
			dprintk("ptrace: failed to read ar.unat\n");
			return -1;
		}
		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			nat_bits |= (nat != 0) << regnum;
		}
		*data = nat_bits;
	}
	return 0;
}
721 /* "asmlinkage" so the input arguments are preserved... */
724 syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
725 long arg4, long arg5, long arg6, long arg7,
728 if (test_thread_flag(TIF_SYSCALL_TRACE))
729 tracehook_report_syscall(®s, 0);
731 if (unlikely(current->audit_context)) {
735 if (IS_IA32_PROCESS(®s)) {
737 arch = AUDIT_ARCH_I386;
740 arch = AUDIT_ARCH_IA64;
743 audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
748 /* "asmlinkage" so the input arguments are preserved... */
751 syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
752 long arg4, long arg5, long arg6, long arg7,
755 if (unlikely(current->audit_context)) {
756 int success = AUDITSC_RESULT(regs.r10);
757 long result = regs.r8;
759 if (success != AUDITSC_SUCCESS)
761 audit_syscall_exit(success, result);
764 if (test_thread_flag(TIF_SYSCALL_TRACE))
765 tracehook_report_syscall(®s, 1);
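/*
 * Illustrative note (assumption, not from the original source): per
 * the ia64 syscall convention the return value lives in r8 and r10
 * flags an error (r10 == -1 on failure), which is why success is
 * derived from regs.r10 while the result -- negated back into an
 * errno when the call failed -- is taken from regs.r8.
 */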
/* Utrace implementation starts here */

#ifdef CONFIG_UTRACE

typedef struct utrace_get {
	void *kbuf;
	void __user *ubuf;
} utrace_get_t;

typedef struct utrace_set {
	const void *kbuf;
	const void __user *ubuf;
} utrace_set_t;

typedef struct utrace_getset {
	struct task_struct *target;
	const struct utrace_regset *regset;
	union {
		utrace_get_t get;
		utrace_set_t set;
	} u;
	unsigned int pos;
	unsigned int count;
	int ret;
} utrace_getset_t;
static int
access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;
	unsigned long dummy;
	int ret;
	char nat = 0;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_GR_OFFSET(1):
		ptr = &pt->r1;
		break;
	case ELF_GR_OFFSET(2):
	case ELF_GR_OFFSET(3):
		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
		break;
	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
		if (write_access) {
			/* read NaT bit first: */
			ret = unw_get_gr(info, addr/8, &dummy, &nat);
			if (ret < 0)
				return ret;
		}
		return unw_access_gr(info, addr/8, data, &nat, write_access);
	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
		break;
	case ELF_GR_OFFSET(12):
	case ELF_GR_OFFSET(13):
		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
		break;
	case ELF_GR_OFFSET(14):
		ptr = &pt->r14;
		break;
	case ELF_GR_OFFSET(15):
		ptr = &pt->r15;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
static int
access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_BR_OFFSET(0):
		ptr = &pt->b0;
		break;
	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
				     data, write_access);
	case ELF_BR_OFFSET(6):
		ptr = &pt->b6;
		break;
	case ELF_BR_OFFSET(7):
		ptr = &pt->b7;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
static int
access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long cfm, urbs_end, rnat_addr;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
		switch (addr) {
		case ELF_AR_RSC_OFFSET:
			/* force PL3 */
			if (write_access)
				pt->ar_rsc = *data | (3 << 2);
			else
				*data = pt->ar_rsc;
			return 0;
		case ELF_AR_BSP_OFFSET:
			/*
			 * By convention, we use PT_AR_BSP to refer to
			 * the end of the user-level backing store.
			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
			 * to get the real value of ar.bsp at the time
			 * the kernel was entered.
			 *
			 * Furthermore, when changing the contents of
			 * PT_AR_BSP (or PT_CFM) we MUST copy any
			 * user-level stacked registers that are
			 * stored on the kernel stack back to
			 * user-space because otherwise, we might end
			 * up clobbering kernel stacked registers.
			 * Also, if this happens while the task is
			 * blocked in a system call, we convert the
			 * state such that the non-system-call exit
			 * path is used.  This ensures that the proper
			 * state will be picked up when resuming
			 * execution.  However, it *also* means that
			 * once we write PT_AR_BSP/PT_CFM, it won't be
			 * possible to modify the syscall arguments of
			 * the pending system call any longer.  This
			 * shouldn't be an issue because modifying
			 * PT_AR_BSP/PT_CFM generally implies that
			 * we're either abandoning the pending system
			 * call or that we defer its re-execution
			 * (e.g., due to GDB doing an inferior
			 * function call).
			 */
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (ia64_sync_user_rbs(target,
							info->sw,
							pt->ar_bspstore,
							urbs_end) < 0)
						return -1;
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;
		case ELF_AR_BSPSTORE_OFFSET:	// ar_bspstore
			ptr = &pt->ar_bspstore;
			break;
		case ELF_AR_RNAT_OFFSET:	// ar_rnat
			urbs_end = ia64_get_user_rbs_end(target, pt, NULL);
			rnat_addr = (long) ia64_rse_rnat_addr((long *)
							      urbs_end);
			if (write_access)
				return ia64_poke(target, info->sw, urbs_end,
						 rnat_addr, *data);
			else
				return ia64_peek(target, info->sw, urbs_end,
						 rnat_addr, (long *) data);
		case ELF_AR_CCV_OFFSET:		// ar_ccv
			ptr = &pt->ar_ccv;
			break;
		case ELF_AR_UNAT_OFFSET:	// ar_unat
			ptr = &pt->ar_unat;
			break;
		case ELF_AR_FPSR_OFFSET:	// ar_fpsr
			ptr = &pt->ar_fpsr;
			break;
		case ELF_AR_PFS_OFFSET:		// ar_pfs
			ptr = &pt->ar_pfs;
			break;
		case ELF_AR_LC_OFFSET:		// ar_lc
			return unw_access_ar(info, UNW_AR_LC, data,
					     write_access);
		case ELF_AR_EC_OFFSET:		// ar_ec
			return unw_access_ar(info, UNW_AR_EC, data,
					     write_access);
		case ELF_AR_CSD_OFFSET:		// ar_csd
			ptr = &pt->ar_csd;
			break;
		case ELF_AR_SSD_OFFSET:		// ar_ssd
			ptr = &pt->ar_ssd;
		}
	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
		switch (addr) {
		case ELF_CR_IIP_OFFSET:
			ptr = &pt->cr_iip;
			break;
		case ELF_CFM_OFFSET:
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (ia64_sync_user_rbs(target,
							info->sw,
							pt->ar_bspstore,
							urbs_end) < 0)
						return -1;
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
			return 0;
		case ELF_CR_IPSR_OFFSET:
			if (write_access)
				pt->cr_ipsr = ((*data & IPSR_MASK)
					       | (pt->cr_ipsr & ~IPSR_MASK));
			else
				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;
		}
	} else if (addr == ELF_NAT_OFFSET)
		return access_nat_bits(target, pt, info,
				       data, write_access);
	else if (addr == ELF_PR_OFFSET)
		ptr = &pt->pr;
	else
		return -1;

	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
static int
access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
		return access_elf_gpreg(target, info, addr, data,
					write_access);
	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
		return access_elf_breg(target, info, addr, data,
				       write_access);
	else
		return access_elf_areg(target, info, addr, data,
				       write_access);
}
void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	utrace_getset_t *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index, min_copy;

	if (unw_unwind_to_user(info) < 0)
		return;

	/*
	 * coredump format:
	 *	r0-r31
	 *	NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *	predicate registers (p0-p63)
	 *	b0-b7
	 *	ip cfm user-mask
	 *	ar.rsc ar.bsp ar.bspstore ar.rnat
	 *	ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = utrace_regset_copyout_zero(&dst->pos, &dst->count,
						      &dst->u.get.kbuf,
						      &dst->u.get.ubuf,
						      0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1 - gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
		for (i = dst->pos; i < min_copy;
		     i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
					   &tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* r16-r31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
		for (i = dst->pos; i < min_copy;
		     i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
					   &tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
		for (i = dst->pos; i < min_copy;
		     i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
					   &tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
	}
}
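/*
 * Illustrative walk-through (assumption, not from the original file):
 * a regset read of 16 bytes starting at ELF_GR_OFFSET(15) is served in
 * two steps -- the "gr1 - gr15" block copies r15 via access_elf_reg()
 * and advances dst->pos to ELF_GR_OFFSET(16) with 8 bytes remaining,
 * then the "r16-r31" block copies r16 straight out of pt_regs.
 */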
void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	utrace_getset_t *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = utrace_regset_copyin_ignore(&dst->pos, &dst->count,
						       &dst->u.set.kbuf,
						       &dst->u.set.ubuf,
						       0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1-gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		i = dst->pos;
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
					   &tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* gr16-gr31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		i = dst->pos;
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret)
			return;
		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
					   &tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		i = dst->pos;
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
					   &tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
	}
}
#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))

void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
	utrace_getset_t *dst = arg;
	struct task_struct *task = dst->target;
	elf_fpreg_t tmp[30];
	int index, min_copy, i;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = utrace_regset_copyout_zero(&dst->pos, &dst->count,
						      &dst->u.get.kbuf,
						      &dst->u.get.ubuf,
						      0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);
		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
			       dst->pos + dst->count);
		for (i = dst->pos; i < min_copy;
		     i += sizeof(elf_fpreg_t), index++)
			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
				       &tmp[index])) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = utrace_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fph */
	if (dst->count > 0) {
		ia64_flush_fph(dst->target);
		if (task->thread.flags & IA64_THREAD_FPH_VALID)
			dst->ret = utrace_regset_copyout(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				&dst->target->thread.fph,
				ELF_FP_OFFSET(32), -1);
		else
			/* Zero fill instead.  */
			dst->ret = utrace_regset_copyout_zero(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				ELF_FP_OFFSET(32), -1);
	}
}
void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
	utrace_getset_t *dst = arg;
	elf_fpreg_t fpreg, tmp[30];
	int index, start, end;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = utrace_regset_copyin_ignore(&dst->pos, &dst->count,
						       &dst->u.set.kbuf,
						       &dst->u.set.ubuf,
						       0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		start = dst->pos;
		end = min(((unsigned int)ELF_FP_OFFSET(32)),
			  dst->pos + dst->count);
		dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->ret)
			return;

		if (start & 0xF) {	// only write high part
			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
				       &fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
				= fpreg.u.bits[0];
			start &= ~0xFUL;
		}
		if (end & 0xF) {	// only write low part
			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
				       &fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
				= fpreg.u.bits[1];
			end = (end + 0xF) & ~0xFUL;
		}

		for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
			index = start / sizeof(elf_fpreg_t);
			if (unw_set_fr(info, index, tmp[index - 2])) {
				dst->ret = -EIO;
				return;
			}
		}
		if (dst->ret || dst->count == 0)
			return;
	}

	/* fph */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
		ia64_sync_fph(dst->target);
		dst->ret = utrace_regset_copyin(&dst->pos, &dst->count,
						&dst->u.set.kbuf,
						&dst->u.set.ubuf,
						&dst->target->thread.fph,
						ELF_FP_OFFSET(32), -1);
	}
}
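/*
 * Illustrative note (assumption, not from the original source): an
 * elf_fpreg_t is 16 bytes while regset positions advance in 8-byte
 * words, so a copyin may start or stop in the middle of an
 * f-register.  The unw_get_fr() calls above fetch the current value of
 * the straddled register so that the half the user did not supply is
 * preserved when the loop's unw_set_fr() writes tmp[] back.
 */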
static int
do_regset_call(void (*call)(struct unw_frame_info *, void *),
	       struct task_struct *target,
	       const struct utrace_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	utrace_getset_t info = { .target = target, .regset = regset,
				 .pos = pos, .count = count,
				 .u.set = { .kbuf = kbuf, .ubuf = ubuf },
				 .ret = 0 };

	if (target == current)
		unw_init_running(call, &info);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, target);
		(*call)(&ufi, &info);
	}

	return info.ret;
}
static int
gpregs_get(struct task_struct *target,
	   const struct utrace_regset *regset,
	   unsigned int pos, unsigned int count,
	   void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_gpregs_get, target, regset, pos, count,
			      kbuf, ubuf);
}

static int gpregs_set(struct task_struct *target,
		const struct utrace_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_gpregs_set, target, regset, pos, count,
			      kbuf, ubuf);
}
/*
 * This is called to write back the register backing store.
 * ptrace does this before it stops, so that a tracer reading the user
 * memory after the thread stops will get the current register data.
 */
static int
gpregs_writeback(struct task_struct *target,
		 const struct utrace_regset *regset,
		 int force)
{
	unsigned long urbs_end, cfm;
	struct pt_regs *pt = task_pt_regs(target);
	struct switch_stack *sw = (void *) (target->thread.ksp + 16);
	urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
	return ia64_sync_user_rbs(target, sw, pt->ar_bspstore, urbs_end);
}
static int
fpregs_active(struct task_struct *target, const struct utrace_regset *regset)
{
	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
}

static int fpregs_get(struct task_struct *target,
		const struct utrace_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_fpregs_get, target, regset, pos, count,
			      kbuf, ubuf);
}

static int fpregs_set(struct task_struct *target,
		const struct utrace_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_fpregs_set, target, regset, pos, count,
			      kbuf, ubuf);
}
static int dbregs_get(struct task_struct *target,
		const struct utrace_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	int ret;

#ifdef CONFIG_PERFMON
	/*
	 * Check if debug registers are used by perfmon.  This
	 * test must be done once we know that we can do the
	 * operation, i.e. the arguments are all valid, but
	 * before we start modifying the state.
	 *
	 * Perfmon needs to keep a count of how many processes
	 * are trying to modify the debug registers for system
	 * wide monitoring sessions.
	 *
	 * We also include read access here, because they may
	 * cause the PMU-installed debug register state
	 * (dbr[], ibr[]) to be reset.  The two arrays are also
	 * used by perfmon, but we do not use
	 * IA64_THREAD_DBG_VALID.  The registers are restored
	 * by the PMU context switch code.
	 */
	if (pfm_use_debug_registers(target))
		return -EIO;
#endif

	if (!(target->thread.flags & IA64_THREAD_DBG_VALID))
		ret = utrace_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
						 0, -1);
	else {
		preempt_disable();
		if (target == current)
			ia64_load_debug_regs(&target->thread.dbr[0]);
		preempt_enable_no_resched();
		ret = utrace_regset_copyout(&pos, &count, &kbuf, &ubuf,
					    &target->thread.dbr, 0, -1);
	}

	return ret;
}
static int dbregs_set(struct task_struct *target,
		const struct utrace_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	int i, ret;

#ifdef CONFIG_PERFMON
	if (pfm_use_debug_registers(target))
		return -EIO;
#endif

	if (!(target->thread.flags & IA64_THREAD_DBG_VALID)) {
		target->thread.flags |= IA64_THREAD_DBG_VALID;
		memset(target->thread.dbr, 0,
		       2 * sizeof(target->thread.dbr));
	} else if (target == current) {
		preempt_disable();
		ia64_save_debug_regs(&target->thread.dbr[0]);
		preempt_enable_no_resched();
	}

	ret = utrace_regset_copyin(&pos, &count, &kbuf, &ubuf,
				   &target->thread.dbr, 0, -1);

	for (i = 1; i < IA64_NUM_DBG_REGS; i += 2) {
		target->thread.dbr[i] &= ~(7UL << 56);
		target->thread.ibr[i] &= ~(7UL << 56);
	}

	if (ret)
		return ret;

	if (target == current) {
		preempt_disable();
		ia64_load_debug_regs(&target->thread.dbr[0]);
		preempt_enable_no_resched();
	}
	return 0;
}
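/*
 * Illustrative note (assumption, not from the original source): in the
 * odd-numbered (mask) debug registers, bits 56-58 match privilege
 * levels 0-2, so clearing them above prevents a traced task from
 * installing a breakpoint that would fire in the kernel; only the
 * user-level match bit survives.
 */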
static const struct utrace_regset native_regsets[] = {
	{
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
		.get = gpregs_get, .set = gpregs_set,
		.writeback = gpregs_writeback
	},
	{
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
	},
	{
		.n = 2 * IA64_NUM_DBG_REGS, .size = sizeof(long),
		.align = sizeof(long),
		.get = dbregs_get, .set = dbregs_set
	}
};

const struct utrace_regset_view utrace_ia64_native = {
	.name = "ia64",
	.e_machine = EM_IA_64,
	.regsets = native_regsets,
	.n = sizeof native_regsets / sizeof native_regsets[0],
};
EXPORT_SYMBOL_GPL(utrace_ia64_native);
#endif /* CONFIG_UTRACE */


#ifdef CONFIG_PTRACE
#define WORD(member, num) \
	offsetof(struct pt_all_user_regs, member), \
	offsetof(struct pt_all_user_regs, member) + num * sizeof(long)
static const struct ptrace_layout_segment pt_all_user_regs_layout[] = {
	{WORD(nat, 1),			0,	ELF_NAT_OFFSET},
	{WORD(cr_iip, 1),		0,	ELF_CR_IIP_OFFSET},
	{WORD(cfm, 1),			0,	ELF_CFM_OFFSET},
	{WORD(cr_ipsr, 1),		0,	ELF_CR_IPSR_OFFSET},
	{WORD(pr, 1),			0,	ELF_PR_OFFSET},
	{WORD(gr[0], 32),		0,	ELF_GR_OFFSET(0)},
	{WORD(br[0], 8),		0,	ELF_BR_OFFSET(0)},
	{WORD(ar[PT_AUR_RSC], 4),	0,	ELF_AR_RSC_OFFSET},
	{WORD(ar[PT_AUR_CCV], 1),	0,	ELF_AR_CCV_OFFSET},
	{WORD(ar[PT_AUR_UNAT], 1),	0,	ELF_AR_UNAT_OFFSET},
	{WORD(ar[PT_AUR_FPSR], 1),	0,	ELF_AR_FPSR_OFFSET},
	{WORD(ar[PT_AUR_PFS], 3),	0,	ELF_AR_PFS_OFFSET},
	{offsetof(struct pt_all_user_regs, fr[0]),
	 offsetof(struct pt_all_user_regs, fr[128]),
	 1, ELF_FP_OFFSET(0)},
	{0, 0, -1, 0}
};
#undef WORD
#define NEXT(addr, sum)	(addr + sum * sizeof(long))
static const struct ptrace_layout_segment pt_uarea_layout[] = {
	{PT_F32,	PT_NAT_BITS,		1,	ELF_FP_OFFSET(32)},
	{PT_NAT_BITS,	NEXT(PT_NAT_BITS, 1),	0,	ELF_NAT_OFFSET},
	{PT_F2,		PT_F10,			1,	ELF_FP_OFFSET(2)},
	{PT_F10,	PT_R4,			1,	ELF_FP_OFFSET(10)},
	{PT_R4,		PT_B1,			0,	ELF_GR_OFFSET(4)},
	{PT_B1,		PT_AR_EC,		0,	ELF_BR_OFFSET(1)},
	{PT_AR_EC,	PT_AR_LC,		0,	ELF_AR_EC_OFFSET},
	{PT_AR_LC,	NEXT(PT_AR_LC, 1),	0,	ELF_AR_LC_OFFSET},
	{PT_CR_IPSR,	PT_CR_IIP,		0,	ELF_CR_IPSR_OFFSET},
	{PT_CR_IIP,	PT_AR_UNAT,		0,	ELF_CR_IIP_OFFSET},
	{PT_AR_UNAT,	PT_AR_PFS,		0,	ELF_AR_UNAT_OFFSET},
	{PT_AR_PFS,	PT_AR_RSC,		0,	ELF_AR_PFS_OFFSET},
	{PT_AR_RSC,	PT_AR_RNAT,		0,	ELF_AR_RSC_OFFSET},
	{PT_AR_RNAT,	PT_AR_BSPSTORE,		0,	ELF_AR_RNAT_OFFSET},
	{PT_AR_BSPSTORE, PT_PR,			0,	ELF_AR_BSPSTORE_OFFSET},
	{PT_PR,		PT_B6,			0,	ELF_PR_OFFSET},
	{PT_B6,		PT_AR_BSP,		0,	ELF_BR_OFFSET(6)},
	{PT_AR_BSP,	PT_R1,			0,	ELF_AR_BSP_OFFSET},
	{PT_R1,		PT_R12,			0,	ELF_GR_OFFSET(1)},
	{PT_R12,	PT_R8,			0,	ELF_GR_OFFSET(12)},
	{PT_R8,		PT_R16,			0,	ELF_GR_OFFSET(8)},
	{PT_R16,	PT_AR_CCV,		0,	ELF_GR_OFFSET(16)},
	{PT_AR_CCV,	PT_AR_FPSR,		0,	ELF_AR_CCV_OFFSET},
	{PT_AR_FPSR,	PT_B0,			0,	ELF_AR_FPSR_OFFSET},
	{PT_B0,		PT_B7,			0,	ELF_BR_OFFSET(0)},
	{PT_B7,		PT_F6,			0,	ELF_BR_OFFSET(7)},
	{PT_F6,		PT_AR_CSD,		1,	ELF_FP_OFFSET(6)},
	{PT_AR_CSD,	NEXT(PT_AR_CSD, 2),	0,	ELF_AR_CSD_OFFSET},
	{PT_DBR,	NEXT(PT_DBR, 8),	2,	0},
	{PT_IBR,	NEXT(PT_IBR, 8),	2,	8 * sizeof(long)},
	{0, 0, -1, 0}
};
#undef NEXT
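/*
 * Illustrative note (assumption, not from the original source): each
 * layout segment maps a slice of the old ptrace user area onto a
 * regset -- the third field selects the regset (0 = general registers,
 * 1 = floating point, 2 = debug registers) and the fourth is the byte
 * offset inside that regset, e.g. PT_DBR maps to offset 0 of the debug
 * regset and PT_IBR to offset 8 * sizeof(long).
 */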
fastcall int arch_ptrace(long *request, struct task_struct *child,
			 struct utrace_attached_engine *engine,
			 unsigned long addr, unsigned long data, long *val)
{
	int ret = -ENOSYS;

	switch (*request) {
	case PTRACE_OLD_GETSIGINFO:
		*request = PTRACE_GETSIGINFO;
		break;
	case PTRACE_OLD_SETSIGINFO:
		*request = PTRACE_SETSIGINFO;
		break;

	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = access_process_vm(child, addr, val, sizeof(*val), 0);
		ret = ret == sizeof(*val) ? 0 : -EIO;
		break;

	case PTRACE_PEEKUSR:
		return ptrace_layout_access(child, engine,
					    utrace_native_view(current),
					    pt_uarea_layout,
					    addr, sizeof(long),
					    NULL, val, 0);
	case PTRACE_POKEUSR:
		return ptrace_pokeusr(child, engine,
				      pt_uarea_layout, addr, data);

	case PTRACE_GETREGS:
	case PTRACE_SETREGS:
		return ptrace_layout_access(child, engine,
					    utrace_native_view(current),
					    pt_all_user_regs_layout,
					    0, sizeof(struct pt_all_user_regs),
					    (void __user *) data, NULL,
					    *request == PTRACE_SETREGS);
	}

	return ret;
}
#endif /* CONFIG_PTRACE */