/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Derived from the x86 and Alpha versions.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp_lock.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/vs_cvirt.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"

/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)

#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
#define PFM_MASK	MASK(38)

#define PTRACE_DEBUG	0

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
# define inline
#else
# define dprintk(format...)
#endif

/* Return TRUE if PT was created due to kernel-entry via a system-call.  */

static inline int
in_syscall (struct pt_regs *pt)
{
	return (long) pt->cr_ifs >= 0;
}
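
/*
 * Note (illustrative): bit 63 of cr_ifs is the "valid" bit.  It is
 * set for interruption frames but clear for syscall frames, so the
 * sign test above distinguishes the two kernel-entry paths:
 *
 *	pt->cr_ifs = cfm;		  =>  in_syscall(pt) != 0
 *	pt->cr_ifs = (1UL << 63) | cfm;	  =>  in_syscall(pt) == 0
 */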

/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

#	undef GET_BITS
}
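
/*
 * Worked example (illustrative): suppose the unat bit for pt->r16
 * lives at bit position 10 of scratch_unat.  For GET_BITS(16, 31,
 * unat) we then get bit = 10, first = 16, so dist = 64 + 10 - 16 =
 * 58, and ia64_rotr(unat, 58) rotates that bit around to position
 * 16, where "MASK(16) << 16" extracts the NaT bits of r16-r31 in one
 * step.  The actual bit positions depend on the struct pt_regs
 * layout, which is why ia64_unat_pos() is used.
 */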

/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
#	define PUT_BITS(first, last, nat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);
	return scratch_unat;

#	undef PUT_BITS
}

#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6

void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}
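
/*
 * Note (illustrative): IA-64 instructions are packed into 16-byte
 * bundles of three slots, so psr.ri counts 0..2 and cr_iip advances
 * by 16 when ri wraps.  "(w0 >> 1) & 0xf" extracts the bundle's
 * template field minus its low bit (which only marks a stop); in an
 * MLX bundle, slots 1 and 2 together hold one long instruction
 * (e.g., movl), which is why the code never leaves ri pointing at
 * slot 2 of such a bundle.
 */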

void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}

/*
 * This routine is used to read the rnat bits that are stored on the
 * kernel backing store.  Since, in general, the alignment of the user
 * and kernel backing stores differs, this is not completely trivial.
 * In essence, we need to construct the user RNAT based on up to two
 * kernel RNAT values and/or the RNAT value saved in the child's
 * pt_regs.
 *
 *	user rbs
 *
 * +--------+ <-- lowest address
 * | slot62 |
 * +--------+
 * | rnat   | 0x....1f8
 * +--------+
 * | slot00 | \
 * +--------+ |
 * | slot01 | > child_regs->ar_rnat
 * +--------+ |
 * | slot02 | /				kernel rbs
 * +--------+				+--------+
 *	    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 * +- - - - +				+--------+
 * | slot62 |				| slot62 |
 * +- - - - +				+--------+
 * |	    |				| rnat	 |
 * +- - - - +				+--------+
 * | slot00 |				| slot00 | \
 * +- - - - +				+--------+ |
 * | slot01 |				| slot01 | > child_stack->ar_rnat
 * +- - - - +				+--------+ |
 * | slot02 |				| slot02 | /
 * +- - - - +				+--------+
 *				<--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 *   merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
 */
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}
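
/*
 * Worked example (illustrative): if ubspstore points at slot 5 of
 * its 64-slot group and shift works out to 20, then user rnat bits
 * 0-4 (umask = MASK(5)) come straight from pt->ar_rnat, the next
 * bits come from (rnat0 & m) >> 20, and whatever wrapped into the
 * following kernel rnat collection word is merged in via
 * (rnat1 & m) << 43, i.e., << (63 - shift).
 */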

/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}

static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}

/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_RBS_END gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (unsigned long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end)) {
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill (e.g., see EAS 2.6,
			 * 4.4.4.6 Register Spill and Fill).  To get
			 * consistent behavior across all possible
			 * IA-64 implementations, we return zero in
			 * this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}
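
/*
 * Typical use (sketch): the PTRACE_PEEKTEXT/PTRACE_PEEKDATA handler
 * in sys_ptrace() below does
 *
 *	urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
 *	ret = ia64_peek(child, sw, urbs_end, addr, &data);
 *
 * so reads that land in the not-yet-flushed part of the user's
 * register backing store are transparently serviced from the kernel
 * RBS.
 */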

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (unsigned long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end)) {
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else if (laddr < urbs_end) {
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*ia64_rse_skip_regs(krbs, regnum) = val;
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
		   != sizeof(val))
		return -EIO;
	return 0;
}

/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame mask" that was active at the time the kernel was
 * entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}
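
/*
 * Arithmetic note (illustrative): pt->loadrs keeps the RSE dirty
 * partition size in ar.rsc.loadrs format, i.e., a byte count shifted
 * left by 16.  Thus (pt->loadrs >> 19) == (pt->loadrs >> 16) / 8 is
 * the number of dirty 8-byte slots, and ia64_rse_num_regs() converts
 * that slot count into a register count by discounting the
 * interleaved rnat collection slots.
 */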

/*
 * Synchronize (i.e., write) the RSE backing store living in kernel
 * space to the VM of the CHILD task.  SW and PT are the pointers to
 * the switch_stack and pt_regs structures, respectively.
 * USER_RBS_END is the user-level address at which the backing store
 * ends.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val), 1)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}

static inline int
thread_matches (struct task_struct *thread, unsigned long addr)
{
	unsigned long thread_rbs_end;
	struct pt_regs *thread_regs;

	if (ptrace_check_attach(thread, 0) < 0)
		/*
		 * If the thread is not in an attachable state, we'll
		 * ignore it.  The net effect is that if ADDR happens
		 * to overlap with the portion of the thread's
		 * register backing store that is currently residing
		 * on the thread's kernel stack, then ptrace() may end
		 * up accessing a stale value.  But if the thread
		 * isn't stopped, that's a problem anyhow, so we're
		 * doing as well as we can...
		 */
		return 0;

	thread_regs = task_pt_regs(thread);
	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
		return 0;

	return 1;	/* looks like we've got a winner */
}

/*
 * GDB apparently wants to be able to read the register-backing store
 * of any thread when attached to a given process.  If we are peeking
 * or poking an address that happens to reside in the kernel-backing
 * store of another thread, we need to attach to that thread, because
 * otherwise we end up accessing stale data.
 *
 * task_list_lock must be read-locked before calling this routine!
 */
static struct task_struct *
find_thread_for_addr (struct task_struct *child, unsigned long addr)
{
	struct task_struct *p;
	struct mm_struct *mm;
	struct list_head *this, *next;
	int mm_users;

	if (!(mm = get_task_mm(child)))
		return child;

	/* -1 because of our get_task_mm(): */
	mm_users = atomic_read(&mm->mm_users) - 1;
	if (mm_users <= 1)
		goto out;		/* not multi-threaded */

	/*
	 * Traverse the current process' children list.  Every task that
	 * one attaches to becomes a child.  And it is only attached children
	 * of the debugger that are of interest (ptrace_check_attach checks
	 * for this).
	 */
	list_for_each_safe(this, next, &current->children) {
		p = list_entry(this, struct task_struct, sibling);
		if (p->mm != mm)
			continue;
		if (thread_matches(p, addr)) {
			child = p;
			goto out;
		}
	}
  out:
	mmput(mm);
	return child;
}

/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}
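
/*
 * Background (illustrative): f32-f127 ("fph") are handled lazily.
 * While a task owns the FPU, the live registers are authoritative
 * and psr.mfh tracks whether they have been modified; once flushed
 * here, IA64_THREAD_FPH_VALID marks thread.fph as the authoritative
 * copy instead.
 */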

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}

static int
access_fr (struct unw_frame_info *info, int regnum, int hi,
	   unsigned long *data, int write_access)
{
	struct ia64_fpreg fpval;
	int ret;

	ret = unw_get_fr(info, regnum, &fpval);
	if (ret < 0)
		return ret;

	if (write_access) {
		fpval.u.bits[hi] = *data;
		ret = unw_set_fr(info, regnum, fpval);
	} else
		*data = fpval.u.bits[hi];
	return ret;
}

/*
 * Change the machine-state of CHILD such that it will return via the normal
 * kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __FUNCTION__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__FUNCTION__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |=  (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}

static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
		 struct unw_frame_info *info,
		 unsigned long *data, int write_access)
{
	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
	char nat = 0;

	if (write_access) {
		nat_bits = *data;
		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
			dprintk("ptrace: failed to set ar.unat\n");
			return -1;
		}
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			unw_set_gr(info, regnum, dummy,
				   (nat_bits >> regnum) & 1);
		}
	} else {
		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
			dprintk("ptrace: failed to read ar.unat\n");
			return -1;
		}
		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			nat_bits |= (nat != 0) << regnum;
		}
		*data = nat_bits;
	}
	return 0;
}

static int
access_uarea (struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access)
{
	unsigned long *ptr, regnum, urbs_end, rnat_addr, cfm;
	struct switch_stack *sw;
	struct pt_regs *pt;
#	define pt_reg_addr(pt, reg)	((void *)			\
					 ((unsigned long) (pt)		\
					  + offsetof(struct pt_regs, reg)))

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}

	if (addr < PT_F127 + 16) {
		/* accessing fph */
		if (write_access)
			ia64_sync_fph(child);
		else
			ia64_flush_fph(child);
		ptr = (unsigned long *)
			((unsigned long) &child->thread.fph + addr);
	} else if ((addr >= PT_F10) && (addr < PT_F11 + 16)) {
		/* scratch registers untouched by kernel (saved in pt_regs) */
		ptr = pt_reg_addr(pt, f10) + (addr - PT_F10);
	} else if (addr >= PT_F12 && addr < PT_F15 + 16) {
		/*
		 * Scratch registers untouched by kernel (saved in
		 * switch_stack).
		 */
		ptr = (unsigned long *) ((long) sw
					 + (addr - PT_NAT_BITS - 32));
	} else if (addr < PT_AR_LC + 8) {
		/* preserved state: */
		struct unw_frame_info info;
		char nat = 0;
		int ret;

		unw_init_from_blocked_task(&info, child);
		if (unw_unwind_to_user(&info) < 0)
			return -1;

		switch (addr) {
		      case PT_NAT_BITS:
			return access_nat_bits(child, pt, &info,
					       data, write_access);

		      case PT_R4: case PT_R5: case PT_R6: case PT_R7:
			if (write_access) {
				/* read NaT bit first: */
				unsigned long dummy;

				ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4,
						 &dummy, &nat);
				if (ret < 0)
					return ret;
			}
			return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data,
					     &nat, write_access);

		      case PT_B1: case PT_B2: case PT_B3:
		      case PT_B4: case PT_B5:
			return unw_access_br(&info, (addr - PT_B1)/8 + 1, data,
					     write_access);

		      case PT_AR_EC:
			return unw_access_ar(&info, UNW_AR_EC, data,
					     write_access);

		      case PT_AR_LC:
			return unw_access_ar(&info, UNW_AR_LC, data,
					     write_access);

		      default:
			if (addr >= PT_F2 && addr < PT_F5 + 16)
				return access_fr(&info, (addr - PT_F2)/16 + 2,
						 (addr & 8) != 0, data,
						 write_access);
			else if (addr >= PT_F16 && addr < PT_F31 + 16)
				return access_fr(&info,
						 (addr - PT_F16)/16 + 16,
						 (addr & 8) != 0,
						 data, write_access);
			else {
				dprintk("ptrace: rejecting access to register "
					"address 0x%lx\n", addr);
				return -1;
			}
		}
	} else if (addr < PT_F9+16) {
		/* scratch state */
		switch (addr) {
		      case PT_AR_BSP:
			/*
			 * By convention, we use PT_AR_BSP to refer to
			 * the end of the user-level backing store.
			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
			 * to get the real value of ar.bsp at the time
			 * the kernel was entered.
			 *
			 * Furthermore, when changing the contents of
			 * PT_AR_BSP (or PT_CFM) we MUST copy any
			 * user-level stacked registers that are
			 * stored on the kernel stack back to
			 * user-space because otherwise, we might end
			 * up clobbering kernel stacked registers.
			 * Also, if this happens while the task is
			 * blocked in a system call, we convert the
			 * state such that the non-system-call exit
			 * path is used.  This ensures that the proper
			 * state will be picked up when resuming
			 * execution.  However, it *also* means that
			 * once we write PT_AR_BSP/PT_CFM, it won't be
			 * possible to modify the syscall arguments of
			 * the pending system call any longer.  This
			 * shouldn't be an issue because modifying
			 * PT_AR_BSP/PT_CFM generally implies that
			 * we're either abandoning the pending system
			 * call or that we defer its re-execution
			 * (e.g., due to GDB doing an inferior
			 * function call).
			 */
			urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (ia64_sync_user_rbs(child, sw,
							pt->ar_bspstore,
							urbs_end) < 0)
						return -1;
					if (in_syscall(pt))
						convert_to_non_syscall(child,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;

		      case PT_CFM:
			urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
			if (write_access) {
				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (ia64_sync_user_rbs(child, sw,
							pt->ar_bspstore,
							urbs_end) < 0)
						return -1;
					if (in_syscall(pt))
						convert_to_non_syscall(child,
								       pt,
								       cfm);
					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
			return 0;

		      case PT_CR_IPSR:
			if (write_access)
				pt->cr_ipsr = ((*data & IPSR_MASK)
					       | (pt->cr_ipsr & ~IPSR_MASK));
			else
				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;

		      case PT_AR_RSC:
			if (write_access)
				pt->ar_rsc = *data | (3 << 2);	/* force PL3 */
			else
				*data = pt->ar_rsc;
			return 0;

		      case PT_AR_RNAT:
			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
			rnat_addr = (long) ia64_rse_rnat_addr((long *)
							      urbs_end);
			if (write_access)
				return ia64_poke(child, sw, urbs_end,
						 rnat_addr, *data);
			else
				return ia64_peek(child, sw, urbs_end,
						 rnat_addr, (long *) data);

		      case PT_R1:
			ptr = pt_reg_addr(pt, r1);
			break;
		      case PT_R2:  case PT_R3:
			ptr = pt_reg_addr(pt, r2) + (addr - PT_R2);
			break;
		      case PT_R8:  case PT_R9:  case PT_R10: case PT_R11:
			ptr = pt_reg_addr(pt, r8) + (addr - PT_R8);
			break;
		      case PT_R12: case PT_R13:
			ptr = pt_reg_addr(pt, r12) + (addr - PT_R12);
			break;
		      case PT_R14:
			ptr = pt_reg_addr(pt, r14);
			break;
		      case PT_R15:
			ptr = pt_reg_addr(pt, r15);
			break;
		      case PT_R16: case PT_R17: case PT_R18: case PT_R19:
		      case PT_R20: case PT_R21: case PT_R22: case PT_R23:
		      case PT_R24: case PT_R25: case PT_R26: case PT_R27:
		      case PT_R28: case PT_R29: case PT_R30: case PT_R31:
			ptr = pt_reg_addr(pt, r16) + (addr - PT_R16);
			break;
		      case PT_B0:
			ptr = pt_reg_addr(pt, b0);
			break;
		      case PT_B6:
			ptr = pt_reg_addr(pt, b6);
			break;
		      case PT_B7:
			ptr = pt_reg_addr(pt, b7);
			break;
		      case PT_F6:  case PT_F6+8: case PT_F7: case PT_F7+8:
		      case PT_F8:  case PT_F8+8: case PT_F9: case PT_F9+8:
			ptr = pt_reg_addr(pt, f6) + (addr - PT_F6);
			break;
		      case PT_AR_BSPSTORE:
			ptr = pt_reg_addr(pt, ar_bspstore);
			break;
		      case PT_AR_UNAT:
			ptr = pt_reg_addr(pt, ar_unat);
			break;
		      case PT_AR_PFS:
			ptr = pt_reg_addr(pt, ar_pfs);
			break;
		      case PT_AR_CCV:
			ptr = pt_reg_addr(pt, ar_ccv);
			break;
		      case PT_AR_FPSR:
			ptr = pt_reg_addr(pt, ar_fpsr);
			break;
		      case PT_CR_IIP:
			ptr = pt_reg_addr(pt, cr_iip);
			break;
		      case PT_PR:
			ptr = pt_reg_addr(pt, pr);
			break;
			/* scratch register */

		      default:
			/* disallow accessing anything else... */
			dprintk("ptrace: rejecting access to register "
				"address 0x%lx\n", addr);
			return -1;
		}
	} else if (addr <= PT_AR_SSD) {
		ptr = pt_reg_addr(pt, ar_csd) + (addr - PT_AR_CSD);
	} else {
		/* access debug registers */

		if (addr >= PT_IBR) {
			regnum = (addr - PT_IBR) >> 3;
			ptr = &child->thread.ibr[0];
		} else {
			regnum = (addr - PT_DBR) >> 3;
			ptr = &child->thread.dbr[0];
		}

		if (regnum >= 8) {
			dprintk("ptrace: rejecting access to register "
				"address 0x%lx\n", addr);
			return -1;
		}
#ifdef CONFIG_PERFMON
		/*
		 * Check if debug registers are used by perfmon. This
		 * test must be done once we know that we can do the
		 * operation, i.e. the arguments are all valid, but
		 * before we start modifying the state.
		 *
		 * Perfmon needs to keep a count of how many processes
		 * are trying to modify the debug registers for system
		 * wide monitoring sessions.
		 *
		 * We also include read access here, because even a
		 * read may cause the PMU-installed debug register
		 * state (dbr[], ibr[]) to be reset. The two arrays
		 * are also used by perfmon, but we do not use
		 * IA64_THREAD_DBG_VALID. The registers are restored
		 * by the PMU context switch code.
		 */
		if (pfm_use_debug_registers(child))
			return -1;
#endif

		if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
			child->thread.flags |= IA64_THREAD_DBG_VALID;
			memset(child->thread.dbr, 0,
			       sizeof(child->thread.dbr));
			memset(child->thread.ibr, 0,
			       sizeof(child->thread.ibr));
		}

		ptr += regnum;

		if ((regnum & 1) && write_access) {
			/* don't let the user set kernel-level breakpoints: */
			*ptr = *data & ~(7UL << 56);
			return 0;
		}
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
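
/*
 * Usage sketch (illustrative): PTRACE_PEEKUSR/PTRACE_POKEUSR funnel
 * through access_uarea(), so reading the stopped child's instruction
 * pointer amounts to
 *
 *	unsigned long ip;
 *	if (access_uarea(child, PT_CR_IIP, &ip, 0) == 0)
 *		...use ip...
 *
 * with the PT_* offsets coming from <asm/ptrace_offsets.h>.
 */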

static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
	struct unw_frame_info info;
	struct ia64_fpreg fpval;
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval = 0;
	char nat = 0;
	int i;

	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
	    || access_uarea(child, PT_CFM, &cfm, 0) < 0
	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0) < 0)
		return -EIO;

	/* control regs */
	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __put_user(psr, &ppr->cr_ipsr);

	/* app regs */
	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __put_user(cfm, &ppr->cfm);

	/* gr1-gr3 */
	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);

	/* gr4-gr7 */
	for (i = 4; i < 8; i++) {
		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->gr[i]);
	}

	/* gr8-gr11 */
	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */
	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

	/* gr16-gr31 */
	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */
	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */
	for (i = 1; i < 6; i++) {
		if (unw_access_br(&info, i, &val, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->br[i]);
	}

	/* b6-b7 */
	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */
	for (i = 2; i < 6; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof(fpval));
	}

	/* fr6-fr11 */
	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs(12-15) */
	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */
	for (i = 16; i < 32; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof(fpval));
	}

	/* fph */
	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */
	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */
	retval |= __put_user(nat_bits, &ppr->nat);

	ret = retval ? -EIO : 0;
	return ret;
}

static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
	struct unw_frame_info info;
	struct switch_stack *sw;
	struct ia64_fpreg fpval;
	struct pt_regs *pt;
	long ret, retval = 0;
	int i;

	memset(&fpval, 0, sizeof(fpval));

	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	/* control regs */
	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __get_user(psr, &ppr->cr_ipsr);

	/* app regs */
	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __get_user(cfm, &ppr->cfm);

	/* gr1-gr3 */
	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

	/* gr4-gr7 */
	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}

	/* gr8-gr11 */
	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */
	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

	/* gr16-gr31 */
	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */
	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */
	for (i = 1; i < 6; i++) {
		retval |= __get_user(val, &ppr->br[i]);
		unw_set_br(&info, i, val);
	}

	/* b6-b7 */
	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */
	for (i = 2; i < 6; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fr6-fr11 */
	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs(12-15) */
	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */
	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fph */
	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */
	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */
	retval |= __get_user(nat_bits, &ppr->nat);

	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
	retval |= access_uarea(child, PT_CFM, &cfm, 1);
	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);

	ret = retval ? -EIO : 0;
	return ret;
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	child_psr->ss = 0;
	child_psr->tb = 0;
}

asmlinkage long
sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
{
	struct pt_regs *pt;
	unsigned long urbs_end, peek_or_poke;
	struct task_struct *child;
	struct switch_stack *sw;
	long ret;

	lock_kernel();
	ret = -EPERM;
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	peek_or_poke = (request == PTRACE_PEEKTEXT
			|| request == PTRACE_PEEKDATA
			|| request == PTRACE_POKETEXT
			|| request == PTRACE_POKEDATA);
	ret = -ESRCH;
	read_lock(&tasklist_lock);
	{
		child = find_task_by_pid(pid);
		if (child) {
			if (peek_or_poke)
				child = find_thread_for_addr(child, addr);
			get_task_struct(child);
		}
	}
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;
	if (!vx_check(vx_task_xid(child), VX_WATCH|VX_IDENT))
		goto out_tsk;

	ret = -EPERM;
	if (pid == 1)		/* no messing around with init! */
		goto out_tsk;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_tsk;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_tsk;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);

	switch (request) {
	      case PTRACE_PEEKTEXT:
	      case PTRACE_PEEKDATA:
		/* read word at location addr */
		urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
		ret = ia64_peek(child, sw, urbs_end, addr, &data);
		if (ret == 0) {
			ret = data;
			/* ensure "ret" is not mistaken as an error code: */
			force_successful_syscall_return();
		}
		goto out_tsk;

	      case PTRACE_POKETEXT:
	      case PTRACE_POKEDATA:
		/* write the word at location addr */
		urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
		ret = ia64_poke(child, sw, urbs_end, addr, data);
		goto out_tsk;

	      case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0) {
			ret = -EIO;
			goto out_tsk;
		}
		ret = data;
		/* ensure "ret" is not mistaken as an error code */
		force_successful_syscall_return();
		goto out_tsk;

	      case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0) {
			ret = -EIO;
			goto out_tsk;
		}
		ret = 0;
		goto out_tsk;

	      case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		ret = ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
		goto out_tsk;

	      case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		ret = ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
		goto out_tsk;

	      case PTRACE_SYSCALL:
		/* continue and stop at next (return from) syscall */
	      case PTRACE_CONT:
		/* restart after signal. */
		ret = -EIO;
		if (!valid_signal(data))
			goto out_tsk;
		if (request == PTRACE_SYSCALL)
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		else
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		child->exit_code = data;

		/*
		 * Make sure the single step/taken-branch trap bits
		 * are not set:
		 */
		ia64_psr(pt)->ss = 0;
		ia64_psr(pt)->tb = 0;

		wake_up_process(child);
		ret = 0;
		goto out_tsk;

	      case PTRACE_KILL:
		/*
		 * Make the child exit.  Best we can do is send it a
		 * SIGKILL.  Perhaps it should be put in the status
		 * that it wants to exit.
		 */
		if (child->exit_state == EXIT_ZOMBIE)
			/* already dead */
			goto out_tsk;
		child->exit_code = SIGKILL;

		ptrace_disable(child);
		wake_up_process(child);
		ret = 0;
		goto out_tsk;

	      case PTRACE_SINGLESTEP:
		/* let child execute for one instruction */
	      case PTRACE_SINGLEBLOCK:
		ret = -EIO;
		if (!valid_signal(data))
			goto out_tsk;

		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		if (request == PTRACE_SINGLESTEP) {
			ia64_psr(pt)->ss = 1;
		} else {
			ia64_psr(pt)->tb = 1;
		}
		child->exit_code = data;

		/* give it a chance to run. */
		wake_up_process(child);
		ret = 0;
		goto out_tsk;

	      case PTRACE_DETACH:
		/* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		goto out_tsk;

	      case PTRACE_GETREGS:
		ret = ptrace_getregs(child,
				     (struct pt_all_user_regs __user *) data);
		goto out_tsk;

	      case PTRACE_SETREGS:
		ret = ptrace_setregs(child,
				     (struct pt_all_user_regs __user *) data);
		goto out_tsk;

	      default:
		ret = ptrace_request(child, request, addr, data);
		goto out_tsk;
	}
  out_tsk:
	put_task_struct(child);
  out:
	unlock_kernel();
	return ret;
}
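
/*
 * User-space note (illustrative): PEEK results are returned in the
 * syscall return value, so a debugger must check errno to detect
 * failure; force_successful_syscall_return() above keeps legitimate
 * negative data from looking like an error:
 *
 *	errno = 0;
 *	val = ptrace(PTRACE_PEEKDATA, pid, addr, 0);
 *	if (val == -1 && errno != 0)
 *		... handle error ...
 */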

static void
syscall_trace (void)
{
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;
	if (!(current->ptrace & PT_PTRACED))
		return;
	/*
	 * The 0x80 provides a way for the tracing parent to
	 * distinguish between a syscall stop and SIGTRAP delivery.
	 */
	ptrace_notify(SIGTRAP
		      | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

	/*
	 * This isn't the same as continuing with a signal, but it
	 * will do for normal use.  strace only continues with a
	 * signal if the stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage void
syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    && (current->ptrace & PT_PTRACED))
		syscall_trace();

	if (unlikely(current->audit_context)) {
		long syscall;
		int arch;

		if (IS_IA32_PROCESS(&regs)) {
			syscall = regs.r1;
			arch = AUDIT_ARCH_I386;
		} else {
			syscall = regs.r15;
			arch = AUDIT_ARCH_IA64;
		}

		audit_syscall_entry(current, arch, syscall,
				    arg0, arg1, arg2, arg3);
	}
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage void
syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(current, AUDITSC_RESULT(regs.r10),
				   regs.r8);

	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    && (current->ptrace & PT_PTRACED))
		syscall_trace();
}