/* arch/sparc64/kernel/kprobes.c
 *
 * Copyright (C) 2004 David S. Miller <davem@davemloft.net>
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

#include <asm/kdebug.h>
#include <asm/signal.h>

/* We do not have hardware single-stepping on sparc64.
 * So we implement software single-stepping with breakpoint
 * traps.  The top-level scheme is similar to that used
 * in the x86 kprobes implementation.
 *
 * In the kprobe->ainsn.insn[] array we store the original
 * instruction at index zero and a break instruction at
 * index one.
 *
 * When we hit a kprobe we:
 * - Run the pre-handler
 * - Remember "regs->tnpc" and the interrupt level stored in
 *   "regs->tstate" so we can restore them later
 * - Disable PIL interrupts
 * - Set regs->tpc to point to kprobe->ainsn.insn[0]
 * - Set regs->tnpc to point to kprobe->ainsn.insn[1]
 * - Mark that we are actively in a kprobe
 *
 * At this point we wait for the second breakpoint at
 * kprobe->ainsn.insn[1] to hit.  When it does we:
 * - Run the post-handler
 * - Set regs->tpc to the "remembered" regs->tnpc stored above,
 *   and restore the PIL interrupt level in "regs->tstate" as well
 * - Make any adjustments necessary to regs->tnpc in order
 *   to handle relative branches correctly.  See below.
 * - Mark that we are no longer actively in a kprobe.
 */

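/* Note: insn[1] holds the second breakpoint (BREAKPOINT_INSTRUCTION_2);
 * its trap is reported as DIE_DEBUG_2 by kprobe_trap() below, which is
 * what invokes post_kprobe_handler() once the copied instruction at
 * insn[0] has executed.
 */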
int arch_prepare_kprobe(struct kprobe *p)
{
        p->ainsn.insn[0] = *p->addr;
        p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
        return 0;
}

void arch_remove_kprobe(struct kprobe *p)
{
}

/* kprobe_status settings */
#define KPROBE_HIT_ACTIVE       0x00000001
#define KPROBE_HIT_SS           0x00000002

static struct kprobe *current_kprobe;
static unsigned long current_kprobe_orig_tnpc;
static unsigned long current_kprobe_orig_tstate_pil;
static unsigned int kprobe_status;

static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
        current_kprobe_orig_tnpc = regs->tnpc;
        current_kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
        regs->tstate |= TSTATE_PIL;

        regs->tpc = (unsigned long) &p->ainsn.insn[0];
        regs->tnpc = (unsigned long) &p->ainsn.insn[1];
}

static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
{
        *p->addr = p->opcode;
        flushi(p->addr);

        regs->tpc = (unsigned long) p->addr;
        regs->tnpc = current_kprobe_orig_tnpc;
        regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
                        current_kprobe_orig_tstate_pil);
}

static int kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        void *addr = (void *) regs->tpc;
        int ret = 0;

        preempt_disable();

        if (kprobe_running()) {
                /* We *are* holding the lock here, so this is safe.
                 * Disarm the probe we just hit, and ignore it.
                 */
                p = get_kprobe(addr);
                if (p) {
                        disarm_kprobe(p, regs);
                        ret = 1;
                } else {
                        p = current_kprobe;
                        if (p->break_handler && p->break_handler(p, regs))
                                goto ss_probe;
                }
                /* If it's not ours, it can't be a delete race (we hold the lock). */
                goto no_kprobe;
        }

        lock_kprobes();
        p = get_kprobe(addr);
        if (!p) {
                unlock_kprobes();
                if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it.  Another cpu has removed
                         * either a probepoint or a debugger breakpoint
                         * at this address.  In either case, no further
                         * handling of this interrupt is appropriate.
                         */
                        ret = 1;
                }
                /* Not one of ours: let kernel handle it */
                goto no_kprobe;
        }

        kprobe_status = KPROBE_HIT_ACTIVE;
        current_kprobe = p;
        if (p->pre_handler(p, regs))
                return 1;

ss_probe:
        prepare_singlestep(p, regs);
        kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}

/* If INSN is a relative control transfer instruction,
 * return the corrected branch destination value.
 *
 * The original INSN location was REAL_PC; it actually
 * executed at PC and produced destination address NPC.
 */
static unsigned long relbranch_fixup(u32 insn, unsigned long real_pc,
                                     unsigned long pc, unsigned long npc)
{
        /* Branch not taken, no mods necessary.  */
        if (npc == pc + 0x4UL)
                return real_pc + 0x4UL;

        /* The three cases are call, branch w/prediction,
         * and traditional branch.
         */
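        /* For reference, a decoding sketch of the masks below
         * (SPARC V9 instruction formats):
         *   (insn & 0xc0000000) == 0x40000000  ->  CALL  (op = 01)
         *   (insn & 0xc1c00000) == 0x00400000  ->  BPcc  (op = 00, op2 = 001)
         *   (insn & 0xc1c00000) == 0x00800000  ->  Bicc  (op = 00, op2 = 010)
         */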
        if ((insn & 0xc0000000) == 0x40000000 ||
            (insn & 0xc1c00000) == 0x00400000 ||
            (insn & 0xc1c00000) == 0x00800000) {
                /* The instruction did all the work for us
                 * already, just apply the offset to the correct
                 * instruction location.
                 */
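                /* Illustrative example: a taken branch with displacement D,
                 * executed out of line at PC, produces NPC == PC + D, so
                 * real_pc + (npc - pc) == real_pc + D, i.e. the destination
                 * the original instruction would have reached in place.
                 */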
                return (real_pc + (npc - pc));
        }

        return real_pc + 0x4UL;
}

/* If INSN is an instruction which writes its PC location
 * into a destination register, fix that up.
 */
static void retpc_fixup(struct pt_regs *regs, u32 insn, unsigned long real_pc)
{
        unsigned long *slot = NULL;

        /* Simplest case is call, which always uses %o7 */
        if ((insn & 0xc0000000) == 0x40000000) {
                slot = &regs->u_regs[UREG_I7];
        }

        /* Jmpl encodes the register inside of the opcode */
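        /* Decoding sketch: jmpl is op = 10, op3 = 111000, with the
         * destination register rd in bits 29:25 of the opcode.
         */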
        if ((insn & 0xc1f80000) == 0x81c00000) {
                unsigned long rd = ((insn >> 25) & 0x1f);

                if (rd <= 15) {
                        slot = &regs->u_regs[rd];
                } else {
                        /* Hard case, it goes onto the stack. */
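                        /* The locals and ins of the probed frame live in its
                         * register window save area on the stack (locals in
                         * the first eight slots, ins in the next eight), so
                         * rd - 16 indexes the right slot once the windows
                         * have been flushed.  (Assumes the standard sparc64
                         * save-area layout.)
                         */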
                        flushw_all();

                        rd -= 16;
                        slot = (unsigned long *)
                                (regs->u_regs[UREG_FP] + STACK_BIAS);
                        slot += rd;
                }
        }
        if (slot != NULL)
                *slot = real_pc;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction that has been replaced by the breakpoint
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
        u32 insn = p->ainsn.insn[0];

        regs->tpc = current_kprobe_orig_tnpc;
        regs->tnpc = relbranch_fixup(insn,
                                     (unsigned long) p->addr,
                                     (unsigned long) &p->ainsn.insn[0],
                                     regs->tnpc);
        retpc_fixup(regs, insn, (unsigned long) p->addr);

        regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
                        current_kprobe_orig_tstate_pil);
}

static inline int post_kprobe_handler(struct pt_regs *regs)
{
        if (!kprobe_running())
                return 0;

        if (current_kprobe->post_handler)
                current_kprobe->post_handler(current_kprobe, regs, 0);

        resume_execution(current_kprobe, regs);

        unlock_kprobes();
        preempt_enable_no_resched();

        return 1;
}

/* Interrupts disabled, kprobe_lock held. */
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        if (current_kprobe->fault_handler
            && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
                return 1;

        if (kprobe_status & KPROBE_HIT_SS) {
                resume_execution(current_kprobe, regs);

                unlock_kprobes();
                preempt_enable_no_resched();
        }
        return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
                             void *data)
{
        struct die_args *args = (struct die_args *)data;
        switch (val) {
        case DIE_DEBUG:
                if (kprobe_handler(args->regs))
                        return NOTIFY_STOP;
                break;
        case DIE_DEBUG_2:
                if (post_kprobe_handler(args->regs))
                        return NOTIFY_STOP;
                break;
        case DIE_GPF:
                if (kprobe_running() &&
                    kprobe_fault_handler(args->regs, args->trapnr))
                        return NOTIFY_STOP;
                break;
        case DIE_PAGE_FAULT:
                if (kprobe_running() &&
                    kprobe_fault_handler(args->regs, args->trapnr))
                        return NOTIFY_STOP;
                break;
        default:
                break;
        }
        return NOTIFY_DONE;
}

asmlinkage void kprobe_trap(unsigned long trap_level, struct pt_regs *regs)
{
        BUG_ON(trap_level != 0x170 && trap_level != 0x171);

        if (user_mode(regs)) {
                local_irq_enable();
                bad_trap(regs, trap_level);
                return;
        }

        /* trap_level == 0x170 --> ta 0x70
         * trap_level == 0x171 --> ta 0x71
         */
        if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2,
                       (trap_level == 0x170) ? "debug" : "debug_2",
                       regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
                bad_trap(regs, trap_level);
}

/* Jprobes support.  */
static struct pt_regs jprobe_saved_regs;
static struct pt_regs *jprobe_saved_regs_location;
static struct sparc_stackf jprobe_saved_stack;

int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);

        jprobe_saved_regs_location = regs;
        memcpy(&jprobe_saved_regs, regs, sizeof(*regs));

        /* Save a whole stack frame; this captures the arguments
         * that were pushed onto the stack after the argument
         * registers were used up.
         */
        memcpy(&jprobe_saved_stack,
               (char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
               sizeof(jprobe_saved_stack));

        regs->tpc  = (unsigned long) jp->entry;
        regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
        regs->tstate |= TSTATE_PIL;

        return 1;
}

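/* jprobe_return() issues "ta 0x70", the first-level breakpoint trap, so
 * control re-enters kprobe_handler(), which dispatches to the registered
 * break_handler (longjmp_break_handler() below) to restore the state
 * saved by setjmp_pre_handler().
 */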
void jprobe_return(void)
{
        preempt_enable_no_resched();
        __asm__ __volatile__(
                ".globl jprobe_return_trap_instruction\n"
"jprobe_return_trap_instruction:\n\t"
                "ta 0x70");
}

extern void jprobe_return_trap_instruction(void);

extern void __show_regs(struct pt_regs *regs);

int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        u32 *addr = (u32 *) regs->tpc;

        if (addr == (u32 *) jprobe_return_trap_instruction) {
                if (jprobe_saved_regs_location != regs) {
                        printk("JPROBE: Current regs (%p) does not match "
                               "saved regs (%p).\n",
                               regs, jprobe_saved_regs_location);
                        printk("JPROBE: Saved registers\n");
                        __show_regs(jprobe_saved_regs_location);
                        printk("JPROBE: Current registers\n");
                        __show_regs(regs);
                        BUG();
                }
                /* Restore old register state.  Do pt_regs
                 * first so that UREG_FP is the original one for
                 * the stack frame restore.
                 */
                memcpy(regs, &jprobe_saved_regs, sizeof(*regs));

                memcpy((char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
                       &jprobe_saved_stack,
                       sizeof(jprobe_saved_stack));

                return 1;
        }
        return 0;
}