static long *jprobe_saved_esp;
/* copy of the kernel stack at the probe fire time */
static kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
-void jprobe_return_end(void);
/*
* returns non-zero if opcode modifies the interrupt flag.
return 0;
}
-int arch_prepare_kprobe(struct kprobe *p)
-{
- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
- return 0;
-}
-
-void arch_remove_kprobe(struct kprobe *p)
+void arch_prepare_kprobe(struct kprobe *p)
{
+ memcpy(p->insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
}
static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
{
regs->eflags |= TF_MASK;
regs->eflags &= ~IF_MASK;
- regs->eip = (unsigned long)&p->ainsn.insn;
+ regs->eip = (unsigned long)&p->insn;
}
/*
p = get_kprobe(addr);
if (!p) {
unlock_kprobes();
- if (regs->eflags & VM_MASK) {
- /* We are in virtual-8086 mode. Return 0 */
- goto no_kprobe;
- }
-
if (*addr != BREAKPOINT_INSTRUCTION) {
/*
* The breakpoint instruction was removed right
* instruction. To avoid the SMP problems that can occur when we
* temporarily put back the original opcode to single-step, we
* single-stepped a copy of the instruction. The address of this
- * copy is p->ainsn.insn.
+ * copy is p->insn.
*
* This function prepares to return from the post-single-step
* interrupt. We have to fix up the stack as follows:
{
	unsigned long *tos = (unsigned long *)&regs->esp;
unsigned long next_eip = 0;
- unsigned long copy_eip = (unsigned long)&p->ainsn.insn;
+ unsigned long copy_eip = (unsigned long)&p->insn;
unsigned long orig_eip = (unsigned long)p->addr;
- switch (p->ainsn.insn[0]) {
+ switch (p->insn[0]) {
case 0x9c: /* pushfl */
*tos &= ~(TF_MASK | IF_MASK);
*tos |= kprobe_old_eflags;
*tos = orig_eip + (*tos - copy_eip);
break;
case 0xff:
- if ((p->ainsn.insn[1] & 0x30) == 0x10) {
+ if ((p->insn[1] & 0x30) == 0x10) {
/* call absolute, indirect */
/* Fix return addr; eip is correct. */
next_eip = regs->eip;
*tos = orig_eip + (*tos - copy_eip);
- } else if (((p->ainsn.insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
- ((p->ainsn.insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
+ } else if (((p->insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
+ ((p->insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */
/* eip is correct. */
next_eip = regs->eip;
}
{
preempt_enable_no_resched();
asm volatile (" xchgl %%ebx,%%esp \n"
- " int3 \n"
- " .globl jprobe_return_end \n"
- " jprobe_return_end: \n"
- " nop \n"::"b"
+ " int3 \n"::"b"
(jprobe_saved_esp):"memory");
}
+void jprobe_return_end(void)
+{
+};
int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{