X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fmips%2Fkernel%2Fgdb-stub.c;h=6ecbdc1fefd13acdd930998a07c19e633aaab354;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=269889302a27d786c101d6d2da754351607b91c3;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git

diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index 269889302..6ecbdc1fe 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -140,6 +140,7 @@
 #include
 #include
 #include
+#include
 /*
  * external low-level support routines
  */
@@ -176,8 +177,10 @@ int kgdb_enabled;
 /*
  * spin locks for smp case
  */
-static spinlock_t kgdb_lock = SPIN_LOCK_UNLOCKED;
-static spinlock_t kgdb_cpulock[NR_CPUS] = { [0 ... NR_CPUS-1] = SPIN_LOCK_UNLOCKED};
+static DEFINE_SPINLOCK(kgdb_lock);
+static raw_spinlock_t kgdb_cpulock[NR_CPUS] = {
+	[0 ... NR_CPUS-1] = __RAW_SPIN_LOCK_UNLOCKED,
+};
 
 /*
  * BUFMAX defines the maximum number of characters in inbound/outbound buffers
@@ -637,33 +640,94 @@ static struct gdb_bp_save async_bp;
  * and only one can be active at a time.
  */
 extern spinlock_t smp_call_lock;
+
 void set_async_breakpoint(unsigned long *epc)
 {
 	/* skip breaking into userland */
 	if ((*epc & 0x80000000) == 0)
 		return;
 
+#ifdef CONFIG_SMP
 	/* avoid deadlock if someone is making IPC */
 	if (spin_is_locked(&smp_call_lock))
 		return;
+#endif
 
 	async_bp.addr = *epc;
 	*epc = (unsigned long)async_breakpoint;
 }
 
-void kgdb_wait(void *arg)
+static void kgdb_wait(void *arg)
 {
 	unsigned flags;
 	int cpu = smp_processor_id();
 
 	local_irq_save(flags);
 
-	spin_lock(&kgdb_cpulock[cpu]);
-	spin_unlock(&kgdb_cpulock[cpu]);
+	__raw_spin_lock(&kgdb_cpulock[cpu]);
+	__raw_spin_unlock(&kgdb_cpulock[cpu]);
 
 	local_irq_restore(flags);
 }
 
+/*
+ * The GDB stub needs to call kgdb_wait on all processors with interrupts
+ * disabled, so it uses its own special variant.
+ */
+static int kgdb_smp_call_kgdb_wait(void)
+{
+#ifdef CONFIG_SMP
+	struct call_data_struct data;
+	int i, cpus = num_online_cpus() - 1;
+	int cpu = smp_processor_id();
+
+	/*
+	 * Can die spectacularly if this CPU isn't yet marked online
+	 */
+	BUG_ON(!cpu_online(cpu));
+
+	if (!cpus)
+		return 0;
+
+	if (spin_is_locked(&smp_call_lock)) {
+		/*
+		 * Some other processor is trying to make us do something
+		 * but we're not going to respond... give up
+		 */
+		return -1;
+	}
+
+	/*
+	 * We will continue here, accepting the fact that
+	 * the kernel may deadlock if another CPU attempts
+	 * to call smp_call_function now...
+	 */
+
+	data.func = kgdb_wait;
+	data.info = NULL;
+	atomic_set(&data.started, 0);
+	data.wait = 0;
+
+	spin_lock(&smp_call_lock);
+	call_data = &data;
+	mb();
+
+	/* Send a message to all other CPUs and wait for them to respond */
+	for (i = 0; i < NR_CPUS; i++)
+		if (cpu_online(i) && i != cpu)
+			core_send_ipi(i, SMP_CALL_FUNCTION);
+
+	/* Wait for response */
+	/* FIXME: lock-up detection, backtrace on lock-up */
+	while (atomic_read(&data.started) != cpus)
+		barrier();
+
+	call_data = NULL;
+	spin_unlock(&smp_call_lock);
+#endif
+
+	return 0;
+}
 
 /*
  * This function does all command processing for interfacing to gdb.  It
@@ -687,8 +751,8 @@ void handle_exception (struct gdb_regs *regs)
 	 * acquire the big kgdb spinlock
 	 */
 	if (!spin_trylock(&kgdb_lock)) {
-		/* 
-		 * some other CPU has the lock, we should go back to 
+		/*
+		 * some other CPU has the lock, we should go back to
 		 * receive the gdb_wait IPC
 		 */
 		return;
@@ -703,17 +767,17 @@ void handle_exception (struct gdb_regs *regs)
 		async_bp.addr = 0;
 	}
 
-	/* 
+	/*
 	 * acquire the CPU spinlocks
 	 */
 	for (i = num_online_cpus()-1; i >= 0; i--)
-		if (spin_trylock(&kgdb_cpulock[i]) == 0)
+		if (__raw_spin_trylock(&kgdb_cpulock[i]) == 0)
 			panic("kgdb: couldn't get cpulock %d\n", i);
 
 	/*
 	 * force other cpus to enter kgdb
 	 */
-	smp_call_function(kgdb_wait, NULL, 0, 0);
+	kgdb_smp_call_kgdb_wait();
 
 	/*
 	 * If we're in breakpoint() increment the PC
@@ -894,7 +958,7 @@ void handle_exception (struct gdb_regs *regs)
 		ptr = &input_buffer[1];
 		if (hexToLong(&ptr, &addr))
 			regs->cp0_epc = addr;
-	  
+
 		goto exit_kgdb_exception;
 		break;
 
@@ -982,7 +1046,7 @@ finish_kgdb:
 exit_kgdb_exception:
 	/* release locks so other CPUs can go */
 	for (i = num_online_cpus()-1; i >= 0; i--)
-		spin_unlock(&kgdb_cpulock[i]);
+		__raw_spin_unlock(&kgdb_cpulock[i]);
 	spin_unlock(&kgdb_lock);
 
 	__flush_cache_all();
@@ -1001,7 +1065,7 @@ void breakpoint(void)
 		return;
 
 	__asm__ __volatile__(
-			".globl	breakinst\n\t" 
+			".globl	breakinst\n\t"
 			".set\tnoreorder\n\t"
 			"nop\n"
 			"breakinst:\tbreak\n\t"
@@ -1014,7 +1078,7 @@ void breakpoint(void)
 void async_breakpoint(void)
 {
 	__asm__ __volatile__(
-			".globl	async_breakinst\n\t" 
+			".globl	async_breakinst\n\t"
 			".set\tnoreorder\n\t"
 			"nop\n"
 			"async_breakinst:\tbreak\n\t"
@@ -1036,12 +1100,12 @@ void adel(void)
  * malloc is needed by gdb client in "call func()", even a private one
  * will make gdb happy
  */
-static void *malloc(size_t size)
+static void * __attribute_used__ malloc(size_t size)
 {
 	return kmalloc(size, GFP_ATOMIC);
 }
 
-static void free(void *where)
+static void __attribute_used__ free (void *where)
 {
 	kfree(where);
 }
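
A note on the rendezvous this patch implements: handle_exception() first pre-locks
every entry of kgdb_cpulock[] with __raw_spin_trylock(), then
kgdb_smp_call_kgdb_wait() sends an IPI to every other online CPU; each of those
CPUs parks in kgdb_wait() by taking and releasing its own cpulock, so it can only
resume once handle_exception() drops the locks at exit_kgdb_exception. The sketch
below is a minimal userspace analogue of that pattern, not kernel code: pthread
spinlocks stand in for raw_spinlock_t, an atomic flag stands in for the
SMP_CALL_FUNCTION IPI, and NCPUS, slave(), and the master logic in main() are
hypothetical names invented for the illustration.

	/* build: cc -pthread rendezvous.c -- illustrative sketch only */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define NCPUS 4		/* hypothetical CPU count; thread 0 plays master */

	static pthread_spinlock_t cpulock[NCPUS];	/* stands in for kgdb_cpulock[] */
	static atomic_int ipi_pending[NCPUS];		/* stands in for the IPI */
	static atomic_int parked;

	static void *slave(void *arg)
	{
		int cpu = (int)(long)arg;

		/* wait for the "IPI" from the master */
		while (!atomic_load(&ipi_pending[cpu]))
			;

		/* analogue of kgdb_wait(): park by taking our own lock,
		 * which the master already holds; we proceed only after
		 * the master releases it */
		atomic_fetch_add(&parked, 1);
		pthread_spin_lock(&cpulock[cpu]);
		pthread_spin_unlock(&cpulock[cpu]);
		return NULL;
	}

	int main(void)
	{
		pthread_t t[NCPUS];
		int i;

		for (i = 0; i < NCPUS; i++)
			pthread_spin_init(&cpulock[i], PTHREAD_PROCESS_PRIVATE);

		/* master: acquire every per-CPU lock BEFORE signalling,
		 * mirroring handle_exception() locking before the IPI */
		for (i = 1; i < NCPUS; i++)
			pthread_spin_lock(&cpulock[i]);

		for (i = 1; i < NCPUS; i++)
			pthread_create(&t[i], NULL, slave, (void *)(long)i);
		for (i = 1; i < NCPUS; i++)
			atomic_store(&ipi_pending[i], 1);

		/* ... debugger work would happen here while slaves are parked ... */
		while (atomic_load(&parked) != NCPUS - 1)
			;
		printf("all %d slave CPUs reached the park point\n", NCPUS - 1);

		/* analogue of exit_kgdb_exception: dropping the locks
		 * lets every parked slave resume */
		for (i = 1; i < NCPUS; i++)
			pthread_spin_unlock(&cpulock[i]);
		for (i = 1; i < NCPUS; i++)
			pthread_join(t[i], NULL);
		return 0;
	}

The ordering is the point in both versions: the per-CPU locks are taken before the
wake-up signal is sent, so a slave can never slip past the park before the master
is ready, and releasing the locks is all it takes to restart every CPU at once.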