X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;ds=sidebyside;f=arch%2Fmips%2Fkernel%2Fsmp.c;h=298f82fe8440a43d1a8140ed8a256d44595ed6d8;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=20245b7fdfd08da7413165a97ac9204a377d908f;hpb=6a77f38946aaee1cd85eeec6cf4229b204c15071;p=linux-2.6.git

diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 20245b7fd..298f82fe8 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -29,6 +29,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -37,6 +38,10 @@
 #include 
 #include 
 
+#ifdef CONFIG_MIPS_MT_SMTC
+#include 
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
 volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
 cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
@@ -46,14 +51,10 @@ int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
 EXPORT_SYMBOL(phys_cpu_present_map);
 EXPORT_SYMBOL(cpu_online_map);
 
-cycles_t cacheflush_time;
-unsigned long cache_decay_ticks;
-
 static void smp_tune_scheduling (void)
 {
 	struct cache_desc *cd = &current_cpu_data.scache;
 	unsigned long cachesize;       /* kB */
-	unsigned long bandwidth = 350; /* MB/s */
 	unsigned long cpu_khz;
 
 	/*
@@ -71,25 +72,10 @@ static void smp_tune_scheduling (void)
 	 * L1 cache), on PIIs it's around 50-100 usecs, depending on
 	 * the cache size)
 	 */
-	if (!cpu_khz) {
-		/*
-		 * This basically disables processor-affinity scheduling on SMP
-		 * without a cycle counter.  Currently all SMP capable MIPS
-		 * processors have a cycle counter.
-		 */
-		cacheflush_time = 0;
+	if (!cpu_khz)
 		return;
-	}
 
 	cachesize = cd->linesz * cd->sets * cd->ways;
-	cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
-	cache_decay_ticks = (long)cacheflush_time/cpu_khz * HZ / 1000;
-
-	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
-		(long)cacheflush_time/(cpu_khz/1000),
-		((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
-	printk("task migration cache decay timeout: %ld msecs.\n",
-		(cache_decay_ticks + 1) * 1000 / HZ);
 }
 
 extern void __init calibrate_delay(void);
@@ -101,8 +87,12 @@ extern ATTRIB_NORET void cpu_idle(void);
  */
 asmlinkage void start_secondary(void)
 {
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu;
 
+#ifdef CONFIG_MIPS_MT_SMTC
+	/* Only do cpu_probe for first TC of CPU */
+	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
+#endif /* CONFIG_MIPS_MT_SMTC */
 	cpu_probe();
 	cpu_report();
 	per_cpu_trap_init();
@@ -114,6 +104,8 @@ asmlinkage void start_secondary(void)
 	 */
 	calibrate_delay();
 
+	preempt_disable();
+	cpu = smp_processor_id();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
 
 	prom_smp_finish();
@@ -139,7 +131,19 @@ struct call_data_struct *call_data;
  * or are or have executed.
  *
  * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
+ * hardware interrupt handler or from a bottom half handler:
+ *
+ * CPU A                               CPU B
+ * Disable interrupts
+ *                                     smp_call_function()
+ *                                     Take call_lock
+ *                                     Send IPIs
+ *                                     Wait for all cpus to acknowledge IPI
+ *                                     CPU A has not responded, spin waiting
+ *                                     for cpu A to respond, holding call_lock
+ * smp_call_function()
+ * Spin waiting for call_lock
+ * Deadlock                            Deadlock
  */
 int smp_call_function (void (*func) (void *info), void *info, int retry,
 								int wait)
@@ -148,6 +152,11 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
 	int i, cpus = num_online_cpus() - 1;
 	int cpu = smp_processor_id();
 
+	/*
+	 * Can die spectacularly if this CPU isn't yet marked online
+	 */
+	BUG_ON(!cpu_online(cpu));
+
 	if (!cpus)
 		return 0;
 
@@ -166,8 +175,8 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
 	mb();
 
 	/* Send a message to all other CPUs and wait for them to respond */
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i) && i != cpu)
+	for_each_online_cpu(i)
+		if (i != cpu)
 			core_send_ipi(i, SMP_CALL_FUNCTION);
 
 	/* Wait for response */
@@ -178,11 +187,13 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
 	if (wait)
 		while (atomic_read(&data.finished) != cpus)
 			barrier();
+	call_data = NULL;
 	spin_unlock(&smp_call_lock);
 
 	return 0;
 }
 
+
 void smp_call_function_interrupt(void)
 {
 	void (*func) (void *info) = call_data->func;
@@ -232,11 +243,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
 /* called from main before smp_init() */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	cpu_data[0].udelay_val = loops_per_jiffy;
 	init_new_context(current, &init_mm);
 	current_thread_info()->cpu = 0;
 	smp_tune_scheduling();
-	prom_prepare_cpus(max_cpus);
+	plat_prepare_cpus(max_cpus);
+#ifndef CONFIG_HOTPLUG_CPU
+	cpu_present_map = cpu_possible_map;
+#endif
 }
 
 /* preload SMP state for boot cpu */
@@ -254,23 +267,28 @@ void __devinit smp_prepare_boot_cpu(void)
 }
 
 /*
- * Startup the CPU with this logical number
+ * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
+ * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
+ * physical, not logical.
  */
-static int __init do_boot_cpu(int cpu)
+int __devinit __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
 
 	/*
+	 * Processor goes to start_secondary(), sets online flag
 	 * The following code is purely to make sure
 	 * Linux can schedule processes on this slave.
 	 */
 	idle = fork_idle(cpu);
 	if (IS_ERR(idle))
-		panic("failed fork for CPU %d\n", cpu);
+		panic(KERN_ERR "Fork failed for CPU %d", cpu);
 
 	prom_boot_secondary(cpu, idle);
 
-	/* XXXKW timeout */
+	/*
+	 * Trust is futile.  We should really have timeouts ...
+	 */
 	while (!cpu_isset(cpu, cpu_callin_map))
 		udelay(100);
 
@@ -279,23 +297,6 @@ static int __init do_boot_cpu(int cpu)
 	return 0;
 }
 
-/*
- * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
- * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
- * physical, not logical.
- */
-int __devinit __cpu_up(unsigned int cpu)
-{
-	int ret;
-
-	/* Processor goes to start_secondary(), sets online flag */
-	ret = do_boot_cpu(cpu);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
 /* Not really SMP stuff ...
  */
 int setup_profiling_timer(unsigned int multiplier)
 {
@@ -437,7 +438,24 @@ void flush_tlb_one(unsigned long vaddr)
 	local_flush_tlb_one(vaddr);
 }
 
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static int __init topology_init(void)
+{
+	int cpu;
+	int ret;
+
+	for_each_present_cpu(cpu) {
+		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
+		if (ret)
+			printk(KERN_WARNING "topology_init: register_cpu %d "
+			       "failed (%d)\n", cpu, ret);
+	}
+
+	return 0;
+}
+
+subsys_initcall(topology_init);
+
 EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(flush_tlb_one);
-EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(synchronize_irq);
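
For reference, a minimal smp_call_function() caller that observes the rules spelled
out in the expanded comment above (interrupts enabled, process context, and the
calling CPU doing its own share of the work, since only the other online CPUs
receive the IPI) might look like the sketch below.  The helper names and the
per-CPU work are invented for illustration; they are not part of this patch or
of the kernel tree.

	/*
	 * Illustrative sketch only -- not part of the patch.
	 */
	#include <linux/smp.h>
	#include <asm/atomic.h>

	static atomic_t example_count = ATOMIC_INIT(0);	/* hypothetical state */

	static void example_ipi_handler(void *info)
	{
		/* Runs on every other online CPU, in IPI (interrupt) context. */
		atomic_inc(&example_count);
	}

	static void example_run_on_all_cpus(void)
	{
		/*
		 * Calling this with interrupts disabled or from interrupt
		 * context could deadlock as in the CPU A / CPU B diagram above.
		 */
		smp_call_function(example_ipi_handler, NULL, 0, 1);
		example_ipi_handler(NULL);	/* cover the calling CPU too */
	}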