X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;ds=inline;f=arch%2Fmips%2Fkernel%2Fsmp.c;h=06ed907524249fed2c3b1a70ee490c5a6031b523;hb=987b0145d94eecf292d8b301228356f44611ab7c;hp=3730cc0db5f9e8526665bdeb8f7812b36dc55e82;hpb=5167311cae6aa3a5ff5afd39f88c32a435c969ef;p=linux-2.6.git

diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 3730cc0db..06ed90752 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -18,7 +18,6 @@
  * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
  * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
  */
-#include <linux/config.h>
 #include <linux/cache.h>
 #include <linux/delay.h>
 #include <linux/init.h>
@@ -30,6 +29,7 @@
 #include <linux/timex.h>
 #include <linux/sched.h>
 #include <linux/cpumask.h>
+#include <linux/cpu.h>
 
 #include <asm/atomic.h>
 #include <asm/cpu.h>
@@ -47,14 +47,10 @@ int __cpu_logical_map[NR_CPUS];	/* Map logical to physical */
 EXPORT_SYMBOL(phys_cpu_present_map);
 EXPORT_SYMBOL(cpu_online_map);
 
-cycles_t cacheflush_time;
-unsigned long cache_decay_ticks;
-
 static void smp_tune_scheduling (void)
 {
 	struct cache_desc *cd = &current_cpu_data.scache;
 	unsigned long cachesize;       /* kB   */
-	unsigned long bandwidth = 350; /* MB/s */
 	unsigned long cpu_khz;
 
 	/*
@@ -72,28 +68,14 @@ static void smp_tune_scheduling (void)
 	 *  L1 cache), on PIIs it's around 50-100 usecs, depending on
 	 *  the cache size)
 	 */
-	if (!cpu_khz) {
-		/*
-		 * This basically disables processor-affinity scheduling on SMP
-		 * without a cycle counter.  Currently all SMP capable MIPS
-		 * processors have a cycle counter.
-		 */
-		cacheflush_time = 0;
+	if (!cpu_khz)
 		return;
-	}
 
 	cachesize = cd->linesz * cd->sets * cd->ways;
-	cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
-	cache_decay_ticks = (long)cacheflush_time/cpu_khz * HZ / 1000;
-
-	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
-		(long)cacheflush_time/(cpu_khz/1000),
-		((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
-	printk("task migration cache decay timeout: %ld msecs.\n",
-		(cache_decay_ticks + 1) * 1000 / HZ);
 }
 
 extern void __init calibrate_delay(void);
+extern ATTRIB_NORET void cpu_idle(void);
 
 /*
  * First C code run on the secondary CPUs after being started up by
@@ -101,7 +83,7 @@ extern void __init calibrate_delay(void);
  */
 asmlinkage void start_secondary(void)
 {
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu;
 
 	cpu_probe();
 	cpu_report();
@@ -114,6 +96,8 @@ asmlinkage void start_secondary(void)
 	 */
 
 	calibrate_delay();
+	preempt_disable();
+	cpu = smp_processor_id();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
 
 	prom_smp_finish();
@@ -123,7 +107,7 @@ asmlinkage void start_secondary(void)
 	cpu_idle();
 }
 
-spinlock_t smp_call_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(smp_call_lock);
 
 struct call_data_struct *call_data;
 
@@ -139,7 +123,19 @@ struct call_data_struct *call_data;
  * or are or have executed.
  *
  * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
+ * hardware interrupt handler or from a bottom half handler:
+ *
+ * CPU A                               CPU B
+ * Disable interrupts
+ *                                     smp_call_function()
+ *                                     Take call_lock
+ *                                     Send IPIs
+ *                                     Wait for all cpus to acknowledge IPI
+ *                                     CPU A has not responded, spin waiting
+ *                                     for cpu A to respond, holding call_lock
+ * smp_call_function()
+ * Spin waiting for call_lock
+ * Deadlock                            Deadlock
  */
 int smp_call_function (void (*func) (void *info), void *info, int retry,
 	int wait)
@@ -148,6 +144,11 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
 	int i, cpus = num_online_cpus() - 1;
 	int cpu = smp_processor_id();
 
+	/*
+	 * Can die spectacularly if this CPU isn't yet marked online
+	 */
+	BUG_ON(!cpu_online(cpu));
+
 	if (!cpus)
 		return 0;
 
@@ -232,11 +233,10 @@ void __init smp_cpus_done(unsigned int max_cpus)
 /* called from main before smp_init() */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	cpu_data[0].udelay_val = loops_per_jiffy;
 	init_new_context(current, &init_mm);
 	current_thread_info()->cpu = 0;
 	smp_tune_scheduling();
-	prom_prepare_cpus(max_cpus);
+	plat_prepare_cpus(max_cpus);
 }
 
 /* preload SMP state for boot cpu */
@@ -254,23 +254,28 @@ void __devinit smp_prepare_boot_cpu(void)
 }
 
 /*
- * Startup the CPU with this logical number
+ * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
+ * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
+ * physical, not logical.
  */
-static int __init do_boot_cpu(int cpu)
+int __devinit __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
 
 	/*
+	 * Processor goes to start_secondary(), sets online flag
 	 * The following code is purely to make sure
 	 * Linux can schedule processes on this slave.
 	 */
 	idle = fork_idle(cpu);
 	if (IS_ERR(idle))
-		panic("failed fork for CPU %d\n", cpu);
+		panic(KERN_ERR "Fork failed for CPU %d", cpu);
 
 	prom_boot_secondary(cpu, idle);
 
-	/* XXXKW timeout */
+	/*
+	 * Trust is futile.  We should really have timeouts ...
+	 */
 	while (!cpu_isset(cpu, cpu_callin_map))
 		udelay(100);
 
@@ -279,23 +284,6 @@ static int __init do_boot_cpu(int cpu)
 	return 0;
 }
 
-/*
- * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
- * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
- * physical, not logical.
- */
-int __devinit __cpu_up(unsigned int cpu)
-{
-	int ret;
-
-	/* Processor goes to start_secondary(), sets online flag */
-	ret = do_boot_cpu(cpu);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
 /* Not really SMP stuff ... */
 int setup_profiling_timer(unsigned int multiplier)
 {
@@ -437,6 +425,25 @@ void flush_tlb_one(unsigned long vaddr)
 	local_flush_tlb_one(vaddr);
 }
 
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static int __init topology_init(void)
+{
+	int cpu;
+	int ret;
+
+	for_each_cpu(cpu) {
+		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
+		if (ret)
+			printk(KERN_WARNING "topology_init: register_cpu %d "
+			       "failed (%d)\n", cpu, ret);
+	}
+
+	return 0;
+}
+
+subsys_initcall(topology_init);
+
 EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(flush_tlb_one);
 EXPORT_SYMBOL(cpu_data);
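
The deadlock diagram added above is the key constraint on this API. As an illustrative sketch only (not part of the patch), this is roughly how a caller of the 2.6.16-era four-argument smp_call_function() looks; the names remote_count, replies and count_other_cpus are hypothetical:

	#include <linux/smp.h>
	#include <asm/atomic.h>

	static atomic_t replies = ATOMIC_INIT(0);

	/* Runs on every other online CPU, in IPI context; it must be
	 * fast and must not block. */
	static void remote_count(void *info)
	{
		atomic_inc((atomic_t *) info);
	}

	void count_other_cpus(void)
	{
		/* Interrupts must be enabled here: calling with them
		 * disabled can produce exactly the CPU A / CPU B
		 * deadlock documented in the comment above. */
		smp_call_function(remote_count, &replies,
				  0 /* retry */, 1 /* wait */);
	}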