X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fmips%2Fkernel%2Fsmp.c;h=0555fc554f6544619e77b7d102ec486d56dbae67;hb=refs%2Fheads%2Fvserver;hp=20245b7fdfd08da7413165a97ac9204a377d908f;hpb=6a77f38946aaee1cd85eeec6cf4229b204c15071;p=linux-2.6.git

diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 20245b7fd..0555fc554 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -29,6 +29,7 @@
 #include <linux/timex.h>
 #include <linux/sched.h>
 #include <linux/cpumask.h>
+#include <linux/cpu.h>
 
 #include <asm/atomic.h>
 #include <asm/cpu.h>
@@ -37,6 +38,10 @@
 #include <asm/mmu_context.h>
 #include <asm/smp.h>
 
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsmtregs.h>
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
 volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
 cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
@@ -46,14 +51,10 @@ int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
 EXPORT_SYMBOL(phys_cpu_present_map);
 EXPORT_SYMBOL(cpu_online_map);
 
-cycles_t cacheflush_time;
-unsigned long cache_decay_ticks;
-
 static void smp_tune_scheduling (void)
 {
 	struct cache_desc *cd = &current_cpu_data.scache;
 	unsigned long cachesize;       /* kB   */
-	unsigned long bandwidth = 350; /* MB/s */
 	unsigned long cpu_khz;
 
 	/*
@@ -71,25 +72,10 @@ static void smp_tune_scheduling (void)
 	 * L1 cache), on PIIs it's around 50-100 usecs, depending on
 	 * the cache size)
 	 */
-	if (!cpu_khz) {
-		/*
-		 * This basically disables processor-affinity scheduling on SMP
-		 * without a cycle counter. Currently all SMP capable MIPS
-		 * processors have a cycle counter.
-		 */
-		cacheflush_time = 0;
+	if (!cpu_khz)
 		return;
-	}
 
 	cachesize = cd->linesz * cd->sets * cd->ways;
-	cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
-	cache_decay_ticks = (long)cacheflush_time/cpu_khz * HZ / 1000;
-
-	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
-		(long)cacheflush_time/(cpu_khz/1000),
-		((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
-	printk("task migration cache decay timeout: %ld msecs.\n",
-		(cache_decay_ticks + 1) * 1000 / HZ);
 }
 
 extern void __init calibrate_delay(void);
@@ -101,8 +87,12 @@ extern ATTRIB_NORET void cpu_idle(void);
  */
 asmlinkage void start_secondary(void)
 {
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu;
 
+#ifdef CONFIG_MIPS_MT_SMTC
+	/* Only do cpu_probe for first TC of CPU */
+	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
+#endif /* CONFIG_MIPS_MT_SMTC */
 	cpu_probe();
 	cpu_report();
 	per_cpu_trap_init();
@@ -114,6 +104,8 @@ asmlinkage void start_secondary(void)
 	 */
 	calibrate_delay();
+	preempt_disable();
+	cpu = smp_processor_id();
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
 
 	prom_smp_finish();
@@ -139,7 +131,19 @@ struct call_data_struct *call_data;
 * or are or have executed.
 *
 * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
+ * hardware interrupt handler or from a bottom half handler:
+ *
+ *             CPU A                               CPU B
+ *  Disable interrupts
+ *                                      smp_call_function()
+ *                                      Take call_lock
+ *                                      Send IPIs
+ *                                      Wait for all cpus to acknowledge IPI
+ *                                      CPU A has not responded, spin waiting
+ *                                      for cpu A to respond, holding call_lock
+ *  smp_call_function()
+ *  Spin waiting for call_lock
+ *  Deadlock                            Deadlock
 */
int smp_call_function (void (*func) (void *info), void *info, int retry,
								int wait)
@@ -148,6 +152,11 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
 	int i, cpus = num_online_cpus() - 1;
 	int cpu = smp_processor_id();
 
+	/*
+	 * Can die spectacularly if this CPU isn't yet marked online
+	 */
+	BUG_ON(!cpu_online(cpu));
+
 	if (!cpus)
 		return 0;
 
@@ -163,11 +172,11 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
 
 	spin_lock(&smp_call_lock);
 	call_data = &data;
-	mb();
+	smp_mb();
 
 	/* Send a message to all other CPUs and wait for them to respond */
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i) && i != cpu)
+	for_each_online_cpu(i)
+		if (i != cpu)
 			core_send_ipi(i, SMP_CALL_FUNCTION);
 
 	/* Wait for response */
@@ -178,11 +187,13 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
 	if (wait)
 		while (atomic_read(&data.finished) != cpus)
 			barrier();
+	call_data = NULL;
 	spin_unlock(&smp_call_lock);
 
 	return 0;
 }
 
+
 void smp_call_function_interrupt(void)
 {
 	void (*func) (void *info) = call_data->func;
@@ -193,7 +204,7 @@ void smp_call_function_interrupt(void)
 	 * Notify initiating CPU that I've grabbed the data and am
 	 * about to execute the function.
 	 */
-	mb();
+	smp_mb();
 	atomic_inc(&call_data->started);
 
 	/*
@@ -204,7 +215,7 @@ void smp_call_function_interrupt(void)
 	irq_exit();
 
 	if (wait) {
-		mb();
+		smp_mb();
 		atomic_inc(&call_data->finished);
 	}
 }
@@ -232,11 +243,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
 /* called from main before smp_init() */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	cpu_data[0].udelay_val = loops_per_jiffy;
 	init_new_context(current, &init_mm);
 	current_thread_info()->cpu = 0;
 	smp_tune_scheduling();
-	prom_prepare_cpus(max_cpus);
+	plat_prepare_cpus(max_cpus);
+#ifndef CONFIG_HOTPLUG_CPU
+	cpu_present_map = cpu_possible_map;
+#endif
 }
 
 /* preload SMP state for boot cpu */
 void __devinit smp_prepare_boot_cpu(void)
 {
@@ -254,23 +267,28 @@ void __devinit smp_prepare_boot_cpu(void)
 }
 
 /*
- * Startup the CPU with this logical number
+ * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu
+ * and keep control until "cpu_online(cpu)" is set. Note: cpu is
+ * physical, not logical.
 */
-static int __init do_boot_cpu(int cpu)
+int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
 
 	/*
+	 * Processor goes to start_secondary(), sets online flag
 	 * The following code is purely to make sure
 	 * Linux can schedule processes on this slave.
 	 */
 	idle = fork_idle(cpu);
 	if (IS_ERR(idle))
-		panic("failed fork for CPU %d\n", cpu);
+		panic(KERN_ERR "Fork failed for CPU %d", cpu);
 
 	prom_boot_secondary(cpu, idle);
 
-	/* XXXKW timeout */
+	/*
+	 * Trust is futile. We should really have timeouts ...
+	 */
 	while (!cpu_isset(cpu, cpu_callin_map))
 		udelay(100);
 
@@ -279,23 +297,6 @@ static int __init do_boot_cpu(int cpu)
 	return 0;
 }
 
-/*
- * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu
- * and keep control until "cpu_online(cpu)" is set. Note: cpu is
- * physical, not logical.
- */
-int __devinit __cpu_up(unsigned int cpu)
-{
-	int ret;
-
-	/* Processor goes to start_secondary(), sets online flag */
-	ret = do_boot_cpu(cpu);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
 /* Not really SMP stuff ... */
 int setup_profiling_timer(unsigned int multiplier)
 {
@@ -309,7 +310,7 @@ static void flush_tlb_all_ipi(void *info)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
+	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
 }
 
 static void flush_tlb_mm_ipi(void *mm)
 {
@@ -317,6 +318,32 @@ static void flush_tlb_mm_ipi(void *mm)
 	local_flush_tlb_mm((struct mm_struct *)mm);
 }
 
+/*
+ * Special Variant of smp_call_function for use by TLB functions:
+ *
+ *  o No return value
+ *  o collapses to normal function call on UP kernels
+ *  o collapses to normal function call on systems with a single shared
+ *    primary cache.
+ *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
+ */
+static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
+{
+#ifndef CONFIG_MIPS_MT_SMTC
+	smp_call_function(func, info, 1, 1);
+#endif
+}
+
+static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
+{
+	preempt_disable();
+
+	smp_on_other_tlbs(func, info);
+	func(info);
+
+	preempt_enable();
+}
+
 /*
  * The following tlb flush calls are invoked when old translations are
  * being torn down, or pte attributes are changing. For single threaded
@@ -335,7 +362,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 	preempt_disable();
 
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
-		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
+		smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)
@@ -371,7 +398,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
 		fd.vma = vma;
 		fd.addr1 = start;
 		fd.addr2 = end;
-		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
+		smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)
@@ -413,7 +440,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 		fd.vma = vma;
 		fd.addr1 = page;
-		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
+		smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
 	} else {
 		int i;
 		for (i = 0; i < num_online_cpus(); i++)
@@ -433,11 +460,8 @@ static void flush_tlb_one_ipi(void *info)
 
 void flush_tlb_one(unsigned long vaddr)
 {
-	smp_call_function(flush_tlb_one_ipi, (void *) vaddr, 1, 1);
-	local_flush_tlb_one(vaddr);
+	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
 }
 
 EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(flush_tlb_one);
-EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(synchronize_irq);
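
Reviewer's note on the smp_call_function() interface this patch leans on: the IPIs only reach the *other* online CPUs, so a caller that needs a function run on every CPU must also invoke it locally, with preemption disabled so the current CPU cannot change underneath it. That is exactly the pattern the new smp_on_each_tlb() helper captures. A minimal usage sketch, not part of the patch (the callback and counter names are made up for illustration):

	#include <linux/smp.h>
	#include <asm/atomic.h>

	static atomic_t visits = ATOMIC_INIT(0);

	static void count_cpu(void *unused)
	{
		atomic_inc(&visits);		/* runs once on each CPU */
	}

	static void count_all_cpus(void)
	{
		preempt_disable();		/* pin ourselves to this CPU */
		smp_call_function(count_cpu, NULL, 1, 1); /* retry = 1, wait = 1 */
		count_cpu(NULL);		/* the IPIs skip the caller */
		preempt_enable();
	}

Interrupts must be enabled at the call site: as the deadlock diagram added above shows, two CPUs that both spin with interrupts off, one holding call_lock and one waiting for it, can never acknowledge each other's IPIs.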
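Similarly, the SMTC guard added to start_secondary() keys off the CP0 TCBind register: TCBIND_CURTC masks out the number of the thread context (TC) currently executing, and only TC 0 of a core should run the once-per-core cpu_probe(). A hedged sketch of that test pulled out as a standalone predicate; the helper name is hypothetical, while read_c0_tcbind() and TCBIND_CURTC are the same accessors the patch itself uses:

	#include <asm/mipsmtregs.h>

	/* Hypothetical helper: true only on the first TC of a core. */
	static inline int running_on_first_tc(void)
	{
	#ifdef CONFIG_MIPS_MT_SMTC
		return (read_c0_tcbind() & TCBIND_CURTC) == 0;
	#else
		return 1;	/* without SMTC, every CPU is its own core */
	#endif
	}

With a helper like this, the bare "#ifdef ... if (...) #endif" construct in start_secondary() would reduce to "if (running_on_first_tc()) cpu_probe();", which makes its intent easier to see.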
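Finally, the TLB-flush rewiring applies one decision rule in all three callers (flush_tlb_mm / _range / _page): if other CPUs might share the address space, tell them via smp_on_other_tlbs(); otherwise just zero the stale ASIDs so those CPUs lazily allocate a new context. A condensed sketch of that shape under the same assumptions (cpu_context(), flush_tlb_mm_ipi() and local_flush_tlb_mm() are as in the patched file; the outer function name here is hypothetical):

	#include <linux/sched.h>
	#include <asm/mmu_context.h>

	static void flush_user_tlbs_sketch(struct mm_struct *mm)
	{
		preempt_disable();

		if (atomic_read(&mm->mm_users) != 1 || current->mm != mm) {
			/* Address space may be live on other CPUs: IPI them. */
			smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
		} else {
			/* Single user: just invalidate stale ASIDs elsewhere. */
			int i;

			for (i = 0; i < num_online_cpus(); i++)
				if (smp_processor_id() != i)
					cpu_context(i, mm) = 0;
		}
		local_flush_tlb_mm(mm);

		preempt_enable();
	}

This mirrors flush_tlb_mm() in the patched file; the point of the sketch is that only the "shared" branch needs cross-CPU calls at all.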