X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fum%2Fkernel%2Fsmp.c;h=b5e66b291cc32f1baa23eb29d26821258e5665c0;hb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;hp=b8158556c4a57516a1b726971a62df6fcaa8dc8b;hpb=a2c21200f1c81b08cb55e417b68150bba439b646;p=linux-2.6.git diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c index b8158556c..b5e66b291 100644 --- a/arch/um/kernel/smp.c +++ b/arch/um/kernel/smp.c @@ -1,9 +1,15 @@ /* - * Copyright (C) 2000 Jeff Dike (jdike@karaya.com) + * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com) * Licensed under the GPL */ #include "linux/config.h" +#include "linux/percpu.h" +#include "asm/pgalloc.h" +#include "asm/tlb.h" + +/* For some reason, mmu_gathers are referenced when CONFIG_SMP is off. */ +DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); #ifdef CONFIG_SMP @@ -12,10 +18,10 @@ #include "linux/threads.h" #include "linux/interrupt.h" #include "linux/err.h" +#include "linux/hardirq.h" #include "asm/smp.h" #include "asm/processor.h" #include "asm/spinlock.h" -#include "asm/hardirq.h" #include "user_util.h" #include "kern_util.h" #include "kern.h" @@ -23,9 +29,11 @@ #include "os.h" /* CPU online map, set by smp_boot_cpus */ -unsigned long cpu_online_map = cpumask_of_cpu(0); +cpumask_t cpu_online_map = CPU_MASK_NONE; +cpumask_t cpu_possible_map = CPU_MASK_NONE; EXPORT_SYMBOL(cpu_online_map); +EXPORT_SYMBOL(cpu_possible_map); /* Per CPU bogomips and other parameters * The only piece used here is the ipi pipe, which is set before SMP is @@ -33,14 +41,6 @@ EXPORT_SYMBOL(cpu_online_map); */ struct cpuinfo_um cpu_data[NR_CPUS]; -spinlock_t um_bh_lock = SPIN_LOCK_UNLOCKED; - -atomic_t global_bh_count; - -/* Not used by UML */ -unsigned char global_irq_holder = NO_PROC_ID; -unsigned volatile long global_irq_lock; - /* Set when the idlers are all forked */ int smp_threads_ready = 0; @@ -55,80 +55,44 @@ struct task_struct *idle_threads[NR_CPUS]; void smp_send_reschedule(int cpu) { - write(cpu_data[cpu].ipi_pipe[1], "R", 1); + os_write_file(cpu_data[cpu].ipi_pipe[1], "R", 1); num_reschedules_sent++; } -static void show(char * str) -{ - int cpu = smp_processor_id(); - - printk(KERN_INFO "\n%s, CPU %d:\n", str, cpu); -} - -#define MAXCOUNT 100000000 - -static inline void wait_on_bh(void) -{ - int count = MAXCOUNT; - do { - if (!--count) { - show("wait_on_bh"); - count = ~0; - } - /* nothing .. wait for the other bh's to go away */ - } while (atomic_read(&global_bh_count) != 0); -} - -/* - * This is called when we want to synchronize with - * bottom half handlers. We need to wait until - * no other CPU is executing any bottom half handler. - * - * Don't wait if we're already running in an interrupt - * context or are inside a bh handler. 
- */
-void synchronize_bh(void)
-{
-	if (atomic_read(&global_bh_count) && !in_interrupt())
-		wait_on_bh();
-}
-
 void smp_send_stop(void)
 {
 	int i;
 
 	printk(KERN_INFO "Stopping all CPUs...");
 	for(i = 0; i < num_online_cpus(); i++){
-		if(i == current->thread_info->cpu)
+		if(i == current_thread->cpu)
 			continue;
-		write(cpu_data[i].ipi_pipe[1], "S", 1);
+		os_write_file(cpu_data[i].ipi_pipe[1], "S", 1);
 	}
 	printk("done\n");
 }
 
-static cpumask_t smp_commenced_mask;
-static cpumask_t smp_callin_map = CPU_MASK_NONE;
+static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
+static cpumask_t cpu_callin_map = CPU_MASK_NONE;
 
 static int idle_proc(void *cpup)
 {
 	int cpu = (int) cpup, err;
 
 	err = os_pipe(cpu_data[cpu].ipi_pipe, 1, 1);
-	if(err)
-		panic("CPU#%d failed to create IPI pipe, errno = %d", cpu,
-		      -err);
+	if(err < 0)
+		panic("CPU#%d failed to create IPI pipe, err = %d", cpu, -err);
 
 	activate_ipi(cpu_data[cpu].ipi_pipe[0],
 		     current->thread.mode.tt.extern_pid);
 
 	wmb();
 
-	if (cpu_test_and_set(cpu, &smp_callin_map)) {
+	if (cpu_test_and_set(cpu, cpu_callin_map)) {
 		printk("huh, CPU#%d already present??\n", cpu);
 		BUG();
 	}
 
-	while (!cpu_isset(cpu, &smp_commenced_mask))
+	while (!cpu_isset(cpu, smp_commenced_mask))
 		cpu_relax();
 
 	cpu_set(cpu, cpu_online_map);
@@ -143,14 +107,16 @@ static struct task_struct *idle_thread(int cpu)
 	current->thread.request.u.thread.proc = idle_proc;
 	current->thread.request.u.thread.arg = (void *) cpu;
-	new_task = do_fork(CLONE_VM | CLONE_IDLETASK, 0, NULL, 0, NULL, NULL);
-	if(IS_ERR(new_task)) panic("do_fork failed in idle_thread");
+	new_task = fork_idle(cpu);
+	if(IS_ERR(new_task))
+		panic("copy_process failed in idle_thread, error = %ld",
+		      PTR_ERR(new_task));
 
 	cpu_tasks[cpu] = ((struct cpu_task)
 			  { .pid = new_task->thread.mode.tt.extern_pid,
 			    .task = new_task } );
 	idle_threads[cpu] = new_task;
-	CHOOSE_MODE(write(new_task->thread.mode.tt.switch_pipe[1], &c,
+	CHOOSE_MODE(os_write_file(new_task->thread.mode.tt.switch_pipe[1], &c,
 			  sizeof(c)),
 		    ({ panic("skas mode doesn't support SMP"); }));
 	return(new_task);
@@ -160,15 +126,21 @@ void smp_prepare_cpus(unsigned int maxcpus)
 {
 	struct task_struct *idle;
 	unsigned long waittime;
-	int err, cpu;
+	int err, cpu, me = smp_processor_id();
+	int i;
+
+	for (i = 0; i < ncpus; ++i)
+		cpu_set(i, cpu_possible_map);
 
-	cpu_set(0, cpu_online_map);
-	cpu_set(0, smp_callin_map);
+	cpu_clear(me, cpu_online_map);
+	cpu_set(me, cpu_online_map);
+	cpu_set(me, cpu_callin_map);
 
-	err = os_pipe(cpu_data[0].ipi_pipe, 1, 1);
-	if(err) panic("CPU#0 failed to create IPI pipe, errno = %d", -err);
+	err = os_pipe(cpu_data[me].ipi_pipe, 1, 1);
+	if(err < 0)
+		panic("CPU#0 failed to create IPI pipe, errno = %d", -err);
 
-	activate_ipi(cpu_data[0].ipi_pipe[0],
+	activate_ipi(cpu_data[me].ipi_pipe[0],
 		     current->thread.mode.tt.extern_pid);
 
 	for(cpu = 1; cpu < ncpus; cpu++){
@@ -180,10 +152,10 @@ void smp_prepare_cpus(unsigned int maxcpus)
 		unhash_process(idle);
 
 		waittime = 200000000;
-		while (waittime-- && !cpu_isset(cpu, smp_callin_map))
+		while (waittime-- && !cpu_isset(cpu, cpu_callin_map))
 			cpu_relax();
 
-		if (cpu_isset(cpu, smp_callin_map))
+		if (cpu_isset(cpu, cpu_callin_map))
 			printk("done\n");
 		else printk("failed\n");
 	}
@@ -216,7 +188,7 @@ void IPI_handler(int cpu)
 	int fd;
 
 	fd = cpu_data[cpu].ipi_pipe[0];
-	while (read(fd, &c, 1) == 1) {
+	while (os_read_file(fd, &c, 1) == 1) {
 		switch (c) {
 		case 'C':
 			smp_call_function_slave(cpu);
@@ -276,9 +248,9 @@ int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic,
 	info = _info;
 
 	for (i=0;i<NR_CPUS;i++)
-		if((i != current->thread_info->cpu) &&
+		if((i != current_thread->cpu) &&
 		   cpu_isset(i, cpu_online_map))
-			write(cpu_data[i].ipi_pipe[1], "C", 1);
+			os_write_file(cpu_data[i].ipi_pipe[1], "C", 1);
 
 	while (atomic_read(&scf_started) != cpus)
 		barrier();
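
Note on the mechanism these hunks touch: UML has no hardware inter-processor interrupts, so each virtual CPU owns a host pipe. A sender pushes a single command byte down it ('R' for reschedule, 'S' for stop, 'C' for a cross-CPU function call), and IPI_handler() drains the pipe and dispatches on that byte. The stand-alone sketch below illustrates the same pattern; it is not code from the UML tree, the file and function names (ipi_pipe_demo.c, send_ipi, ipi_handler) are made up for the demo, and plain POSIX pipe()/read()/write() stand in for the os_pipe()/os_read_file()/os_write_file() wrappers this patch switches to.

/* ipi_pipe_demo.c - illustrative sketch only, not UML source.
 * Build: cc -Wall -o ipi_pipe_demo ipi_pipe_demo.c
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int ipi_pipe[2];			/* [0] = read end, [1] = write end */

/* One byte per "IPI", as in smp_send_reschedule()/smp_send_stop() above. */
static void send_ipi(char cmd)
{
	if (write(ipi_pipe[1], &cmd, 1) != 1)
		perror("write");
}

/* Drain and dispatch pending "IPIs", mirroring the IPI_handler() loop. */
static void ipi_handler(void)
{
	char c;

	while (read(ipi_pipe[0], &c, 1) == 1) {
		switch (c) {
		case 'R':
			printf("reschedule request\n");
			break;
		case 'S':
			printf("stop request\n");
			break;
		case 'C':
			printf("call-function request\n");
			break;
		default:
			printf("unknown IPI byte '%c'\n", c);
		}
	}
}

int main(void)
{
	if (pipe(ipi_pipe) < 0) {
		perror("pipe");
		return 1;
	}
	/* Non-blocking read end so the drain loop stops once the pipe is empty. */
	fcntl(ipi_pipe[0], F_SETFL, fcntl(ipi_pipe[0], F_GETFL) | O_NONBLOCK);

	send_ipi('R');
	send_ipi('C');
	ipi_handler();
	return 0;
}

In the real kernel the read end is not polled as it is here: activate_ipi() hands it to UML's SIGIO-driven IRQ machinery, so an arriving byte raises a virtual interrupt on the target CPU.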