/*
 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */
#include "linux/config.h"
#include "linux/sched.h"
#include "linux/module.h"
#include "linux/threads.h"
#include "linux/interrupt.h"
#include "linux/err.h"
#include "asm/processor.h"
#include "asm/spinlock.h"
#include "asm/hardirq.h"
#include "user_util.h"
#include "kern_util.h"
#include "irq_user.h"
#include "os.h"
/* CPU online map, set by smp_boot_cpus */
unsigned long cpu_online_map = cpumask_of_cpu(0);

EXPORT_SYMBOL(cpu_online_map);
/* Per CPU bogomips and other parameters
 * The only piece used here is the ipi pipe, which is set before SMP is
 * started and never changed.
 */
struct cpuinfo_um cpu_data[NR_CPUS];
spinlock_t um_bh_lock = SPIN_LOCK_UNLOCKED;

atomic_t global_bh_count;

unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile long global_irq_lock;
/* Set when the idlers are all forked */
int smp_threads_ready = 0;

/* A statistic, can be a little off */
int num_reschedules_sent = 0;

/* Small, random number, never changed */
unsigned long cache_decay_ticks = 5;

/* Not changed after boot */
struct task_struct *idle_threads[NR_CPUS];
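
/* IPIs are delivered by writing a single byte down the target CPU's
 * ipi_pipe; IPI_handler() below reads the byte and acts on it.  'R'
 * asks the receiving CPU to reschedule.
 */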
void smp_send_reschedule(int cpu)
{
	write(cpu_data[cpu].ipi_pipe[1], "R", 1);
	num_reschedules_sent++;
}
static void show(char * str)
{
	int cpu = smp_processor_id();

	printk(KERN_INFO "\n%s, CPU %d:\n", str, cpu);
}

#define MAXCOUNT 100000000
static inline void wait_on_bh(void)
{
	int count = MAXCOUNT;

	do {
		if (!--count) {
			show("wait_on_bh");
			count = ~0;
		}
		/* nothing .. wait for the other bh's to go away */
	} while (atomic_read(&global_bh_count) != 0);
}
/*
 * This is called when we want to synchronize with
 * bottom half handlers. We need to wait until
 * no other CPU is executing any bottom half handler.
 *
 * Don't wait if we're already running in an interrupt
 * context or are inside a bh handler.
 */
void synchronize_bh(void)
{
	if (atomic_read(&global_bh_count) && !in_interrupt())
		wait_on_bh();
}
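
/* Ask every other online CPU to stop by sending it an 'S' IPI; the
 * calling CPU is skipped.
 */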
void smp_send_stop(void)
{
	int i;

	printk(KERN_INFO "Stopping all CPUs...");
	for(i = 0; i < num_online_cpus(); i++){
		if(i == current->thread_info->cpu)
			continue;
		write(cpu_data[i].ipi_pipe[1], "S", 1);
	}
	printk("done\n");
}
static cpumask_t smp_commenced_mask;
static cpumask_t smp_callin_map = CPU_MASK_NONE;
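
/* Secondary CPU boot handshake: each idler checks in on smp_callin_map
 * from idle_proc(), then spins until __cpu_up() sets its bit in
 * smp_commenced_mask, and finally marks itself online.
 */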
static int idle_proc(void *cpup)
{
	int cpu = (int) cpup, err;

	err = os_pipe(cpu_data[cpu].ipi_pipe, 1, 1);
	if(err)
		panic("CPU#%d failed to create IPI pipe, errno = %d", cpu,
		      -err);

	activate_ipi(cpu_data[cpu].ipi_pipe[0],
		     current->thread.mode.tt.extern_pid);

	wmb();
	if (cpu_test_and_set(cpu, smp_callin_map)) {
		printk("huh, CPU#%d already present??\n", cpu);
		BUG();
	}

	while (!cpu_isset(cpu, smp_commenced_mask))
		cpu_relax();

	cpu_set(cpu, cpu_online_map);
	default_idle();
	return(0);
}
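
/* Fork the idle task for a secondary CPU.  This only works in tt mode;
 * CHOOSE_MODE() panics if the kernel is running in skas mode.
 */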
static struct task_struct *idle_thread(int cpu)
{
	struct task_struct *new_task;
	unsigned char c;

	current->thread.request.u.thread.proc = idle_proc;
	current->thread.request.u.thread.arg = (void *) cpu;
	new_task = do_fork(CLONE_VM | CLONE_IDLETASK, 0, NULL, 0, NULL, NULL);
	if(IS_ERR(new_task))
		panic("do_fork failed in idle_thread");

	cpu_tasks[cpu] = ((struct cpu_task)
			  { .pid = new_task->thread.mode.tt.extern_pid,
			    .task = new_task });
	idle_threads[cpu] = new_task;
	CHOOSE_MODE(write(new_task->thread.mode.tt.switch_pipe[1], &c,
			  sizeof(c)),
		    ({ panic("skas mode doesn't support SMP"); }));
	return(new_task);
}
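
/* Bring up the secondary CPUs: CPU 0 gets its own IPI pipe, then each
 * additional processor (up to ncpus) gets an idle thread and a bounded
 * amount of time to check in on smp_callin_map.
 */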
void smp_prepare_cpus(unsigned int maxcpus)
{
	struct task_struct *idle;
	unsigned long waittime;
	int err, cpu;

	cpu_set(0, cpu_online_map);
	cpu_set(0, smp_callin_map);

	err = os_pipe(cpu_data[0].ipi_pipe, 1, 1);
	if(err) panic("CPU#0 failed to create IPI pipe, errno = %d", -err);

	activate_ipi(cpu_data[0].ipi_pipe[0],
		     current->thread.mode.tt.extern_pid);

	for(cpu = 1; cpu < ncpus; cpu++){
		printk("Booting processor %d...\n", cpu);

		idle = idle_thread(cpu);

		init_idle(idle, cpu);
		unhash_process(idle);

		waittime = 200000000;
		while (waittime-- && !cpu_isset(cpu, smp_callin_map))
			cpu_relax();

		if (cpu_isset(cpu, smp_callin_map))
			printk("done\n");
		else printk("failed\n");
	}
}
void smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
}
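
/* Release a checked-in CPU into the idle loop and wait for it to mark
 * itself online.
 */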
int __cpu_up(unsigned int cpu)
{
	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_isset(cpu, cpu_online_map))
		mb();
	return(0);
}
int setup_profiling_timer(unsigned int multiplier)
{
	printk(KERN_INFO "setup_profiling_timer\n");
	return(0);
}
void smp_call_function_slave(int cpu);
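
/* Drain this CPU's IPI pipe and act on each byte: 'C' runs the pending
 * cross-call, 'R' requests a reschedule, 'S' stops the CPU.
 */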
void IPI_handler(int cpu)
{
	unsigned char c;
	int fd;

	fd = cpu_data[cpu].ipi_pipe[0];
	while (read(fd, &c, 1) == 1) {
		switch (c) {
		case 'C':
			smp_call_function_slave(cpu);
			break;
		case 'R':
			set_tsk_need_resched(current);
			break;
		case 'S':
			printk("CPU#%d stopping\n", cpu);
			while (1)
				pause();
			break;
		default:
			printk("CPU#%d received unknown IPI [%c]!\n", cpu, c);
			break;
		}
	}
}
int hard_smp_processor_id(void)
{
	return(pid_to_processor_id(os_getpid()));
}
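
/* Cross-call state: the initiating CPU publishes func/info under
 * call_lock and then counts the other CPUs in via scf_started and
 * scf_finished.
 */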
static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
static atomic_t scf_started;
static atomic_t scf_finished;
static void (*func)(void *info);
static void *info;
void smp_call_function_slave(int cpu)
{
	atomic_inc(&scf_started);
	(*func)(info);
	atomic_inc(&scf_finished);
}
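
/* Run _func on every other online CPU by sending each one a 'C' IPI.
 * The caller always waits for every CPU to start the function, and also
 * for them to finish if 'wait' is set.
 */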
int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic,
		      int wait)
{
	int cpus = num_online_cpus() - 1;
	int i;

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	spin_lock_bh(&call_lock);
	atomic_set(&scf_started, 0);
	atomic_set(&scf_finished, 0);
	func = _func;
	info = _info;

	for (i = 0; i < NR_CPUS; i++)
		if((i != current->thread_info->cpu) &&
		   cpu_isset(i, cpu_online_map))
			write(cpu_data[i].ipi_pipe[1], "C", 1);

	while (atomic_read(&scf_started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&scf_finished) != cpus)
			barrier();

	spin_unlock_bh(&call_lock);
	return 0;
}
/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */