/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/config.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>

cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

EXPORT_SYMBOL(cpu_online_map);

cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

static void smp_tune_scheduling (void)
{
	struct cache_desc *cd = &current_cpu_data.scache;
	unsigned long cachesize;	/* kB	*/
	unsigned long bandwidth = 350;	/* MB/s */
	unsigned long cpu_khz;

	/*
	 * Crude estimate until we actually measure ...
	 */
	cpu_khz = loops_per_jiffy * 2 * HZ / 1000;

	/*
	 * Rough estimation for SMP scheduling, this is the number of
	 * cycles it takes for a fully memory-limited process to flush
	 * the SMP-local cache.
	 *
	 * (For a P5 this pretty much means we will choose another idle
	 *  CPU almost always at wakeup time (this is due to the small
	 *  L1 cache), on PIIs it's around 50-100 usecs, depending on
	 *  the cache size)
	 */
	if (!cpu_khz) {
		/*
		 * This basically disables processor-affinity scheduling on SMP
		 * without a cycle counter.  Currently all SMP capable MIPS
		 * processors have a cycle counter.
		 */
		cacheflush_time = 0;
		return;
	}

	cachesize = cd->linesz * cd->sets * cd->ways;
	cacheflush_time = (cpu_khz >> 10) * (cachesize << 10) / bandwidth;
	cache_decay_ticks = (long)cacheflush_time / cpu_khz * HZ / 1000;

	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
		(long)cacheflush_time / (cpu_khz / 1000),
		((long)cacheflush_time * 100 / (cpu_khz / 1000)) % 100);
	printk("task migration cache decay timeout: %ld msecs.\n",
		(cache_decay_ticks + 1) * 1000 / HZ);
}

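/*
 * Worked example of the estimate above, with hypothetical numbers for
 * illustration only: for cpu_khz = 500000 (a 500 MHz clock) and a
 * 512 kB secondary cache,
 *
 *	cacheflush_time = (500000 >> 10) * (512 << 10) / 350
 *	                = 488 * 524288 / 350
 *	               ~= 731000 cycles,
 *
 * which the per-CPU timeslice cutoff printk above reports as roughly
 * 731000 / (500000 / 1000) ~= 1462 usecs.
 */
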
extern void __init calibrate_delay(void);

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();

	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	prom_init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	prom_smp_finish();

	cpu_set(cpu, cpu_callin_map);

	cpu_idle();
}

spinlock_t smp_call_lock = SPIN_LOCK_UNLOCKED;

struct call_data_struct *call_data;

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int retry,
		       int wait)
{
	struct call_data_struct data;
	int i, cpus = num_online_cpus() - 1;
	int cpu = smp_processor_id();

	if (!cpus)
		return 0;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&smp_call_lock);
	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i) && i != cpu)
			core_send_ipi(i, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	spin_unlock(&smp_call_lock);

	return 0;
}

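/*
 * Illustrative usage sketch (hypothetical example, kept under #if 0 so
 * it is never built): run a fast, non-blocking callback on every other
 * CPU and wait for completion.  Note that smp_call_function() does not
 * include the calling CPU, so the caller invokes the callback locally.
 */
#if 0
static atomic_t example_hits = ATOMIC_INIT(0);

/* Must be fast and non-blocking; runs from the IPI handler. */
static void example_ipi_func(void *info)
{
	atomic_inc((atomic_t *) info);
}

static void example_run_everywhere(void)
{
	/* retry = 1, wait = 1: block until all other CPUs have run it */
	smp_call_function(example_ipi_func, &example_hits, 1, 1);
	example_ipi_func(&example_hits);	/* this CPU is not included */
}
#endif
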
void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	mb();
	atomic_inc(&call_data->started);

	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}

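/*
 * (The two increments above pair with the two wait loops in
 * smp_call_function(): "started" tells the initiator that func, info
 * and wait have been copied off its stack, and "finished" tells it the
 * callback has completed, so the on-stack call_data_struct may safely
 * go out of scope.)
 */
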
static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_enable();	/* May need to service _machine_restart IPI */
	for (;;);		/* Wait if available. */
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	cpu_data[0].udelay_val = loops_per_jiffy;
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	prom_build_cpu_map();
	prom_prepare_cpus(max_cpus);
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	/*
	 * This assumes that bootup is always handled by the processor
	 * with the logical and physical number 0.
	 */
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;
	cpu_set(0, phys_cpu_present_map);
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);
}

static struct task_struct * __init fork_by_hand(void)
{
	struct pt_regs regs;
	/*
	 * don't care about the eip and regs settings since
	 * we'll never reschedule the forked task.
	 */
	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
}

/*
 * Startup the CPU with this logical number
 */
static int __init do_boot_cpu(int cpu)
{
	struct task_struct *idle;

	/*
	 * The following code is purely to make sure
	 * Linux can schedule processes on this slave.
	 */
	idle = fork_by_hand();
	if (IS_ERR(idle))
		panic("failed fork for CPU %d\n", cpu);

	wake_up_forked_process(idle);

	/*
	 * We remove it from the pidhash and the runqueue once we've
	 * got the process:
	 */
	init_idle(idle, cpu);

	unhash_process(idle);

	prom_boot_secondary(cpu, idle);

	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}

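/*
 * (The wait loop above is the boot CPU's half of the handshake with
 * start_secondary(): the slave sets its bit in cpu_callin_map once it
 * is up, and only then does the boot CPU mark it online.)
 */
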
/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __devinit __cpu_up(unsigned int cpu)
{
	int ret;

	/* Processor goes to start_secondary(), sets online flag */
	ret = do_boot_cpu(cpu);
	if (ret < 0)
		return ret;

	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and the
 * tlb context on other cpus is invalidated to force a new context
 * allocation at switch_mm time, should the mm ever be used on other
 * cpus. For multithreaded address spaces, intercpu interrupts have to
 * be sent.  Another case where intercpu interrupts are required is when
 * the target mm might be active on another cpu (e.g. debuggers doing
 * the flushes on behalf of debuggees, kswapd stealing pages from
 * another process etc).
 */
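/*
 * (On MIPS, "invalidating" a remote cpu's tlb context simply means
 * setting cpu_context(cpu, mm) to zero; get_new_mmu_context() then
 * allocates a fresh ASID the next time switch_mm() runs the mm on that
 * cpu, leaving any stale TLB entries unreachable without an IPI.)
 */
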
void flush_tlb_mm(struct mm_struct *mm)
{
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
	} else {
		int i;

		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
	} else {
		int i;

		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
	} else {
		int i;

		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_call_function(flush_tlb_one_ipi, (void *) vaddr, 1, 1);
	local_flush_tlb_one(vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
EXPORT_SYMBOL(cpu_data);
EXPORT_SYMBOL(synchronize_irq);