/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);

cycles_t cacheflush_time;
unsigned long cache_decay_ticks;
static void smp_tune_scheduling (void)
{
	struct cache_desc *cd = &current_cpu_data.scache;
	unsigned long cachesize;	/* kB */
	unsigned long bandwidth = 350;	/* MB/s */
	unsigned long cpu_khz;

	/*
	 * Crude estimate until we actually measure ...
	 */
	cpu_khz = loops_per_jiffy * 2 * HZ / 1000;

	/*
	 * Rough estimation for SMP scheduling, this is the number of
	 * cycles it takes for a fully memory-limited process to flush
	 * the SMP-local cache.
	 *
	 * (For a P5 this pretty much means we will choose another idle
	 *  CPU almost always at wakeup time (this is due to the small
	 *  L1 cache), on PIIs it's around 50-100 usecs, depending on
	 *  the cache size)
	 */
	if (!cpu_khz) {
		/*
		 * This basically disables processor-affinity scheduling on SMP
		 * without a cycle counter.  Currently all SMP capable MIPS
		 * processors have a cycle counter.
		 */
		cacheflush_time = 0;
		return;
	}

	cachesize = (cd->linesz * cd->sets * cd->ways) >> 10;	/* bytes -> kB */
	cacheflush_time = (cpu_khz >> 10) * (cachesize << 10) / bandwidth;
	cache_decay_ticks = (long)cacheflush_time / cpu_khz * HZ / 1000;

	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
		(long)cacheflush_time / (cpu_khz / 1000),
		((long)cacheflush_time * 100 / (cpu_khz / 1000)) % 100);
	printk("task migration cache decay timeout: %ld msecs.\n",
		(cache_decay_ticks + 1) * 1000 / HZ);
}
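/*
 * Worked example of the estimate above (hypothetical numbers, for
 * illustration only): with a 512 MHz CPU (cpu_khz = 512000, so
 * cpu_khz >> 10 = 500 cycles per usec) and a 512 kB scache
 * (32-byte lines * 1024 sets * 16 ways, so cachesize = 512):
 *
 *   cacheflush_time   = 500 * (512 << 10) / 350  ~= 749000 cycles,
 *                       i.e. ~1.5 msecs at 512 cycles/usec;
 *   cache_decay_ticks = 749000 / 512000 * HZ / 1000 = 0 with HZ == 100,
 *                       printed as a (0 + 1) * 1000 / HZ = 10 msec timeout.
 */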
extern void __init calibrate_delay(void);
extern ATTRIB_NORET void cpu_idle(void);
/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();

	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	prom_init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	prom_smp_finish();

	cpu_set(cpu, cpu_callin_map);

	cpu_idle();
}
DEFINE_SPINLOCK(smp_call_lock);

struct call_data_struct *call_data;
/*
 * Run a function on all other CPUs.
 *  <func>      The function to run. This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>,
 * are executing it, or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int retry,
								int wait)
{
	struct call_data_struct data;
	int i, cpus = num_online_cpus() - 1;
	int cpu = smp_processor_id();

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&smp_call_lock);
	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i) && i != cpu)
			core_send_ipi(i, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	spin_unlock(&smp_call_lock);

	return 0;
}
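/*
 * Usage sketch, assuming a hypothetical callback (example_ipi and
 * example_hits are not part of this file).  The callback runs in IPI
 * context on every other online CPU, so it must be fast, must not
 * sleep and must not take locks that can be held with IRQs disabled:
 *
 *	static atomic_t example_hits = ATOMIC_INIT(0);
 *
 *	static void example_ipi(void *info)
 *	{
 *		atomic_inc(&example_hits);
 *	}
 *
 *	smp_call_function(example_ipi, NULL, 1, 1);	wait for all CPUs
 *	example_ipi(NULL);	the calling CPU is skipped, run it locally
 */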
void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	mb();
	atomic_inc(&call_data->started);

	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}
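/*
 * For reference, the handshake between smp_call_function() and the
 * handler above, with one initiator and (num_online_cpus() - 1)
 * responders:
 *
 *   initiator:  call_data = &data; mb(); send SMP_CALL_FUNCTION IPIs
 *   responder:  load func/info/wait; mb(); atomic_inc(started)
 *   initiator:  spin until started == cpus (data may go out of scope
 *               now unless wait was set)
 *   responder:  (*func)(info); then, if wait: mb(); atomic_inc(finished)
 *   initiator:  if wait, spin until finished == cpus; release the lock
 */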
static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_enable();	/* May need to service _machine_restart IPI */
	for (;;);		/* Wait if available. */
}
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	prom_cpus_done();
}
/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	cpu_data[0].udelay_val = loops_per_jiffy;
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	prom_prepare_cpus(max_cpus);
}
/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	/*
	 * This assumes that bootup is always handled by the processor
	 * with the logical and physical number 0.
	 */
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;
	cpu_set(0, phys_cpu_present_map);
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);
}
/*
 * Startup the CPU with this logical number
 */
static int __init do_boot_cpu(int cpu)
{
	struct task_struct *idle;

	/*
	 * The following code is purely to make sure
	 * Linux can schedule processes on this slave.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d\n", cpu);

	prom_boot_secondary(cpu, idle);

	/* Wait for the secondary to check in via cpu_callin_map */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}
/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __devinit __cpu_up(unsigned int cpu)
{
	int ret;

	/* Processor goes to start_secondary(), sets online flag */
	ret = do_boot_cpu(cpu);
	if (ret < 0)
		return ret;

	return 0;
}
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
}
static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}
/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing.  For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus is invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus.  For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}
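/*
 * Illustration of the else branch above (hypothetical values): a
 * single-threaded mm last ran on CPU 1 with ASID 5.  flush_tlb_mm()
 * on CPU 0 sets cpu_context(1, mm) = 0, so the next switch_mm() on
 * CPU 1 sees no valid context and allocates a fresh ASID; the stale
 * TLB entries still tagged with ASID 5 can then never match.
 */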
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};
static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}
static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
}
static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}
static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}
void flush_tlb_one(unsigned long vaddr)
{
	smp_call_function(flush_tlb_one_ipi, (void *) vaddr, 1, 1);
	local_flush_tlb_one(vaddr);
}
EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
EXPORT_SYMBOL(cpu_data);
EXPORT_SYMBOL(synchronize_irq);