/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
*/
#undef ENTRY_SYS_CPUS	/* syscall support for iCOD-like functionality */
#include <linux/autoconf.h>

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>

#include <linux/delay.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/pgalloc.h>	/* for flush_tlb_all() proto/macro */

#include <asm/irq.h>		/* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
spinlock_t pa_dbit_lock = SPIN_LOCK_UNLOCKED;

spinlock_t smp_lock = SPIN_LOCK_UNLOCKED;

volatile struct task_struct *smp_init_current_idle_task;

static volatile int smp_commenced = 0;    /* Set when the idlers are all forked */
static volatile int cpu_now_booting = 0;  /* track which CPU is booting */
cpumask_t cpu_online_map = CPU_MASK_NONE; /* Bitmap of online CPUs */
#define IS_LOGGED_IN(cpunum) (cpu_isset(cpunum, cpu_online_map))

EXPORT_SYMBOL(cpu_online_map);

int smp_num_cpus = 1;
int smp_threads_ready = 0;
unsigned long cache_decay_ticks;
static int max_cpus = -1;	/* Command line */
cpumask_t cpu_present_mask;

EXPORT_SYMBOL(cpu_present_mask);
struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};
static volatile struct smp_call_struct *smp_call_function_data;
enum ipi_message_type {
	IPI_NOP=0,
	IPI_RESCHEDULE=1,
	IPI_CALL_FUNC,
	IPI_CPU_START,
	IPI_CPU_STOP,
	IPI_CPU_TEST
};
/********** SMP inter processor interrupt and communication routines */

#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
**    *May* need this "hook" to register IPI handler
**    once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{
	/* If CPU is present ... */
#ifdef ENTRY_SYS_CPUS
	/* *and* running (not stopped) ... */
#error iCOD support wants state checked here.
#endif

#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region

	if (IS_LOGGED_IN(cpuid))
	{
		switch_to_idle_task(current);
	}
	return;
}
#endif
/*
** Yoink this CPU from the runnable list...
*/
static void
halt_processor(void)
{
#ifdef ENTRY_SYS_CPUS
#error halt_processor() needs rework
/*
** o migrate I/O interrupts off this CPU.
** o leave IPI enabled - __cli() will disable IPI.
** o leave CPU in online map - just change the state
*/
	cpu_data[this_cpu].state = STATE_STOPPED;
#else
	/* REVISIT : redirect I/O Interrupts to another CPU? */
	/* REVISIT : does PM *know* this CPU isn't available? */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	for (;;)
		;
#endif
}
irqreturn_t
ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &cpu_data[this_cpu];
	unsigned long ops;
	unsigned long flags;

	/* Count this now; we may make a call that never returns. */
	p->ipi_count++;

	mb();	/* Order interrupt and bit testing. */

	for (;;) {
		spin_lock_irqsave(&(p->lock),flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(&(p->lock),flags);

		mb(); /* Order bit clearing and data access. */

		if (!ops)
			break;

		while (ops) {
			unsigned long which = ffz(~ops);

			switch (which) {
			case IPI_RESCHEDULE:
				printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu);
				ops &= ~(1 << IPI_RESCHEDULE);
				/*
				 * Reschedule callback. Everything to be
				 * done is done by the interrupt return path.
				 */
				break;

			case IPI_CALL_FUNC:
				printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu);
				ops &= ~(1 << IPI_CALL_FUNC);
				{
					volatile struct smp_call_struct *data;
					void (*func)(void *info);
					void *info;
					int wait;

					data = smp_call_function_data;
					func = data->func;
					info = data->info;
					wait = data->wait;

					mb();
					atomic_dec ((atomic_t *)&data->unstarted_count);

					/* At this point, *data can't
					 * be relied upon.
					 */
					(*func)(info);

					/* Notify the sending CPU that the
					 * task is done.
					 */
					mb();
					if (wait)
						atomic_dec ((atomic_t *)&data->unfinished_count);
				}
				break;

			case IPI_CPU_START:
				printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu);
				ops &= ~(1 << IPI_CPU_START);
#ifdef ENTRY_SYS_CPUS
				p->state = STATE_RUNNING;
#endif
				break;

			case IPI_CPU_STOP:
				printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu);
				ops &= ~(1 << IPI_CPU_STOP);
#ifdef ENTRY_SYS_CPUS
#else
				halt_processor();
#endif
				break;

			case IPI_CPU_TEST:
				printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu);
				ops &= ~(1 << IPI_CPU_TEST);
				break;

			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				ops &= ~(1 << which);
				return IRQ_NONE;
			} /* Switch */
		} /* while (ops) */
	}
	return IRQ_HANDLED;
}
static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *p = &cpu_data[cpu];
	unsigned long flags;

	spin_lock_irqsave(&(p->lock),flags);
	p->pending_ipi |= 1 << op;
	__raw_writel(IRQ_OFFSET(IPI_IRQ), cpu_data[cpu].hpa);
	spin_unlock_irqrestore(&(p->lock),flags);
}
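
/*
 * A minimal sketch (not built) of how the two halves of the IPI path
 * pair up: ipi_send() sets a bit in the target's pending_ipi word under
 * p->lock and then pokes the target's HPA, which raises IPI_IRQ there;
 * ipi_interrupt() on that CPU pulls the bit back out and dispatches on
 * it.  IPI_CPU_TEST is the cheapest probe - the receiver only logs
 * "CPU%d is alive!".  smp_ping_cpu() is hypothetical, for illustration.
 */
#if 0
static void smp_ping_cpu(int cpu)
{
	if (IS_LOGGED_IN(cpu))		/* only poke CPUs that are online */
		ipi_send(cpu, IPI_CPU_TEST);
}
#endif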
static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	if (dest_cpu == NO_PROC_ID) {
		BUG();
		return;
	}

	ipi_send(dest_cpu, op);
}

static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
	int i;

	for (i = 0; i < smp_num_cpus; i++) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}
void
smp_send_stop(void)	{ send_IPI_allbutself(IPI_CPU_STOP); }

static inline void
smp_send_start(void)	{ send_IPI_allbutself(IPI_CPU_START); }

void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
/*
 * Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have executed it.
 */
int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	struct smp_call_struct data;
	unsigned long timeout;
	static spinlock_t lock = SPIN_LOCK_UNLOCKED;

	data.func = func;
	data.info = info;
	data.wait = wait;
	atomic_set(&data.unstarted_count, smp_num_cpus - 1);
	atomic_set(&data.unfinished_count, smp_num_cpus - 1);

	if (retry) {
		spin_lock (&lock);
		while (smp_call_function_data != 0)
			barrier();
	}
	else {
		spin_lock (&lock);
		if (smp_call_function_data) {
			spin_unlock (&lock);
			return -EBUSY;
		}
	}

	smp_call_function_data = &data;
	spin_unlock (&lock);

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(IPI_CALL_FUNC);

	/* Wait for response */
	timeout = jiffies + HZ;
	while ( (atomic_read (&data.unstarted_count) > 0) &&
		time_before (jiffies, timeout) )
		barrier ();

	/* We either got one or timed out. Release the lock */

	mb();
	smp_call_function_data = NULL;
	if (atomic_read (&data.unstarted_count) > 0) {
		printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d)\n",
		       smp_processor_id());
		return -ETIMEDOUT;
	}

	while (wait && atomic_read (&data.unfinished_count) > 0)
		barrier ();

	return 0;
}

EXPORT_SYMBOL(smp_call_function);
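
/*
 * A minimal usage sketch (not built).  The handler runs in interrupt
 * context on the remote CPUs, so it must be fast and must not sleep;
 * bump_counter() and probe_count are hypothetical examples, not part
 * of this file.  With retry=1 we spin until the single in-flight call
 * slot is free; with wait=1 we also spin until every other CPU has
 * finished running the handler.
 */
#if 0
static atomic_t probe_count = ATOMIC_INIT(0);

static void bump_counter(void *info)
{
	atomic_inc((atomic_t *)info);	/* fast and non-blocking */
}

static void probe_other_cpus(void)
{
	if (smp_call_function(bump_counter, &probe_count, 1, 1) != 0)
		printk(KERN_WARNING "probe_other_cpus: IPI failed\n");
}
#endif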
/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */
static int __init nosmp(char *str)
{
	max_cpus = 0;
	return 1;
}

__setup("nosmp", nosmp);

static int __init maxcpus(char *str)
{
	get_option(&str, &max_cpus);
	return 1;
}

__setup("maxcpus=", maxcpus);
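
/*
 * Example boot command lines (illustrative):
 *	"nosmp"		- boot uniprocessor even on SMP hardware
 *	"maxcpus=0"	- same effect, via the maxcpus= parser
 *	"maxcpus=2"	- activate at most two CPUs
 */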
/*
 * Flush all other CPU's tlb and then mine.  Do this with on_each_cpu()
 * as we want to ensure all TLB's flushed before proceeding.
 */

extern void flush_tlb_all_local(void);

void
smp_flush_tlb_all(void)
{
	on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
}
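
/*
 * A sketch (not built) of the same pattern for any other per-CPU
 * operation that must complete everywhere before the caller proceeds.
 * flush_data_cache_local() is assumed here purely for illustration;
 * the function-pointer cast mirrors the one used above.
 */
#if 0
void smp_flush_dcache_all(void)
{
	/* last argument == 1: don't return until all CPUs have run it */
	on_each_cpu((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
}
#endif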
void
smp_do_timer(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct cpuinfo_parisc *data = &cpu_data[cpu];

	if (!--data->prof_counter) {
		data->prof_counter = data->prof_multiplier;
		update_process_times(user_mode(regs));
	}
}
/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void __init
smp_cpu_init(int cpunum)
{
	extern int init_per_cpu(int);  /* arch/parisc/kernel/setup.c */
	extern void init_IRQ(void);    /* arch/parisc/kernel/irq.c */

	/* Set modes and Enable floating point coprocessor */
	(void) init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Well, support 2.4 linux scheme as well. */
	if (cpu_test_and_set(cpunum, cpu_online_map))
	{
		extern void machine_halt(void); /* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}

	/* Initialise the idle task for this CPU */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	init_IRQ();   /* make sure no IRQ's are enabled or pending */
}
/*
 * Slaves start using C here. Indirectly called from smp_slave_stext.
 * Do what start_kernel() and main() do for boot strap processor (aka monarch)
 */
void __init smp_callin(void)
{
	extern void cpu_idle(void);	/* arch/parisc/kernel/process.c */
	int slave_id = cpu_now_booting;

	smp_cpu_init(slave_id);

#if 0	/* NOT WORKING YET - see entry.S */
	istack = (void *)__get_free_pages(GFP_KERNEL,ISTACK_ORDER);
	if (istack == NULL) {
		printk(KERN_CRIT "Failed to allocate interrupt stack for cpu %d\n",slave_id);
		BUG();
	}
#endif

	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local();

	local_irq_enable();	/* Interrupts have been off until now */

	/* Slaves wait here until Big Poppa daddy say "jump" */
	mb();	/* PARANOID */
	while (!smp_commenced) ;
	mb();	/* PARANOID */

	cpu_idle();	/* Wait for timer to schedule some work */

	/* NOTREACHED */
	panic("smp_callin() AAAAaaaaahhhh....\n");
}
/*
 * Create the idle task for a new Slave CPU.  DO NOT use kernel_thread()
 * because that could end up calling schedule(). If it did, the new idle
 * task could get scheduled before we had a chance to remove it from the
 * run-queue...
 */
static struct task_struct *fork_by_hand(void)
{
	struct pt_regs regs;

	/*
	 * don't care about the regs settings since
	 * we'll never reschedule the forked task.
	 */
	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
}
/*
 * Bring one cpu online.
 */
static int __init smp_boot_one_cpu(int cpuid, int cpunum)
{
	struct task_struct *idle;
	long timeout;

	/*
	 * Create an idle task for this CPU.  Note the address we'd give
	 * to kernel_thread is irrelevant -- it's going to start
	 * where OS_BOOT_RENDEVZ vector in SAL says to start.  But
	 * this gets all the other task-y sort of data structures set
	 * up like we wish.  We need to pull the just created idle task
	 * off the run queue and stuff it into the init_tasks[] array.
	 * Sheesh . . .
	 */

	idle = fork_by_hand();
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);

	wake_up_forked_process(idle);
	init_idle(idle, cpunum);
	unhash_process(idle);
	idle->thread_info->cpu = cpunum;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[],cpu_data[])
	*/
	cpu_now_booting = cpunum;

	/*
	** boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle;
	mb();

	/*
	** This gets PDC to release the CPU from a very tight loop.
	** See MEM_RENDEZ comments in head.S.
	*/
	__raw_writel(IRQ_OFFSET(TIMER_IRQ), cpu_data[cpunum].hpa);
	mb();

	/*
	 * OK, wait a bit for that CPU to finish staggering about.
	 * Slave will set a bit when it reaches smp_cpu_init() and then
	 * wait for smp_commenced to be 1.
	 * Once we see the bit change, we can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if(IS_LOGGED_IN(cpunum)) {
			/* Which implies Slave has started up */
			smp_init_current_idle_task = NULL;
			goto alive;
		}
		udelay(100);
		barrier();
	}

	put_task_struct(idle);
	idle = NULL;

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
	printk(KERN_DEBUG "SMP: CPU:%d (num %d) came alive after %ld _us\n",
		cpuid, cpunum, timeout * 100);
#ifdef ENTRY_SYS_CPUS
	cpu_data[cpunum].state = STATE_RUNNING;
#endif
	return 0;
}
/*
** inventory.c:do_inventory() has already 'discovered' the additional CPU's.
** We are ready to wrest them from PDC's control now.
** Called by smp_init to bring all the secondaries online and hold them.
**
** o Setup of the IPI irq handler is done in irq.c.
** o MEM_RENDEZ is initialized in head.S:stext()
**
*/
void __init smp_boot_cpus(void)
{
	int i, cpu_count = 1;
	unsigned long bogosum = cpu_data[0].loops_per_jiffy; /* Count Monarch */

	/* REVISIT - assumes first CPU reported by PAT PDC is BSP */
	int bootstrap_processor = cpu_data[0].cpuid;	/* CPU ID of BSP */

	/* Setup BSP mappings */
	printk(KERN_DEBUG "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
	init_task.thread_info->cpu = bootstrap_processor;
	current->thread_info->cpu = bootstrap_processor;
	/* Mark Bootstrap processor as present */
	cpu_online_map = cpumask_of_cpu(bootstrap_processor);
	current->active_mm = &init_mm;

#ifdef ENTRY_SYS_CPUS
	cpu_data[0].state = STATE_RUNNING;
#endif
	cpu_present_mask = cpumask_of_cpu(bootstrap_processor);

	/* Nothing to do when told not to. */
	if (max_cpus == 0) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	if (max_cpus != -1)
		printk(KERN_INFO "Limiting CPUs to %d\n", max_cpus);

	/* We found more than one CPU.... */
	if (boot_cpu_data.cpu_count > 1) {

		for (i = 0; i < NR_CPUS; i++) {
			if (cpu_data[i].cpuid == NO_PROC_ID ||
			    cpu_data[i].cpuid == bootstrap_processor)
				continue;

			if (smp_boot_one_cpu(cpu_data[i].cpuid, cpu_count) < 0)
				continue;

			bogosum += cpu_data[i].loops_per_jiffy;
			cpu_count++; /* Count good CPUs only... */

			cpu_present_mask |= 1UL << i;

			/* Bail when we've started as many CPUS as told to */
			if (cpu_count == max_cpus)
				break;
		}
	}
	if (cpu_count == 1) {
		printk(KERN_INFO "SMP: Bootstrap processor only.\n");
	}

	/* FIXME: very rough approximation */
	cache_decay_ticks = HZ/100;

	printk(KERN_INFO "SMP: Total %d of %d processors activated "
	       "(%lu.%02lu BogoMIPS noticed) (Present Mask: %lu).\n",
	       cpu_count, boot_cpu_data.cpu_count, (bogosum + 25) / 5000,
	       ((bogosum + 25) / 50) % 100, cpu_present_mask);

	smp_num_cpus = cpu_count;
#ifdef PER_CPU_IRQ_REGION
	ipi_init();
#endif
	return;
}
/*
 * Called from main.c by Monarch Processor.
 * After this, any CPU can schedule any task.
 */
void smp_commence(void)
{
	smp_commenced = 1;
}
/*
 * XXX FIXME : do nothing
 */
void smp_cpus_done(unsigned int cpu_max)
{
	smp_threads_ready = 1;
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
}
void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_present_mask);
}
int __devinit __cpu_up(unsigned int cpu)
{
	return cpu_online(cpu) ? 0 : -ENOSYS;
}
#ifdef ENTRY_SYS_CPUS
/* Code goes along with:
**    entry.s:        ENTRY_NAME(sys_cpus)   / * 215, for cpu stat * /
*/
int sys_cpus(int argc, char **argv)
{
	int i, j = 0;
	extern int current_pid(int cpu);

	if (argc > 2) {
		printk("sys_cpus:Only one argument supported\n");
		return (-1);
	}

	if (argc == 1) {
#ifdef DUMP_MORE_STATE
		for(i=0; i<NR_CPUS; i++) {
			int cpus_per_line = 4;
			if(IS_LOGGED_IN(i)) {
				if (j++ % cpus_per_line)
					printk(" %3d", i);
				else
					printk("\n %3d", i);
			}
		}
		printk("\n");
#else
		printk("\n 0\n");
#endif
	} else if((argc==2) && !(strcmp(argv[1],"-l"))) {
		printk("\nCPUSTATE   TASK CPUNUM CPUID HARDCPU(HPA)\n");
#ifdef DUMP_MORE_STATE
		for(i=0;i<NR_CPUS;i++) {
			if (!IS_LOGGED_IN(i))
				continue;
			if (cpu_data[i].cpuid != NO_PROC_ID) {
				switch(cpu_data[i].state) {
				case STATE_RENDEZVOUS:
					printk("RENDEZVS ");
					break;
				case STATE_RUNNING:
					printk((current_pid(i)!=0) ? "RUNNING  " : "IDLING   ");
					break;
				case STATE_STOPPED:
					printk("STOPPED  ");
					break;
				case STATE_HALTED:
					printk("HALTED   ");
					break;
				default:
					printk("%08x?", cpu_data[i].state);
					break;
				}
				if(IS_LOGGED_IN(i)) {
					printk(" %4d", current_pid(i));
				}
				printk(" %6d", cpu_number_map(i));
				printk(" %5d", i);
				printk(" 0x%lx\n", cpu_data[i].hpa);
			}
		}
#else
		printk("\n%s  %4d      0     0 --------",
			(current->pid) ? "RUNNING " : "IDLING  ", current->pid);
#endif
	} else if ((argc==2) && !(strcmp(argv[1],"-s"))) {
#ifdef DUMP_MORE_STATE
		printk("\nCPUSTATE   CPUID\n");
		for (i=0;i<NR_CPUS;i++) {
			if (!IS_LOGGED_IN(i))
				continue;
			if (cpu_data[i].cpuid != NO_PROC_ID) {
				switch(cpu_data[i].state) {
				case STATE_RENDEZVOUS:
					printk("RENDEZVS"); break;
				case STATE_RUNNING:
					printk((current_pid(i)!=0) ? "RUNNING " : "IDLING  ");
					break;
				case STATE_STOPPED:
					printk("STOPPED "); break;
				case STATE_HALTED:
					printk("HALTED  "); break;
				default:
					break;
				}
				printk("  %5d\n", i);
			}
		}
#else
		printk("\n%s CPU0", (current->pid == 0) ? "RUNNING " : "IDLING  ");
#endif
	} else {
		printk("sys_cpus:Unknown request\n");
		return (-1);
	}

	return 0;
}
#endif /* ENTRY_SYS_CPUS */
#ifdef CONFIG_PROC_FS
int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
#endif