/*
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/pgtable.h>
#include <asm/hardirq.h>
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvCall.h>
#include <asm/iSeries/HvCallCfg.h>
#include <asm/ppcdebug.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/system.h>
int smp_threads_ready;
unsigned long cache_decay_ticks;

cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_available_map = CPU_MASK_NONE;
cpumask_t cpu_present_at_boot = CPU_MASK_NONE;

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);

struct smp_ops_t *smp_ops;

static volatile unsigned int cpu_callin_map[NR_CPUS];

extern unsigned char stab_array[];

extern int cpu_idle(void *unused);
void smp_call_function_interrupt(void);
extern long register_vpa(unsigned long flags, unsigned long proc,
                         unsigned long vpa);

/* Low level assembly function used to back up CPU 0 state */
extern void __save_cpu_setup(void);
#ifdef CONFIG_PPC_ISERIES
static unsigned long iSeries_smp_message[NR_CPUS];

void iSeries_smp_message_recv( struct pt_regs * regs )
        int cpu = smp_processor_id();

        if ( num_online_cpus() < 2 )

        for ( msg = 0; msg < 4; ++msg )
                if ( test_and_clear_bit( msg, &iSeries_smp_message[cpu] ) )
                        smp_message_recv( msg, regs );

static inline void smp_iSeries_do_message(int cpu, int msg)
        set_bit(msg, &iSeries_smp_message[cpu]);
        HvCall_sendIPI(&(paca[cpu]));
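/*
 * Note on the iSeries IPI scheme as visible above: the sender records the
 * message type as a bit in the target CPU's iSeries_smp_message word and
 * then asks the hypervisor (HvCall_sendIPI) to interrupt that CPU's paca;
 * the target drains all pending bits in iSeries_smp_message_recv().
 */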
static void smp_iSeries_message_pass(int target, int msg)
        if (target < NR_CPUS)
                smp_iSeries_do_message(target, msg);

        for_each_online_cpu(i) {
                if (target == MSG_ALL_BUT_SELF
                    && i == smp_processor_id())

                smp_iSeries_do_message(i, msg);
static int smp_iSeries_numProcs(void)
        for (i=0; i < NR_CPUS; ++i) {
                if (paca[i].lppaca.xDynProcStatus < 2) {
                        cpu_set(i, cpu_available_map);
                        cpu_set(i, cpu_possible_map);
                        cpu_set(i, cpu_present_at_boot);

static int smp_iSeries_probe(void)
        for (i=0; i < NR_CPUS; ++i) {
                if (paca[i].lppaca.xDynProcStatus < 2) {
                        /*paca[i].active = 1;*/

static void smp_iSeries_kick_cpu(int nr)
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        /* Verify that our partition has a processor nr */
        if (paca[nr].lppaca.xDynProcStatus >= 2)
        /* The processor is currently spinning, waiting
         * for the cpu_start field to become non-zero.
         * After we set cpu_start, the processor will
         * continue on to secondary_start in iSeries_head.S.
         */
        paca[nr].cpu_start = 1;
static void __devinit smp_iSeries_setup_cpu(int nr)

static struct smp_ops_t iSeries_smp_ops = {
        .message_pass = smp_iSeries_message_pass,
        .probe = smp_iSeries_probe,
        .kick_cpu = smp_iSeries_kick_cpu,
        .setup_cpu = smp_iSeries_setup_cpu,

/* This is called very early. */
void __init smp_init_iSeries(void)
        smp_ops = &iSeries_smp_ops;
        systemcfg->processorCount = smp_iSeries_numProcs();
#ifdef CONFIG_PPC_PSERIES
void smp_openpic_message_pass(int target, int msg)
        /* make sure we're sending something that translates to an IPI */
        printk("SMP %d: smp_message_pass: unknown msg %d\n",
               smp_processor_id(), msg);

        openpic_cause_IPI(msg, 0xffffffff);
        case MSG_ALL_BUT_SELF:
                openpic_cause_IPI(msg,
                                  0xffffffff & ~(1 << smp_processor_id()));

        openpic_cause_IPI(msg, 1<<target);
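/*
 * Target selection above: MSG_ALL broadcasts the IPI to every CPU
 * (mask 0xffffffff), MSG_ALL_BUT_SELF clears the sending CPU's bit from
 * that mask, and any other value is treated as a single CPU number
 * (mask 1<<target).
 */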
static int __init smp_openpic_probe(void)
        nr_cpus = cpus_weight(cpu_possible_map);

        openpic_request_IPIs();

static void __devinit smp_openpic_setup_cpu(int cpu)
        do_openpic_setup_cpu();

#ifdef CONFIG_HOTPLUG_CPU
/* Get state of physical CPU.
 * 0 - The processor is in the RTAS stopped state
 * 1 - stop-self is in progress
 * 2 - The processor is not in the RTAS stopped state
 * -1 - Hardware Error
 * -2 - Hardware Busy, Try again later.
 */
static int query_cpu_stopped(unsigned int pcpu)
        int status, qcss_tok;

        qcss_tok = rtas_token("query-cpu-stopped-state");
        BUG_ON(qcss_tok == RTAS_UNKNOWN_SERVICE);
        status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
               "RTAS query-cpu-stopped-state failed: %i\n", status);
int __cpu_disable(void)
        /* FIXME: go put this in a header somewhere */
        extern void xics_migrate_irqs_away(void);

        systemcfg->processorCount--;

        /* fix boot_cpuid here */
        if (smp_processor_id() == boot_cpuid)
                boot_cpuid = any_online_cpu(cpu_online_map);

        /* FIXME: abstract this to not be platform specific later on */
        xics_migrate_irqs_away();
void __cpu_die(unsigned int cpu)
        unsigned int pcpu = get_hard_smp_processor_id(cpu);

        for (tries = 0; tries < 5; tries++) {
                cpu_status = query_cpu_stopped(pcpu);

                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(HZ);

        if (cpu_status != 0) {
                printk("Querying DEAD? cpu %i (%i) shows %i\n",
                       cpu, pcpu, cpu_status);
        /* Isolation and deallocation are definitely done by
         * drslot_chrp_cpu.  If they were not they would be
         * done here.  Change isolate state to Isolate and
         * change allocation-state to Unusable.
         */
        paca[cpu].cpu_start = 0;

        /* So we can recognize if it fails to come up next time. */
        cpu_callin_map[cpu] = 0;
        /* Some hardware requires clearing the CPPR, while other hardware
         * does not; it is safe either way.
         */
        pSeriesLP_cppr_info(0, 0);

        /* Should never get here... */
/* Search all cpu device nodes for an offline logical cpu.  If a
 * device node has a "ibm,my-drc-index" property (meaning this is an
 * LPAR), paranoid-check whether we own the cpu.  For each "thread"
 * of a cpu, if it is offline and has the same hw index as before,
 * grab that in preference.
 */
static unsigned int find_physical_cpu_to_start(unsigned int old_hwindex)
        struct device_node *np = NULL;
        unsigned int best = -1U;

        while ((np = of_find_node_by_type(np, "cpu"))) {
                u32 *index = (u32 *)get_property(np, "ibm,my-drc-index", NULL);
                        get_property(np, "ibm,ppc-interrupt-server#s", &len);

                        tid = (u32 *)get_property(np, "reg", &len);
                /* If there is a drc-index, make sure that we own
                 * the cpu.
                 */
                        int rc = rtas_get_sensor(9003, *index, &state);
                        if (rc != 0 || state != 1)

                nr_threads = len / sizeof(u32);

                while (nr_threads--) {
                        if (0 == query_cpu_stopped(tid[nr_threads])) {
                                best = tid[nr_threads];
                                if (best == old_hwindex)
/*
 * smp_startup_cpu() - start the given cpu
 *
 * At boot time, there is nothing to do.  At run-time, call RTAS with
 * the appropriate start location, if the cpu is in the RTAS stopped
 * state.
 */
static inline int __devinit smp_startup_cpu(unsigned int lcpu)
        extern void (*pseries_secondary_smp_init)(unsigned int cpu);
        unsigned long start_here = __pa(pseries_secondary_smp_init);

        /* At boot time the cpus are already spinning in hold
         * loops, so nothing to do. */
        if (system_state == SYSTEM_BOOTING)

        pcpu = find_physical_cpu_to_start(get_hard_smp_processor_id(lcpu));
                printk(KERN_INFO "No more cpus available, failing\n");

        /* Fixup atomic count: it exited inside IRQ handler. */
        paca[lcpu].__current->thread_info->preempt_count = 0;

        /* At boot this is done in prom.c. */
        paca[lcpu].hw_cpu_id = pcpu;

        status = rtas_call(rtas_token("start-cpu"), 3, 1, NULL,
                           pcpu, start_here, lcpu);
                printk(KERN_ERR "start-cpu failed: %i\n", status);
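        /*
         * The RTAS "start-cpu" call above takes three inputs -- the physical
         * CPU to start, the real-mode entry point, and an argument handed to
         * the new CPU (here the logical cpu number) -- and returns one status
         * word; any non-zero status is reported as a failure.
         */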
static inline void look_for_more_cpus(void)
        int num_addr_cell, num_size_cell, len, i, maxcpus;
        struct device_node *np;

        /* Find the property which will tell us about how many CPUs
         * we're allowed to have. */
        if ((np = find_path_device("/rtas")) == NULL) {
                printk(KERN_ERR "Could not find /rtas in device tree!");

        num_addr_cell = prom_n_addr_cells(np);
        num_size_cell = prom_n_size_cells(np);

        ireg = (unsigned int *)get_property(np, "ibm,lrdr-capacity", &len);
                /* FIXME: make sure not marked as lrdr_capable() */

        maxcpus = ireg[num_addr_cell + num_size_cell];
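        /*
         * Assumption based on the indexing above: the "ibm,lrdr-capacity"
         * property starts with a maximum-memory value occupying
         * (#address-cells + #size-cells) cells, so the first cell after
         * that holds the maximum number of processors for the partition.
         */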
        /* Double maxcpus for processors which have SMT capability */
        if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)

        if (maxcpus > NR_CPUS) {
                       "Partition configured for %d cpus, "
                       "operating system maximum is %d.\n", maxcpus, NR_CPUS);

                printk(KERN_INFO "Partition configured for %d cpus.\n",

        /* Make those cpus (which might appear later) possible too. */
        for (i = 0; i < maxcpus; i++)
                cpu_set(i, cpu_possible_map);
#else /* ... CONFIG_HOTPLUG_CPU */
static inline int __devinit smp_startup_cpu(unsigned int lcpu)

static inline void look_for_more_cpus(void)

#endif /* CONFIG_HOTPLUG_CPU */

static void smp_pSeries_kick_cpu(int nr)
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        if (!smp_startup_cpu(nr))
        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero.  After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        paca[nr].cpu_start = 1;

#endif /* CONFIG_PPC_PSERIES */
static void __init smp_space_timers(unsigned int max_cpus)
        unsigned long offset = tb_ticks_per_jiffy / max_cpus;
        unsigned long previous_tb = paca[boot_cpuid].next_jiffy_update_tb;

                if (i != boot_cpuid) {
                        paca[i].next_jiffy_update_tb =
                                previous_tb + offset;
                        previous_tb = paca[i].next_jiffy_update_tb;
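        /*
         * The loop above staggers each secondary's first jiffy update by
         * tb_ticks_per_jiffy / max_cpus timebase ticks after the previous
         * CPU's, so the per-cpu timer interrupts are spread across the
         * jiffy rather than all landing on the same tick.
         */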
#ifdef CONFIG_PPC_PSERIES
void vpa_init(int cpu)
        /* Register the Virtual Processor Area (VPA) */
        flags = 1UL << (63 - 18);
        register_vpa(flags, cpu, __pa((unsigned long)&(paca[cpu].lppaca)));
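        /*
         * 1UL << (63 - 18) sets bit 18 in IBM (MSB = bit 0) numbering; the
         * exact flag semantics are defined by the hypervisor interface.
         * The last argument is the real address of this CPU's lppaca, which
         * is what gets registered as the Virtual Processor Area.
         */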
static inline void smp_xics_do_message(int cpu, int msg)
        set_bit(msg, &xics_ipi_message[cpu].value);

static void smp_xics_message_pass(int target, int msg)
        if (target < NR_CPUS) {
                smp_xics_do_message(target, msg);

                for_each_online_cpu(i) {
                        if (target == MSG_ALL_BUT_SELF
                            && i == smp_processor_id())

                        smp_xics_do_message(i, msg);
extern void xics_request_IPIs(void);

static int __init smp_xics_probe(void)
        return cpus_weight(cpu_possible_map);

static void __devinit smp_xics_setup_cpu(int cpu)
        if (cpu != boot_cpuid)

static spinlock_t timebase_lock = SPIN_LOCK_UNLOCKED;
static unsigned long timebase = 0;

static void __devinit pSeries_give_timebase(void)
        spin_lock(&timebase_lock);
        rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
        spin_unlock(&timebase_lock);

        rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);

static void __devinit pSeries_take_timebase(void)
        spin_lock(&timebase_lock);
        set_tb(timebase >> 32, timebase & 0xffffffff);
        spin_unlock(&timebase_lock);
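/*
 * Timebase hand-off for non-LPAR boards: pSeries_give_timebase() freezes
 * the timebase through RTAS and (in code not shown here) presumably
 * records the boot CPU's TB value in 'timebase' before thawing it;
 * pSeries_take_timebase() copies that value onto the new CPU with
 * set_tb() under the same timebase_lock.
 */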
static struct smp_ops_t pSeries_openpic_smp_ops = {
        .message_pass = smp_openpic_message_pass,
        .probe = smp_openpic_probe,
        .kick_cpu = smp_pSeries_kick_cpu,
        .setup_cpu = smp_openpic_setup_cpu,

static struct smp_ops_t pSeries_xics_smp_ops = {
        .message_pass = smp_xics_message_pass,
        .probe = smp_xics_probe,
        .kick_cpu = smp_pSeries_kick_cpu,
        .setup_cpu = smp_xics_setup_cpu,

/* This is called very early */
void __init smp_init_pSeries(void)
        if (naca->interrupt_controller == IC_OPEN_PIC)
                smp_ops = &pSeries_openpic_smp_ops;
                smp_ops = &pSeries_xics_smp_ops;

        /* Non-lpar has additional take/give timebase */
        if (systemcfg->platform == PLATFORM_PSERIES) {
                smp_ops->give_timebase = pSeries_give_timebase;
                smp_ops->take_timebase = pSeries_take_timebase;
void smp_local_timer_interrupt(struct pt_regs * regs)
        if (!--(get_paca()->prof_counter)) {
                update_process_times(user_mode(regs));
                (get_paca()->prof_counter) = get_paca()->prof_multiplier;

void smp_message_recv(int msg, struct pt_regs *regs)
        case PPC_MSG_CALL_FUNCTION:
                smp_call_function_interrupt();
        case PPC_MSG_RESCHEDULE:
                /* XXX Do we have to do this? */
        case PPC_MSG_MIGRATE_TASK:
#ifdef CONFIG_DEBUGGER
        case PPC_MSG_DEBUGGER_BREAK:
                printk("SMP %d: smp_message_recv(): unknown msg %d\n",
                       smp_processor_id(), msg);
void smp_send_reschedule(int cpu)
        smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
        smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);

static void stop_this_cpu(void *dummy)

void smp_send_stop(void)
        smp_call_function(stop_this_cpu, NULL, 1, 0);
/*
 * Structure and data for smp_call_function().  This is designed to minimise
 * static memory requirements.  It also looks cleaner.
 * Stolen from the i386 version.
 */
static spinlock_t call_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

static struct call_data_struct {
        void (*func) (void *info);

/* delay of at least 8 seconds on 1GHz cpu */
#define SMP_CALL_TIMEOUT	(1UL << (30 + 3))
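/*
 * 1UL << (30 + 3) == 2^33, roughly 8.6e9 spin iterations; assuming the
 * wait loop below costs about one cycle per iteration on a 1GHz CPU,
 * that gives the "at least 8 seconds" mentioned above.
 */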
/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <func> or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
                       int wait)
        struct call_data_struct data;
        unsigned long timeout;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        atomic_set(&data.started, 0);
        atomic_set(&data.finished, 0);

        spin_lock(&call_lock);
        /* Must grab online cpu count with preempt disabled, otherwise
         * it can change. */
        cpus = num_online_cpus() - 1;

        /* Send a message to all other CPUs and wait for them to respond */
        smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);

        /* Wait for response */
        timeout = SMP_CALL_TIMEOUT;
        while (atomic_read(&data.started) != cpus) {
                if (--timeout == 0) {
                        printk("smp_call_function on cpu %d: other cpus not "
                               "responding (%d)\n", smp_processor_id(),
                               atomic_read(&data.started));
        timeout = SMP_CALL_TIMEOUT;
        while (atomic_read(&data.finished) != cpus) {
                if (--timeout == 0) {
                        printk("smp_call_function on cpu %d: other "
                               "cpus not finishing (%d/%d)\n",
                               atomic_read(&data.finished),
                               atomic_read(&data.started));

        spin_unlock(&call_lock);

EXPORT_SYMBOL_GPL(smp_call_function);
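/*
 * Typical use (hypothetical caller, not code from this file): to run a
 * fast, non-blocking handler on every other CPU and wait for completion:
 *
 *	smp_call_function(my_flush_fn, NULL, 0, 1);
 *
 * where my_flush_fn is a stand-in name for a void fn(void *info) handler.
 */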
void smp_call_function_interrupt(void)
        void (*func) (void *info);

        /* call_data will be NULL if the sender timed out while
         * waiting on us to receive the call.
         */

        func = call_data->func;
        info = call_data->info;
        wait = call_data->wait;

        smp_mb__before_atomic_inc();
        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function.
         */
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1.
         */

        smp_mb__before_atomic_inc();
        atomic_inc(&call_data->finished);
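        /*
         * Handshake summary: the smp_mb__before_atomic_inc() calls order the
         * reads of call_data (and the execution of func) before the
         * increments that the sender polls, so bumping 'started' tells the
         * initiator its data has been consumed and bumping 'finished' tells
         * it the function has completed on this CPU.
         */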
extern unsigned long decr_overclock;
extern struct gettimeofday_struct do_gtod;

struct thread_info *current_set[NR_CPUS];

DECLARE_PER_CPU(unsigned int, pvr);

static void __devinit smp_store_cpu_info(int id)
        per_cpu(pvr, id) = _get_PVR();

static void __init smp_create_idle(unsigned int cpu)
        struct task_struct *p;

        /* create a process for the processor */
        /* only regs.msr is actually used, and 0 is OK for it */
        memset(&regs, 0, sizeof(struct pt_regs));
        p = copy_process(CLONE_VM | CLONE_IDLETASK,
                         0, &regs, 0, NULL, NULL);
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));

        wake_up_forked_process(p);

        paca[cpu].__current = p;
        current_set[cpu] = p->thread_info;
void __init smp_prepare_cpus(unsigned int max_cpus)
        /*
         * setup_cpu may need to be called on the boot cpu.  We haven't
         * spun any cpus up but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;
        paca[boot_cpuid].prof_counter = 1;
        paca[boot_cpuid].prof_multiplier = 1;

#ifndef CONFIG_PPC_ISERIES
        paca[boot_cpuid].next_jiffy_update_tb = tb_last_stamp = get_tb();

        /*
         * Should update do_gtod.stamp_xsec.
         * For now we leave it, which means the time can be off by some
         * number of msecs until someone calls settimeofday().
         */
        do_gtod.tb_orig_stamp = tb_last_stamp;

        look_for_more_cpus();
        max_cpus = smp_ops->probe();

        /* Backup CPU 0 state if necessary */

        smp_space_timers(max_cpus);

                if (cpu != boot_cpuid)
                        smp_create_idle(cpu);

void __devinit smp_prepare_boot_cpu(void)
        BUG_ON(smp_processor_id() != boot_cpuid);

        /* cpu_possible is set up in prom.c */
        cpu_set(boot_cpuid, cpu_online_map);

        paca[boot_cpuid].__current = current;
        current_set[boot_cpuid] = current->thread_info;

int __devinit __cpu_up(unsigned int cpu)
        /* At boot, don't bother with non-present cpus -JSCHOPP */
        if (system_state == SYSTEM_BOOTING && !cpu_present_at_boot(cpu))

        paca[cpu].prof_counter = 1;
        paca[cpu].prof_multiplier = 1;
        paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;

        if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
                /* maximum of 48 CPUs on machines with a segment table */

                tmp = &stab_array[PAGE_SIZE * cpu];
                memset(tmp, 0, PAGE_SIZE);
                paca[cpu].stab_addr = (unsigned long)tmp;
                paca[cpu].stab_real = virt_to_abs(tmp);
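                /*
                 * CPUs without the SLB facility fall back to a software
                 * segment table (stab): each CPU gets one page carved out
                 * of the statically allocated stab_array, which is what
                 * bounds such machines to the CPU count noted above.
                 */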
        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */

        smp_ops->kick_cpu(cpu);

        /*
         * wait to see if the cpu made a callin (is actually up).
         * use this value that I found through experimentation.
         */
        if (system_state == SYSTEM_BOOTING)
                for (c = 5000; c && !cpu_callin_map[cpu]; c--)

#ifdef CONFIG_HOTPLUG_CPU
        /*
         * CPUs can take much longer to come up in the
         * hotplug case.  Wait five seconds.
         */
        for (c = 25; c && !cpu_callin_map[cpu]; c--) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(HZ/5);

        if (!cpu_callin_map[cpu]) {
                printk("Processor %u is stuck.\n", cpu);

        printk("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until cpu puts itself in the online map */
        while (!cpu_online(cpu))

extern unsigned int default_distrib_server;

/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
        unsigned int cpu = smp_processor_id();

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);
        set_dec(paca[cpu].default_decr);
        cpu_callin_map[cpu] = 1;
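        /*
         * Setting cpu_callin_map[cpu] here is what releases the boot CPU's
         * wait loops in __cpu_up(), which have been polling this flag since
         * the CPU was kicked.
         */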
        smp_ops->setup_cpu(cpu);
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();

#ifdef CONFIG_PPC_PSERIES
        if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {

#ifdef CONFIG_IRQ_ALL_CPUS
        /* Put the calling processor into the GIQ.  This is really only
         * necessary from a secondary thread as the OF start-cpu interface
         * performs this function for us on primary threads.
         */
        /* TODO: 9005 is #defined in rtas-proc.c -- move to a header */
        rtas_set_indicator(9005, default_distrib_server, 1);
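        /*
         * rtas_set_indicator(9005, ...) manipulates the global interrupt
         * queue indicator (the same 9005 token the TODO above refers to);
         * passing 1 joins this CPU to the set of processors that may be
         * handed interrupts from the global queue.
         */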
        spin_lock(&call_lock);
        cpu_set(cpu, cpu_online_map);
        spin_unlock(&call_lock);

        return cpu_idle(NULL);

int setup_profiling_timer(unsigned int multiplier)

void __init smp_cpus_done(unsigned int max_cpus)
        /* We want the setup_cpu() here to be called from CPU 0, but our
         * init thread may have been "borrowed" by another CPU in the
         * meantime, so we pin ourselves to CPU 0 for a short while.
         */
        old_mask = current->cpus_allowed;
        set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

        smp_ops->setup_cpu(boot_cpuid);

        /* XXX fix this, xics currently relies on it - Anton */
        smp_threads_ready = 1;

        set_cpus_allowed(current, old_mask);
#ifdef CONFIG_SCHED_SMT
static struct sched_group sched_group_cpus[NR_CPUS];
static struct sched_group sched_group_phys[NR_CPUS];
static struct sched_group sched_group_nodes[MAX_NUMNODES];
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
static DEFINE_PER_CPU(struct sched_domain, node_domains);

__init void arch_init_sched_domains(void)
        struct sched_group *first = NULL, *last = NULL;

        /* Set up domains */
                struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
                struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
                struct sched_domain *node_domain = &per_cpu(node_domains, i);
                int node = cpu_to_node(i);
                cpumask_t nodemask = node_to_cpumask(node);
                cpumask_t my_cpumask = cpumask_of_cpu(i);
                cpumask_t sibling_cpumask = cpumask_of_cpu(i ^ 0x1);
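                /*
                 * sibling_cpumask pairs logical CPU i with CPU i ^ 0x1,
                 * i.e. the even and odd logical CPU numbers of one core are
                 * treated as SMT siblings when CPU_FTR_SMT is set below.
                 */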
                *cpu_domain = SD_SIBLING_INIT;
                if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
                        cpus_or(cpu_domain->span, my_cpumask, sibling_cpumask);
                        cpu_domain->span = my_cpumask;
                cpu_domain->parent = phys_domain;
                cpu_domain->groups = &sched_group_cpus[i];

                *phys_domain = SD_CPU_INIT;
                phys_domain->span = nodemask;
                phys_domain->parent = node_domain;
                phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];

                *node_domain = SD_NODE_INIT;
                node_domain->span = cpu_possible_map;
                node_domain->groups = &sched_group_nodes[node];

        /* Set up CPU (sibling) groups */
                struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);

                first = last = NULL;
                if (i != first_cpu(cpu_domain->span))

                for_each_cpu_mask(j, cpu_domain->span) {
                        struct sched_group *cpu = &sched_group_cpus[j];

                        cpus_clear(cpu->cpumask);
                        cpu_set(j, cpu->cpumask);
                        cpu->cpu_power = SCHED_LOAD_SCALE;

        for (i = 0; i < MAX_NUMNODES; i++) {
                struct sched_group *node = &sched_group_nodes[i];
                cpumask_t node_cpumask = node_to_cpumask(i);
                cpus_and(nodemask, node_cpumask, cpu_possible_map);

                if (cpus_empty(nodemask))

                first = last = NULL;
                /* Set up physical groups */
                for_each_cpu_mask(j, nodemask) {
                        struct sched_domain *cpu_domain = &per_cpu(cpu_domains, j);
                        struct sched_group *cpu = &sched_group_phys[j];

                        if (j != first_cpu(cpu_domain->span))

                        cpu->cpumask = cpu_domain->span;
                        /*
                         * Make each extra sibling increase power by 10% of
                         * the basic CPU.  This is very arbitrary.
                         */
                        cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;
                        node->cpu_power += cpu->cpu_power;
        first = last = NULL;
        for (i = 0; i < MAX_NUMNODES; i++) {
                struct sched_group *cpu = &sched_group_nodes[i];
                cpumask_t node_cpumask = node_to_cpumask(i);
                cpus_and(nodemask, node_cpumask, cpu_possible_map);

                if (cpus_empty(nodemask))

                cpu->cpumask = nodemask;
                /* ->cpu_power already setup */

                struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
                cpu_attach_domain(cpu_domain, i);

#else /* !CONFIG_NUMA */
static struct sched_group sched_group_cpus[NR_CPUS];
static struct sched_group sched_group_phys[NR_CPUS];
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_domain, phys_domains);

__init void arch_init_sched_domains(void)
        struct sched_group *first = NULL, *last = NULL;

        /* Set up domains */
                struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
                struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
                cpumask_t my_cpumask = cpumask_of_cpu(i);
                cpumask_t sibling_cpumask = cpumask_of_cpu(i ^ 0x1);

                *cpu_domain = SD_SIBLING_INIT;
                if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
                        cpus_or(cpu_domain->span, my_cpumask, sibling_cpumask);
                        cpu_domain->span = my_cpumask;
                cpu_domain->parent = phys_domain;
                cpu_domain->groups = &sched_group_cpus[i];

                *phys_domain = SD_CPU_INIT;
                phys_domain->span = cpu_possible_map;
                phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];

        /* Set up CPU (sibling) groups */
                struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);

                first = last = NULL;
                if (i != first_cpu(cpu_domain->span))

                for_each_cpu_mask(j, cpu_domain->span) {
                        struct sched_group *cpu = &sched_group_cpus[j];

                        cpus_clear(cpu->cpumask);
                        cpu_set(j, cpu->cpumask);
                        cpu->cpu_power = SCHED_LOAD_SCALE;

        first = last = NULL;
        /* Set up physical groups */
                struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
                struct sched_group *cpu = &sched_group_phys[i];

                if (i != first_cpu(cpu_domain->span))

                cpu->cpumask = cpu_domain->span;
                /* See SMT+NUMA setup for comment */
                cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;

                struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
                cpu_attach_domain(cpu_domain, i);

#endif /* CONFIG_NUMA */
#endif /* CONFIG_SCHED_SMT */