4 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
5 * deal of code from the sparc and intel versions.
7 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
9 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
10 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
18 #include <linux/config.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/sched.h>
22 #include <linux/smp.h>
23 #include <linux/smp_lock.h>
24 #include <linux/interrupt.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/spinlock.h>
29 #include <linux/cache.h>
30 #include <linux/err.h>
31 #include <linux/sysdev.h>
32 #include <linux/cpu.h>
34 #include <asm/ptrace.h>
35 #include <asm/atomic.h>
38 #include <asm/pgtable.h>
39 #include <asm/hardirq.h>
45 #include <asm/iSeries/LparData.h>
46 #include <asm/iSeries/HvCall.h>
47 #include <asm/iSeries/HvCallCfg.h>
49 #include <asm/ppcdebug.h>
51 #include <asm/machdep.h>
53 #include <asm/cputable.h>
54 #include <asm/system.h>
57 int smp_threads_ready;
58 unsigned long cache_decay_ticks;
60 cpumask_t cpu_possible_map = CPU_MASK_NONE;
61 cpumask_t cpu_online_map = CPU_MASK_NONE;
62 cpumask_t cpu_available_map = CPU_MASK_NONE;
63 cpumask_t cpu_present_at_boot = CPU_MASK_NONE;
65 EXPORT_SYMBOL(cpu_online_map);
66 EXPORT_SYMBOL(cpu_possible_map);
68 struct smp_ops_t *smp_ops;
70 static volatile unsigned int cpu_callin_map[NR_CPUS];
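/*
 * Boot/hotplug handshake: a secondary sets its cpu_callin_map slot from
 * start_secondary() once it is running C code, __cpu_up() polls that slot
 * to decide whether the kick worked, and __cpu_die() clears it so a cpu
 * that fails to come back later can be recognized.
 */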
72 extern unsigned char stab_array[];
74 extern int cpu_idle(void *unused);
75 void smp_call_function_interrupt(void);
76 extern long register_vpa(unsigned long flags, unsigned long proc,
79 /* Low level assembly function used to back up CPU 0 state */
80 extern void __save_cpu_setup(void);
82 #ifdef CONFIG_PPC_ISERIES
83 static unsigned long iSeries_smp_message[NR_CPUS];
85 void iSeries_smp_message_recv( struct pt_regs * regs )
87 int cpu = smp_processor_id();
90 if ( num_online_cpus() < 2 )
93 for ( msg = 0; msg < 4; ++msg )
94 if ( test_and_clear_bit( msg, &iSeries_smp_message[cpu] ) )
95 smp_message_recv( msg, regs );
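/*
 * Sending side of the iSeries IPI scheme drained above: set the bit for
 * the message type in the target cpu's iSeries_smp_message word, then ask
 * the hypervisor to interrupt that cpu; iSeries_smp_message_recv() picks
 * the bits back off with test_and_clear_bit() and hands each message to
 * smp_message_recv().
 */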
98 static inline void smp_iSeries_do_message(int cpu, int msg)
100 set_bit(msg, &iSeries_smp_message[cpu]);
101 HvCall_sendIPI(&(paca[cpu]));
104 static void smp_iSeries_message_pass(int target, int msg)
108 if (target < NR_CPUS)
109 smp_iSeries_do_message(target, msg);
111 for_each_online_cpu(i) {
112 if (target == MSG_ALL_BUT_SELF
113 && i == smp_processor_id())
115 smp_iSeries_do_message(i, msg);
120 static int smp_iSeries_numProcs(void)
125 for (i=0; i < NR_CPUS; ++i) {
126 if (paca[i].lppaca.xDynProcStatus < 2) {
127 cpu_set(i, cpu_available_map);
128 cpu_set(i, cpu_possible_map);
129 cpu_set(i, cpu_present_at_boot);
136 static int smp_iSeries_probe(void)
141 for (i=0; i < NR_CPUS; ++i) {
142 if (paca[i].lppaca.xDynProcStatus < 2) {
143 /*paca[i].active = 1;*/
151 static void smp_iSeries_kick_cpu(int nr)
153 BUG_ON(nr < 0 || nr >= NR_CPUS);
155 /* Verify that our partition has a processor nr */
156 if (paca[nr].lppaca.xDynProcStatus >= 2)
159 /* The processor is currently spinning, waiting
160 * for the cpu_start field to become non-zero.
161 * After we set cpu_start, the processor will
162 * continue on to secondary_start in iSeries_head.S
164 paca[nr].cpu_start = 1;
167 static void __devinit smp_iSeries_setup_cpu(int nr)
171 static struct smp_ops_t iSeries_smp_ops = {
172 .message_pass = smp_iSeries_message_pass,
173 .probe = smp_iSeries_probe,
174 .kick_cpu = smp_iSeries_kick_cpu,
175 .setup_cpu = smp_iSeries_setup_cpu,
178 /* This is called very early. */
179 void __init smp_init_iSeries(void)
181 smp_ops = &iSeries_smp_ops;
182 systemcfg->processorCount = smp_iSeries_numProcs();
186 #ifdef CONFIG_PPC_PSERIES
187 void smp_openpic_message_pass(int target, int msg)
189 /* make sure we're sending something that translates to an IPI */
191 printk("SMP %d: smp_message_pass: unknown msg %d\n",
192 smp_processor_id(), msg);
198 openpic_cause_IPI(msg, 0xffffffff);
200 case MSG_ALL_BUT_SELF:
201 openpic_cause_IPI(msg,
202 0xffffffff & ~(1 << smp_processor_id()));
205 openpic_cause_IPI(msg, 1<<target);
210 static int __init smp_openpic_probe(void)
214 nr_cpus = cpus_weight(cpu_possible_map);
217 openpic_request_IPIs();
222 static void __devinit smp_openpic_setup_cpu(int cpu)
224 do_openpic_setup_cpu();
227 #ifdef CONFIG_HOTPLUG_CPU
228 /* Get state of physical CPU.
230 * 0 - The processor is in the RTAS stopped state
231 * 1 - stop-self is in progress
232 * 2 - The processor is not in the RTAS stopped state
233 * -1 - Hardware Error
234 * -2 - Hardware Busy, Try again later.
236 static int query_cpu_stopped(unsigned int pcpu)
239 int status, qcss_tok;
241 qcss_tok = rtas_token("query-cpu-stopped-state");
242 BUG_ON(qcss_tok == RTAS_UNKNOWN_SERVICE);
243 status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
246 "RTAS query-cpu-stopped-state failed: %i\n", status);
253 int __cpu_disable(void)
255 /* FIXME: go put this in a header somewhere */
256 extern void xics_migrate_irqs_away(void);
258 systemcfg->processorCount--;
260 /*fix boot_cpuid here*/
261 if (smp_processor_id() == boot_cpuid)
262 boot_cpuid = any_online_cpu(cpu_online_map);
264 /* FIXME: abstract this to not be platform specific later on */
265 xics_migrate_irqs_away();
269 void __cpu_die(unsigned int cpu)
273 unsigned int pcpu = get_hard_smp_processor_id(cpu);
275 for (tries = 0; tries < 5; tries++) {
276 cpu_status = query_cpu_stopped(pcpu);
280 set_current_state(TASK_UNINTERRUPTIBLE);
281 schedule_timeout(HZ);
283 if (cpu_status != 0) {
284 printk("Querying DEAD? cpu %i (%i) shows %i\n",
285 cpu, pcpu, cpu_status);
288 /* Isolation and deallocation are definitely done by
289 * drslot_chrp_cpu. If they were not they would be
290 * done here. Change isolate state to Isolate and
291 * change allocation-state to Unusable.
293 paca[cpu].cpu_start = 0;
295 /* So we can recognize if it fails to come up next time. */
296 cpu_callin_map[cpu] = 0;
304 /* Should never get here... */
309 /* Search all cpu device nodes for an offline logical cpu. If a
310 * device node has an "ibm,my-drc-index" property (meaning this is an
311 * LPAR), paranoid-check whether we own the cpu. For each "thread"
312 * of a cpu, if it is offline and has the same hw index as before,
313 * grab that in preference.
315 static unsigned int find_physical_cpu_to_start(unsigned int old_hwindex)
317 struct device_node *np = NULL;
318 unsigned int best = -1U;
320 while ((np = of_find_node_by_type(np, "cpu"))) {
322 u32 *index = (u32 *)get_property(np, "ibm,my-drc-index", NULL);
324 get_property(np, "ibm,ppc-interrupt-server#s", &len);
327 tid = (u32 *)get_property(np, "reg", &len);
332 /* If there is a drc-index, make sure that we own
337 int rc = rtas_get_sensor(9003, *index, &state);
338 if (rc != 0 || state != 1)
342 nr_threads = len / sizeof(u32);
344 while (nr_threads--) {
345 if (0 == query_cpu_stopped(tid[nr_threads])) {
346 best = tid[nr_threads];
347 if (best == old_hwindex)
358 * smp_startup_cpu() - start the given cpu
360 * At boot time, there is nothing to do. At run-time, call RTAS with
361 * the appropriate start location, if the cpu is in the RTAS stopped
368 static inline int __devinit smp_startup_cpu(unsigned int lcpu)
371 extern void (*pseries_secondary_smp_init)(unsigned int cpu);
372 unsigned long start_here = __pa(pseries_secondary_smp_init);
375 /* At boot time the cpus are already spinning in hold
376 * loops, so nothing to do. */
377 if (system_state == SYSTEM_BOOTING)
380 pcpu = find_physical_cpu_to_start(get_hard_smp_processor_id(lcpu));
382 printk(KERN_INFO "No more cpus available, failing\n");
386 /* Fixup atomic count: it exited inside IRQ handler. */
387 paca[lcpu].__current->thread_info->preempt_count = 0;
388 /* Fixup SLB round-robin so next segment (kernel) goes in segment 0 */
389 paca[lcpu].stab_next_rr = 0;
391 /* At boot this is done in prom.c. */
392 paca[lcpu].hw_cpu_id = pcpu;
394 status = rtas_call(rtas_token("start-cpu"), 3, 1, NULL,
395 pcpu, start_here, lcpu);
397 printk(KERN_ERR "start-cpu failed: %i\n", status);
403 static inline void look_for_more_cpus(void)
405 int num_addr_cell, num_size_cell, len, i, maxcpus;
406 struct device_node *np;
409 /* Find the property which will tell us how many CPUs
410 * we're allowed to have. */
411 if ((np = find_path_device("/rtas")) == NULL) {
412 printk(KERN_ERR "Could not find /rtas in device tree!\n");
415 num_addr_cell = prom_n_addr_cells(np);
416 num_size_cell = prom_n_size_cells(np);
418 ireg = (unsigned int *)get_property(np, "ibm,lrdr-capacity", &len);
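/* The assumed layout of "ibm,lrdr-capacity" is an address (num_addr_cell
 * cells) and a size (num_size_cell cells) followed by the maximum number
 * of cpus the partition may be given, hence the
 * ireg[num_addr_cell + num_size_cell] index below.
 */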
420 /* FIXME: make sure not marked as lrdr_capable() */
424 maxcpus = ireg[num_addr_cell + num_size_cell];
425 /* DRENG need to account for threads here too */
427 if (maxcpus > NR_CPUS) {
429 "Partition configured for %d cpus, "
430 "operating system maximum is %d.\n", maxcpus, NR_CPUS);
433 printk(KERN_INFO "Partition configured for %d cpus.\n",
436 /* Make those cpus (which might appear later) possible too. */
437 for (i = 0; i < maxcpus; i++)
438 cpu_set(i, cpu_possible_map);
440 #else /* ... CONFIG_HOTPLUG_CPU */
441 static inline int __devinit smp_startup_cpu(unsigned int lcpu)
445 static inline void look_for_more_cpus(void)
448 #endif /* CONFIG_HOTPLUG_CPU */
450 static void smp_pSeries_kick_cpu(int nr)
452 BUG_ON(nr < 0 || nr >= NR_CPUS);
454 if (!smp_startup_cpu(nr))
458 * The processor is currently spinning, waiting for the
459 * cpu_start field to become non-zero. After we set cpu_start,
460 * the processor will continue on to secondary_start
462 paca[nr].cpu_start = 1;
464 #endif /* CONFIG_PPC_PSERIES */
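/*
 * Stagger each cpu's next_jiffy_update_tb by tb_ticks_per_jiffy / max_cpus,
 * presumably so the cpus do not all try to update jiffies/time state on the
 * same timebase tick.
 */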
466 static void __init smp_space_timers(unsigned int max_cpus)
469 unsigned long offset = tb_ticks_per_jiffy / max_cpus;
470 unsigned long previous_tb = paca[boot_cpuid].next_jiffy_update_tb;
473 if (i != boot_cpuid) {
474 paca[i].next_jiffy_update_tb =
475 previous_tb + offset;
476 previous_tb = paca[i].next_jiffy_update_tb;
481 #ifdef CONFIG_PPC_PSERIES
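/*
 * vpa_init() registers the lppaca embedded in this cpu's paca as its
 * Virtual Processor Area with the hypervisor; __pa() is used because the
 * hypervisor presumably expects a real (physical) address rather than a
 * kernel virtual one.
 */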
482 void vpa_init(int cpu)
486 /* Register the Virtual Processor Area (VPA) */
487 flags = 1UL << (63 - 18);
488 register_vpa(flags, cpu, __pa((unsigned long)&(paca[cpu].lppaca)));
491 static inline void smp_xics_do_message(int cpu, int msg)
493 set_bit(msg, &xics_ipi_message[cpu].value);
498 static void smp_xics_message_pass(int target, int msg)
502 if (target < NR_CPUS) {
503 smp_xics_do_message(target, msg);
505 for_each_online_cpu(i) {
506 if (target == MSG_ALL_BUT_SELF
507 && i == smp_processor_id())
509 smp_xics_do_message(i, msg);
514 extern void xics_request_IPIs(void);
516 static int __init smp_xics_probe(void)
522 return cpus_weight(cpu_possible_map);
525 static void __devinit smp_xics_setup_cpu(int cpu)
527 if (cpu != boot_cpuid)
531 static spinlock_t timebase_lock = SPIN_LOCK_UNLOCKED;
532 static unsigned long timebase = 0;
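/*
 * Timebase hand-off for non-LPAR pSeries: the giver freezes the timebase
 * through RTAS, publishes its own TB value in the timebase variable under
 * timebase_lock, and thaws the timebase again once the taker has copied it
 * with set_tb(); the elided lines presumably spin until the other side has
 * produced or consumed the value.
 */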
534 static void __devinit pSeries_give_timebase(void)
536 spin_lock(&timebase_lock);
537 rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
539 spin_unlock(&timebase_lock);
543 rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
546 static void __devinit pSeries_take_timebase(void)
550 spin_lock(&timebase_lock);
551 set_tb(timebase >> 32, timebase & 0xffffffff);
553 spin_unlock(&timebase_lock);
556 static struct smp_ops_t pSeries_openpic_smp_ops = {
557 .message_pass = smp_openpic_message_pass,
558 .probe = smp_openpic_probe,
559 .kick_cpu = smp_pSeries_kick_cpu,
560 .setup_cpu = smp_openpic_setup_cpu,
563 static struct smp_ops_t pSeries_xics_smp_ops = {
564 .message_pass = smp_xics_message_pass,
565 .probe = smp_xics_probe,
566 .kick_cpu = smp_pSeries_kick_cpu,
567 .setup_cpu = smp_xics_setup_cpu,
570 /* This is called very early */
571 void __init smp_init_pSeries(void)
574 if (naca->interrupt_controller == IC_OPEN_PIC)
575 smp_ops = &pSeries_openpic_smp_ops;
577 smp_ops = &pSeries_xics_smp_ops;
579 /* Non-lpar has additional take/give timebase */
580 if (systemcfg->platform == PLATFORM_PSERIES) {
581 smp_ops->give_timebase = pSeries_give_timebase;
582 smp_ops->take_timebase = pSeries_take_timebase;
587 void smp_local_timer_interrupt(struct pt_regs * regs)
589 if (!--(get_paca()->prof_counter)) {
590 update_process_times(user_mode(regs));
591 (get_paca()->prof_counter)=get_paca()->prof_multiplier;
595 void smp_message_recv(int msg, struct pt_regs *regs)
598 case PPC_MSG_CALL_FUNCTION:
599 smp_call_function_interrupt();
601 case PPC_MSG_RESCHEDULE:
602 /* XXX Do we have to do this? */
606 case PPC_MSG_MIGRATE_TASK:
610 #ifdef CONFIG_DEBUGGER
611 case PPC_MSG_DEBUGGER_BREAK:
616 printk("SMP %d: smp_message_recv(): unknown msg %d\n",
617 smp_processor_id(), msg);
622 void smp_send_reschedule(int cpu)
624 smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
627 #ifdef CONFIG_DEBUGGER
628 void smp_send_debugger_break(int cpu)
630 smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
634 static void stop_this_cpu(void *dummy)
641 void smp_send_stop(void)
643 smp_call_function(stop_this_cpu, NULL, 1, 0);
647 * Structure and data for smp_call_function(). This is designed to minimise
648 * static memory requirements. It also looks cleaner.
649 * Stolen from the i386 version.
651 static spinlock_t call_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
653 static struct call_data_struct {
654 void (*func) (void *info);
661 /* delay of at least 8 seconds on a 1GHz cpu */
662 #define SMP_CALL_TIMEOUT (1UL << (30 + 3))
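/*
 * Rough arithmetic behind the comment above: 1UL << 33 is about 8.6e9 loop
 * iterations; at roughly one iteration per cycle on a 1GHz cpu that is on
 * the order of 8-9 seconds before we give up waiting.
 */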
665 * This function sends a 'generic call function' IPI to all other CPUs
668 * [SUMMARY] Run a function on all other CPUs.
669 * <func> The function to run. This must be fast and non-blocking.
670 * <info> An arbitrary pointer to pass to the function.
671 * <nonatomic> currently unused.
672 * <wait> If true, wait (atomically) until function has completed on other CPUs.
673 * [RETURNS] 0 on success, else a negative status code. Does not return until
674 * remote CPUs are nearly ready to execute <<func>> or have already executed it.
676 * You must not call this function with disabled interrupts or from a
677 * hardware interrupt handler or from a bottom half handler.
679 int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
682 struct call_data_struct data;
684 unsigned long timeout;
686 /* Can deadlock when called with interrupts disabled */
687 WARN_ON(irqs_disabled());
691 atomic_set(&data.started, 0);
694 atomic_set(&data.finished, 0);
696 spin_lock(&call_lock);
697 /* Must grab online cpu count with preempt disabled, otherwise
699 cpus = num_online_cpus() - 1;
707 /* Send a message to all other CPUs and wait for them to respond */
708 smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);
710 /* Wait for response */
711 timeout = SMP_CALL_TIMEOUT;
712 while (atomic_read(&data.started) != cpus) {
714 if (--timeout == 0) {
715 printk("smp_call_function on cpu %d: other cpus not "
716 "responding (%d)\n", smp_processor_id(),
717 atomic_read(&data.started));
724 timeout = SMP_CALL_TIMEOUT;
725 while (atomic_read(&data.finished) != cpus) {
727 if (--timeout == 0) {
728 printk("smp_call_function on cpu %d: other "
729 "cpus not finishing (%d/%d)\n",
731 atomic_read(&data.finished),
732 atomic_read(&data.started));
744 spin_unlock(&call_lock);
748 EXPORT_SYMBOL_GPL(smp_call_function);
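/*
 * Receiver side of smp_call_function(): pick up func/info/wait from the
 * shared call_data, bump data.started so the initiator (spinning above on
 * atomic_read(&data.started)) can continue, run the function, and, when the
 * caller asked to wait, bump data.finished as well.
 */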
750 void smp_call_function_interrupt(void)
752 void (*func) (void *info);
756 /* call_data will be NULL if the sender timed out while
757 * waiting on us to receive the call.
762 func = call_data->func;
763 info = call_data->info;
764 wait = call_data->wait;
767 smp_mb__before_atomic_inc();
770 * Notify initiating CPU that I've grabbed the data and am
771 * about to execute the function
773 atomic_inc(&call_data->started);
775 * At this point the info structure may be out of scope unless wait==1
779 smp_mb__before_atomic_inc();
780 atomic_inc(&call_data->finished);
784 extern unsigned long decr_overclock;
785 extern struct gettimeofday_struct do_gtod;
787 struct thread_info *current_set[NR_CPUS];
789 DECLARE_PER_CPU(unsigned int, pvr);
791 static void __devinit smp_store_cpu_info(int id)
793 per_cpu(pvr, id) = _get_PVR();
796 static void __init smp_create_idle(unsigned int cpu)
799 struct task_struct *p;
801 /* create a process for the processor */
802 /* only regs.msr is actually used, and 0 is OK for it */
803 memset(&regs, 0, sizeof(struct pt_regs));
804 p = copy_process(CLONE_VM | CLONE_IDLETASK,
805 0, &regs, 0, NULL, NULL);
807 panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
809 wake_up_forked_process(p);
813 paca[cpu].__current = p;
814 current_set[cpu] = p->thread_info;
817 void __init smp_prepare_cpus(unsigned int max_cpus)
822 * setup_cpu may need to be called on the boot cpu. We haven't
823 * spun any cpus up, but let's be paranoid.
825 BUG_ON(boot_cpuid != smp_processor_id());
828 smp_store_cpu_info(boot_cpuid);
829 cpu_callin_map[boot_cpuid] = 1;
830 paca[boot_cpuid].prof_counter = 1;
831 paca[boot_cpuid].prof_multiplier = 1;
833 #ifndef CONFIG_PPC_ISERIES
834 paca[boot_cpuid].next_jiffy_update_tb = tb_last_stamp = get_tb();
837 * Should update do_gtod.stamp_xsec.
838 * For now we leave it which means the time can be some
839 * number of msecs off until someone does a settimeofday()
841 do_gtod.tb_orig_stamp = tb_last_stamp;
843 look_for_more_cpus();
846 max_cpus = smp_ops->probe();
848 /* Backup CPU 0 state if necessary */
851 smp_space_timers(max_cpus);
854 if (cpu != boot_cpuid)
855 smp_create_idle(cpu);
858 void __devinit smp_prepare_boot_cpu(void)
860 BUG_ON(smp_processor_id() != boot_cpuid);
862 /* cpu_possible is set up in prom.c */
863 cpu_set(boot_cpuid, cpu_online_map);
865 paca[boot_cpuid].__current = current;
866 current_set[boot_cpuid] = current->thread_info;
869 int __devinit __cpu_up(unsigned int cpu)
873 /* At boot, don't bother with non-present cpus -JSCHOPP */
874 if (system_state == SYSTEM_BOOTING && !cpu_present_at_boot(cpu))
877 paca[cpu].prof_counter = 1;
878 paca[cpu].prof_multiplier = 1;
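/* A default_decr of tb_ticks_per_jiffy / decr_overclock makes the
 * decrementer fire decr_overclock times per jiffy (once per jiffy when
 * decr_overclock is 1). */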
879 paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;
881 if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
884 /* maximum of 48 CPUs on machines with a segment table */
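/* Each such cpu gets one zeroed page out of stab_array for its segment
 * table; the paca records its location in both virtual (stab_addr) and
 * absolute/real (stab_real) form. */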
888 tmp = &stab_array[PAGE_SIZE * cpu];
889 memset(tmp, 0, PAGE_SIZE);
890 paca[cpu].stab_addr = (unsigned long)tmp;
891 paca[cpu].stab_real = virt_to_abs(tmp);
894 /* The information for processor bringup must
895 * be written out to main store before we release
901 smp_ops->kick_cpu(cpu);
904 * Wait to see if the cpu made a callin (is actually up);
905 * the delay below was found through experimentation.
908 if (system_state == SYSTEM_BOOTING)
909 for (c = 5000; c && !cpu_callin_map[cpu]; c--)
911 #ifdef CONFIG_HOTPLUG_CPU
914 * CPUs can take much longer to come up in the
915 * hotplug case. Wait five seconds.
917 for (c = 25; c && !cpu_callin_map[cpu]; c--) {
918 set_current_state(TASK_UNINTERRUPTIBLE);
919 schedule_timeout(HZ/5);
923 if (!cpu_callin_map[cpu]) {
924 printk("Processor %u is stuck.\n", cpu);
928 printk("Processor %u found.\n", cpu);
930 if (smp_ops->give_timebase)
931 smp_ops->give_timebase();
932 cpu_set(cpu, cpu_online_map);
936 extern unsigned int default_distrib_server;
937 /* Activate a secondary processor. */
938 int __devinit start_secondary(void *unused)
940 unsigned int cpu = smp_processor_id();
942 atomic_inc(&init_mm.mm_count);
943 current->active_mm = &init_mm;
945 smp_store_cpu_info(cpu);
946 set_dec(paca[cpu].default_decr);
947 cpu_callin_map[cpu] = 1;
949 smp_ops->setup_cpu(cpu);
950 if (smp_ops->take_timebase)
951 smp_ops->take_timebase();
953 #ifdef CONFIG_PPC_PSERIES
954 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
958 #ifdef CONFIG_IRQ_ALL_CPUS
959 /* Put the calling processor into the GIQ. This is really only
960 * necessary from a secondary thread as the OF start-cpu interface
961 * performs this function for us on primary threads.
963 /* TODO: 9005 is #defined in rtas-proc.c -- move to a header */
964 rtas_set_indicator(9005, default_distrib_server, 1);
970 return cpu_idle(NULL);
973 int setup_profiling_timer(unsigned int multiplier)
978 void __init smp_cpus_done(unsigned int max_cpus)
982 /* We want the setup_cpu() here to be called from CPU 0, but our
983 * init thread may have been "borrowed" by another CPU in the meantime,
984 * so we pin ourselves to CPU 0 for a short while
986 old_mask = current->cpus_allowed;
987 set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));
989 smp_ops->setup_cpu(boot_cpuid);
991 /* XXX fix this, xics currently relies on it - Anton */
992 smp_threads_ready = 1;
994 set_cpus_allowed(current, old_mask);
997 #ifdef CONFIG_SCHED_SMT
999 static struct sched_group sched_group_cpus[NR_CPUS];
1000 static struct sched_group sched_group_phys[NR_CPUS];
1001 static struct sched_group sched_group_nodes[MAX_NUMNODES];
1002 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
1003 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
1004 static DEFINE_PER_CPU(struct sched_domain, node_domains);
1005 __init void arch_init_sched_domains(void)
1008 struct sched_group *first = NULL, *last = NULL;
1010 /* Set up domains */
1012 struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
1013 struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
1014 struct sched_domain *node_domain = &per_cpu(node_domains, i);
1015 int node = cpu_to_node(i);
1016 cpumask_t nodemask = node_to_cpumask(node);
1017 cpumask_t my_cpumask = cpumask_of_cpu(i);
1018 cpumask_t sibling_cpumask = cpumask_of_cpu(i ^ 0x1);
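/* cpumask_of_cpu(i ^ 0x1) assumes two hardware threads per core with
 * consecutive even/odd cpu numbers, so flipping the low bit of i yields
 * the SMT sibling (e.g. cpu 4 <-> cpu 5). */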
1020 *cpu_domain = SD_SIBLING_INIT;
1021 if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
1022 cpus_or(cpu_domain->span, my_cpumask, sibling_cpumask);
1024 cpu_domain->span = my_cpumask;
1025 cpu_domain->parent = phys_domain;
1026 cpu_domain->groups = &sched_group_cpus[i];
1028 *phys_domain = SD_CPU_INIT;
1029 phys_domain->span = nodemask;
1030 phys_domain->parent = node_domain;
1031 phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];
1033 *node_domain = SD_NODE_INIT;
1034 node_domain->span = cpu_possible_map;
1035 node_domain->groups = &sched_group_nodes[node];
1038 /* Set up CPU (sibling) groups */
1040 struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
1042 first = last = NULL;
1044 if (i != first_cpu(cpu_domain->span))
1047 for_each_cpu_mask(j, cpu_domain->span) {
1048 struct sched_group *cpu = &sched_group_cpus[j];
1050 cpus_clear(cpu->cpumask);
1051 cpu_set(j, cpu->cpumask);
1052 cpu->cpu_power = SCHED_LOAD_SCALE;
1063 for (i = 0; i < MAX_NUMNODES; i++) {
1066 struct sched_group *node = &sched_group_nodes[i];
1067 cpumask_t node_cpumask = node_to_cpumask(i);
1068 cpus_and(nodemask, node_cpumask, cpu_possible_map);
1070 if (cpus_empty(nodemask))
1073 first = last = NULL;
1074 /* Set up physical groups */
1075 for_each_cpu_mask(j, nodemask) {
1076 struct sched_domain *cpu_domain = &per_cpu(cpu_domains, j);
1077 struct sched_group *cpu = &sched_group_phys[j];
1079 if (j != first_cpu(cpu_domain->span))
1082 cpu->cpumask = cpu_domain->span;
1084 * Make each extra sibling increase power by 10% of
1085 * the basic CPU. This is very arbitrary.
1087 cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;
1088 node->cpu_power += cpu->cpu_power;
1100 first = last = NULL;
1101 for (i = 0; i < MAX_NUMNODES; i++) {
1102 struct sched_group *cpu = &sched_group_nodes[i];
1104 cpumask_t node_cpumask = node_to_cpumask(i);
1105 cpus_and(nodemask, node_cpumask, cpu_possible_map);
1107 if (cpus_empty(nodemask))
1110 cpu->cpumask = nodemask;
1111 /* ->cpu_power already setup */
1123 struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
1124 cpu_attach_domain(cpu_domain, i);
1127 #else /* !CONFIG_NUMA */
1128 static struct sched_group sched_group_cpus[NR_CPUS];
1129 static struct sched_group sched_group_phys[NR_CPUS];
1130 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
1131 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
1132 __init void arch_init_sched_domains(void)
1135 struct sched_group *first = NULL, *last = NULL;
1137 /* Set up domains */
1139 struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
1140 struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
1141 cpumask_t my_cpumask = cpumask_of_cpu(i);
1142 cpumask_t sibling_cpumask = cpumask_of_cpu(i ^ 0x1);
1144 *cpu_domain = SD_SIBLING_INIT;
1145 if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
1146 cpus_or(cpu_domain->span, my_cpumask, sibling_cpumask);
1148 cpu_domain->span = my_cpumask;
1149 cpu_domain->parent = phys_domain;
1150 cpu_domain->groups = &sched_group_cpus[i];
1152 *phys_domain = SD_CPU_INIT;
1153 phys_domain->span = cpu_possible_map;
1154 phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];
1157 /* Set up CPU (sibling) groups */
1159 struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
1161 first = last = NULL;
1163 if (i != first_cpu(cpu_domain->span))
1166 for_each_cpu_mask(j, cpu_domain->span) {
1167 struct sched_group *cpu = &sched_group_cpus[j];
1169 cpus_clear(cpu->cpumask);
1170 cpu_set(j, cpu->cpumask);
1171 cpu->cpu_power = SCHED_LOAD_SCALE;
1182 first = last = NULL;
1183 /* Set up physical groups */
1185 struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
1186 struct sched_group *cpu = &sched_group_phys[i];
1188 if (i != first_cpu(cpu_domain->span))
1191 cpu->cpumask = cpu_domain->span;
1192 /* See SMT+NUMA setup for comment */
1193 cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;
1205 struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
1206 cpu_attach_domain(cpu_domain, i);
1209 #endif /* CONFIG_NUMA */
1210 #endif /* CONFIG_SCHED_SMT */