/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/hardirq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/naca.h>
#include <asm/paca.h>
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvCall.h>
#include <asm/iSeries/HvCallCfg.h>
#include <asm/time.h>
#include <asm/ppcdebug.h>
#include "open_pic.h"
#include <asm/machdep.h>
#include <asm/xics.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/rtas.h>
#include <asm/abs_addr.h>
int smp_threads_ready;
unsigned long cache_decay_ticks;

cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_available_map = CPU_MASK_NONE;
cpumask_t cpu_present_at_boot = CPU_MASK_NONE;

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
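
/*
 * How these masks are used in this file: cpu_possible_map covers every cpu
 * the partition may ever bring online (look_for_more_cpus() adds cpus from
 * "ibm,lrdr-capacity" to it); cpu_online_map tracks cpus that have completed
 * start_secondary(); cpu_available_map and cpu_present_at_boot record the
 * cpus found at boot (set from the paca scan here for iSeries, and during
 * early boot elsewhere), and __cpu_up() skips cpus that were not present at
 * boot while the system is still booting.
 */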
struct smp_ops_t *smp_ops;

static volatile unsigned int cpu_callin_map[NR_CPUS];

extern unsigned char stab_array[];

extern int cpu_idle(void *unused);
void smp_call_function_interrupt(void);
extern long register_vpa(unsigned long flags, unsigned long proc,
			 unsigned long vpa);

/* Low level assembly function used to backup CPU 0 state */
extern void __save_cpu_setup(void);
#ifdef CONFIG_PPC_ISERIES
static unsigned long iSeries_smp_message[NR_CPUS];

void iSeries_smp_message_recv( struct pt_regs * regs )
{
	int cpu = smp_processor_id();
	int msg;

	if ( num_online_cpus() < 2 )
		return;

	for ( msg = 0; msg < 4; ++msg )
		if ( test_and_clear_bit( msg, &iSeries_smp_message[cpu] ) )
			smp_message_recv( msg, regs );
}
static inline void smp_iSeries_do_message(int cpu, int msg)
{
	set_bit(msg, &iSeries_smp_message[cpu]);
	HvCall_sendIPI(&(paca[cpu]));
}
static void smp_iSeries_message_pass(int target, int msg)
{
	int i;

	if (target < NR_CPUS)
		smp_iSeries_do_message(target, msg);
	else {
		for_each_online_cpu(i) {
			if (target == MSG_ALL_BUT_SELF
			    && i == smp_processor_id())
				continue;
			smp_iSeries_do_message(i, msg);
		}
	}
}
static int smp_iSeries_numProcs(void)
{
	unsigned np, i;
	struct ItLpPaca * lpPaca;

	np = 0;
	for (i=0; i < NR_CPUS; ++i) {
		lpPaca = paca[i].xLpPacaPtr;
		if ( lpPaca->xDynProcStatus < 2 ) {
			cpu_set(i, cpu_available_map);
			cpu_set(i, cpu_possible_map);
			cpu_set(i, cpu_present_at_boot);
			++np;
		}
	}
	return np;
}
static int smp_iSeries_probe(void)
{
	unsigned i;
	unsigned np = 0;
	struct ItLpPaca *lpPaca;

	for (i=0; i < NR_CPUS; ++i) {
		lpPaca = paca[i].xLpPacaPtr;
		if (lpPaca->xDynProcStatus < 2) {
			/*paca[i].active = 1;*/
			++np;
		}
	}

	return np;
}
static void smp_iSeries_kick_cpu(int nr)
{
	struct ItLpPaca *lpPaca;

	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/* Verify that our partition has a processor nr */
	lpPaca = paca[nr].xLpPacaPtr;
	if (lpPaca->xDynProcStatus >= 2)
		return;

	/* The processor is currently spinning, waiting
	 * for the xProcStart field to become non-zero.
	 * After we set xProcStart, the processor will
	 * continue on to secondary_start in iSeries_head.S
	 */
	paca[nr].xProcStart = 1;
}
static void __devinit smp_iSeries_setup_cpu(int nr)
{
}

static struct smp_ops_t iSeries_smp_ops = {
	.message_pass = smp_iSeries_message_pass,
	.probe        = smp_iSeries_probe,
	.kick_cpu     = smp_iSeries_kick_cpu,
	.setup_cpu    = smp_iSeries_setup_cpu,
};
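
/*
 * Each platform fills in one of these smp_ops_t tables and the generic
 * code below only ever goes through smp_ops: message_pass() delivers one
 * of the PPC_MSG_* IPIs, probe() counts (and prepares) the usable cpus,
 * kick_cpu() releases a held secondary, and setup_cpu() does per-cpu
 * interrupt setup.  give_timebase/take_timebase are optional and only set
 * where the timebase must be handed over explicitly (non-LPAR pSeries).
 */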
/* This is called very early. */
void __init smp_init_iSeries(void)
{
	smp_ops = &iSeries_smp_ops;
	systemcfg->processorCount = smp_iSeries_numProcs();
}
#endif /* CONFIG_PPC_ISERIES */
#ifdef CONFIG_PPC_PSERIES
void smp_openpic_message_pass(int target, int msg)
{
	/* make sure we're sending something that translates to an IPI */
	if (msg > 0x3) {
		printk("SMP %d: smp_message_pass: unknown msg %d\n",
		       smp_processor_id(), msg);
		return;
	}
	switch (target) {
	case MSG_ALL:
		openpic_cause_IPI(msg, 0xffffffff);
		break;
	case MSG_ALL_BUT_SELF:
		openpic_cause_IPI(msg,
				  0xffffffff & ~(1 << smp_processor_id()));
		break;
	default:
		openpic_cause_IPI(msg, 1 << target);
		break;
	}
}
static int __init smp_openpic_probe(void)
{
	int nr_cpus;

	nr_cpus = cpus_weight(cpu_possible_map);

	if (nr_cpus > 1)
		openpic_request_IPIs();

	return nr_cpus;
}

static void __devinit smp_openpic_setup_cpu(int cpu)
{
	do_openpic_setup_cpu();
}
#ifdef CONFIG_HOTPLUG_CPU
/* Get state of physical CPU.
 * query_cpu_stopped returns:
 *	 0 - The processor is in the RTAS stopped state
 *	 1 - stop-self is in progress
 *	 2 - The processor is not in the RTAS stopped state
 *	-1 - Hardware Error
 *	-2 - Hardware Busy, try again later.
 */
static int query_cpu_stopped(unsigned int pcpu)
{
	int cpu_status;
	int status, qcss_tok;

	qcss_tok = rtas_token("query-cpu-stopped-state");
	BUG_ON(qcss_tok == RTAS_UNKNOWN_SERVICE);
	status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
	if (status != 0) {
		printk(KERN_ERR
		       "RTAS query-cpu-stopped-state failed: %i\n", status);
		return status;
	}

	return cpu_status;
}
int __cpu_disable(void)
{
	/* FIXME: go put this in a header somewhere */
	extern void xics_migrate_irqs_away(void);

	systemcfg->processorCount--;

	/* fix boot_cpuid here */
	if (smp_processor_id() == boot_cpuid)
		boot_cpuid = any_online_cpu(cpu_online_map);

	/* FIXME: abstract this to not be platform specific later on */
	xics_migrate_irqs_away();
	return 0;
}
void __cpu_die(unsigned int cpu)
{
	int tries;
	int cpu_status;
	unsigned int pcpu = get_hard_smp_processor_id(cpu);

	for (tries = 0; tries < 5; tries++) {
		cpu_status = query_cpu_stopped(pcpu);

		if (cpu_status == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	if (cpu_status != 0) {
		printk("Querying DEAD? cpu %i (%i) shows %i\n",
		       cpu, pcpu, cpu_status);
	}

	/* Isolation and deallocation are definitely done by
	 * drslot_chrp_cpu.  If they were not they would be
	 * done here.  Change isolate state to Isolate and
	 * change allocation-state to Unusable.
	 */
	paca[cpu].xProcStart = 0;

	/* So we can recognize if it fails to come up next time. */
	cpu_callin_map[cpu] = 0;
}
/* Kill this cpu: runs on the dying cpu itself */
void cpu_die(void)
{
	local_irq_disable();
	/* Ask RTAS to stop this processor */
	rtas_stop_self();
	/* Should never get here... */
	BUG();
	for (;;)
		;
}
/* Search all cpu device nodes for an offline logical cpu.  If a
 * device node has a "ibm,my-drc-index" property (meaning this is an
 * LPAR), paranoid-check whether we own the cpu.  For each "thread"
 * of a cpu, if it is offline and has the same hw index as before,
 * grab that in preference.
 */
static unsigned int find_physical_cpu_to_start(unsigned int old_hwindex)
{
	struct device_node *np = NULL;
	unsigned int best = -1U;

	while ((np = of_find_node_by_type(np, "cpu"))) {
		int nr_threads, len;
		u32 *index = (u32 *)get_property(np, "ibm,my-drc-index", NULL);
		u32 *tid = (u32 *)
			get_property(np, "ibm,ppc-interrupt-server#s", &len);

		if (!tid)
			tid = (u32 *)get_property(np, "reg", &len);

		if (!tid)
			continue;

		/* If there is a drc-index, make sure that we own
		 * the cpu.
		 */
		if (index) {
			int state;
			int rc = rtas_get_sensor(9003, *index, &state);
			if (rc != 0 || state != 1)
				continue;
		}

		nr_threads = len / sizeof(u32);

		while (nr_threads--) {
			if (0 == query_cpu_stopped(tid[nr_threads])) {
				best = tid[nr_threads];
				if (best == old_hwindex)
					goto out;
			}
		}
	}
out:
	of_node_put(np);
	return best;
}
/**
 * smp_startup_cpu() - start the given cpu
 *
 * At boot time, there is nothing to do.  At run-time, call RTAS with
 * the appropriate start location, if the cpu is in the RTAS stopped
 * state.  Returns 1 on success, 0 on failure.
 */
static inline int __devinit smp_startup_cpu(unsigned int lcpu)
{
	int status;
	extern void (*pseries_secondary_smp_init)(unsigned int cpu);
	unsigned long start_here = __pa(pseries_secondary_smp_init);
	unsigned int pcpu;

	/* At boot time the cpus are already spinning in hold
	 * loops, so nothing to do. */
	if (system_state == SYSTEM_BOOTING)
		return 1;

	pcpu = find_physical_cpu_to_start(get_hard_smp_processor_id(lcpu));
	if (pcpu == -1U) {
		printk(KERN_INFO "No more cpus available, failing\n");
		return 0;
	}

	/* Fixup atomic count: it exited inside IRQ handler. */
	paca[lcpu].xCurrent->thread_info->preempt_count = 0;
	/* Fixup SLB round-robin so next segment (kernel) goes in segment 0 */
	paca[lcpu].xStab_data.next_round_robin = 0;

	/* At boot this is done in prom.c. */
	paca[lcpu].xHwProcNum = pcpu;

	status = rtas_call(rtas_token("start-cpu"), 3, 1, NULL,
			   pcpu, start_here, lcpu);
	if (status != 0) {
		printk(KERN_ERR "start-cpu failed: %i\n", status);
		return 0;
	}

	return 1;
}
static inline void look_for_more_cpus(void)
{
	int num_addr_cell, num_size_cell, len, i, maxcpus;
	struct device_node *np;
	unsigned int *ireg;

	/* Find the property which will tell us about how many CPUs
	 * we're allowed to have. */
	if ((np = find_path_device("/rtas")) == NULL) {
		printk(KERN_ERR "Could not find /rtas in device tree!");
		return;
	}
	num_addr_cell = prom_n_addr_cells(np);
	num_size_cell = prom_n_size_cells(np);

	ireg = (unsigned int *)get_property(np, "ibm,lrdr-capacity", &len);
	if (ireg == NULL) {
		/* FIXME: make sure not marked as lrdr_capable() */
		return;
	}

	maxcpus = ireg[num_addr_cell + num_size_cell];
	/* DRENG need to account for threads here too */

	if (maxcpus > NR_CPUS) {
		printk(KERN_WARNING
		       "Partition configured for %d cpus, "
		       "operating system maximum is %d.\n", maxcpus, NR_CPUS);
		maxcpus = NR_CPUS;
	} else
		printk(KERN_INFO "Partition configured for %d cpus.\n",
		       maxcpus);

	/* Make those cpus (which might appear later) possible too. */
	for (i = 0; i < maxcpus; i++)
		cpu_set(i, cpu_possible_map);
}
#else /* ... CONFIG_HOTPLUG_CPU */
static inline int __devinit smp_startup_cpu(unsigned int lcpu)
{
	return 1;
}

static inline void look_for_more_cpus(void)
{
}
#endif /* CONFIG_HOTPLUG_CPU */
static void smp_pSeries_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	if (!smp_startup_cpu(nr))
		return;

	/* The processor is currently spinning, waiting
	 * for the xProcStart field to become non-zero.
	 * After we set xProcStart, the processor will
	 * continue on to secondary_start
	 */
	paca[nr].xProcStart = 1;
}
#endif /* CONFIG_PPC_PSERIES */
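
/*
 * Stagger the initial decrementer targets so each cpu takes its timer
 * interrupt at a different point in the timebase: the boot cpu keeps its
 * next_jiffy_update_tb and every other cpu is offset by a further
 * tb_ticks_per_jiffy/max_cpus, which spreads the per-jiffy work instead
 * of having all cpus hit it on the same tick.
 */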
static void __init smp_space_timers(unsigned int max_cpus)
{
	int i;
	unsigned long offset = tb_ticks_per_jiffy / max_cpus;
	unsigned long previous_tb = paca[boot_cpuid].next_jiffy_update_tb;

	for_each_cpu(i) {
		if (i != boot_cpuid) {
			paca[i].next_jiffy_update_tb =
				previous_tb + offset;
			previous_tb = paca[i].next_jiffy_update_tb;
		}
	}
}
#ifdef CONFIG_PPC_PSERIES
void vpa_init(int cpu)
{
	unsigned long flags;

	/* Register the Virtual Processor Area (VPA) */
	printk(KERN_INFO "register_vpa: cpu 0x%x\n", cpu);
	flags = 1UL << (63 - 18);
	paca[cpu].xLpPaca.xSLBCount = 64; /* SLB restore highwater mark */
	register_vpa(flags, cpu, __pa((unsigned long)&(paca[cpu].xLpPaca)));
}
static inline void smp_xics_do_message(int cpu, int msg)
{
	set_bit(msg, &xics_ipi_message[cpu].value);
	mb();
	xics_cause_IPI(cpu);
}

static void smp_xics_message_pass(int target, int msg)
{
	unsigned int i;

	if (target < NR_CPUS) {
		smp_xics_do_message(target, msg);
	} else {
		for_each_online_cpu(i) {
			if (target == MSG_ALL_BUT_SELF
			    && i == smp_processor_id())
				continue;
			smp_xics_do_message(i, msg);
		}
	}
}
extern void xics_request_IPIs(void);

static int __init smp_xics_probe(void)
{
	xics_request_IPIs();

	return cpus_weight(cpu_possible_map);
}

static void __devinit smp_xics_setup_cpu(int cpu)
{
	if (cpu != boot_cpuid)
		xics_setup_cpu();
}
static spinlock_t timebase_lock = SPIN_LOCK_UNLOCKED;
static unsigned long timebase = 0;

static void __devinit pSeries_give_timebase(void)
{
	spin_lock(&timebase_lock);
	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
	timebase = get_tb();
	spin_unlock(&timebase_lock);

	while (timebase)
		barrier();
	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
}

static void __devinit pSeries_take_timebase(void)
{
	while (!timebase)
		barrier();
	spin_lock(&timebase_lock);
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	spin_unlock(&timebase_lock);
}
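
/*
 * The two routines above form a small rendezvous: the boot cpu freezes the
 * timebase via RTAS, publishes its own TB value through the shared
 * 'timebase' word, and spins until the secondary has consumed it.  The
 * secondary spins until the value appears, loads it with set_tb(), and
 * clears 'timebase' to let the boot cpu thaw the timebase again.  Only
 * non-LPAR pSeries installs these hooks (see smp_init_pSeries below).
 */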
static struct smp_ops_t pSeries_openpic_smp_ops = {
	.message_pass = smp_openpic_message_pass,
	.probe        = smp_openpic_probe,
	.kick_cpu     = smp_pSeries_kick_cpu,
	.setup_cpu    = smp_openpic_setup_cpu,
};

static struct smp_ops_t pSeries_xics_smp_ops = {
	.message_pass = smp_xics_message_pass,
	.probe        = smp_xics_probe,
	.kick_cpu     = smp_pSeries_kick_cpu,
	.setup_cpu    = smp_xics_setup_cpu,
};
/* This is called very early */
void __init smp_init_pSeries(void)
{
	if (naca->interrupt_controller == IC_OPEN_PIC)
		smp_ops = &pSeries_openpic_smp_ops;
	else
		smp_ops = &pSeries_xics_smp_ops;

	/* Non-lpar has additional take/give timebase */
	if (systemcfg->platform == PLATFORM_PSERIES) {
		smp_ops->give_timebase = pSeries_give_timebase;
		smp_ops->take_timebase = pSeries_take_timebase;
	}
}
#endif /* CONFIG_PPC_PSERIES */
void smp_local_timer_interrupt(struct pt_regs * regs)
{
	if (!--(get_paca()->prof_counter)) {
		update_process_times(user_mode(regs));
		get_paca()->prof_counter = get_paca()->prof_multiplier;
	}
}
void smp_message_recv(int msg, struct pt_regs *regs)
{
	switch (msg) {
	case PPC_MSG_CALL_FUNCTION:
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* XXX Do we have to do this? */
		set_need_resched();
		break;
	case PPC_MSG_MIGRATE_TASK:
		/* spare */
		break;
#ifdef CONFIG_DEBUGGER
	case PPC_MSG_DEBUGGER_BREAK:
		debugger_ipi(regs);
		break;
#endif
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}
void smp_send_reschedule(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif
static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 * Stolen from the i386 version.
 */
static spinlock_t call_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

static struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
} *call_data;

/* delay of at least 8 seconds on 1GHz cpu */
#define SMP_CALL_TIMEOUT (1UL << (30 + 3))
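
/*
 * The timeout above is a spin count, not a time: 1UL << 33 is roughly
 * 8.6e9 loop iterations, so at a minimum of one cycle per iteration a
 * 1GHz cpu spends at least eight seconds in the wait loops below before
 * giving up.
 */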
/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
		       int wait)
{
	struct call_data_struct data;
	int ret = -1, cpus;
	unsigned long timeout;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	/* Must grab online cpu count with preempt disabled, otherwise
	 * it can change. */
	cpus = num_online_cpus() - 1;
	if (!cpus) {
		ret = 0;
		goto out;
	}

	call_data = &data;
	wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);

	/* Wait for response */
	timeout = SMP_CALL_TIMEOUT;
	while (atomic_read(&data.started) != cpus) {
		HMT_low();
		if (--timeout == 0) {
			printk("smp_call_function on cpu %d: other cpus not "
			       "responding (%d)\n", smp_processor_id(),
			       atomic_read(&data.started));
			goto out;
		}
	}

	if (wait) {
		timeout = SMP_CALL_TIMEOUT;
		while (atomic_read(&data.finished) != cpus) {
			HMT_low();
			if (--timeout == 0) {
				printk("smp_call_function on cpu %d: other "
				       "cpus not finishing (%d/%d)\n",
				       smp_processor_id(),
				       atomic_read(&data.finished),
				       atomic_read(&data.started));
				goto out;
			}
		}
	}

	ret = 0;

out:
	call_data = NULL;
	HMT_medium();
	spin_unlock(&call_lock);
	return ret;
}
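
/*
 * Typical use (illustrative sketch only; do_flush() is a made-up callback,
 * not something defined in this file):
 *
 *	static void do_flush(void *unused)
 *	{
 *		... fast, non-blocking work, runs in interrupt context ...
 *	}
 *
 *	smp_call_function(do_flush, NULL, 1, 1);
 *
 * smp_send_stop() above is the one real caller in this file; it passes
 * wait=0 since the stopped cpus never report completion.
 */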
void smp_call_function_interrupt(void)
{
	void (*func) (void *info);
	void *info;
	int wait;

	/* call_data will be NULL if the sender timed out while
	 * waiting on us to receive the call.
	 */
	if (!call_data)
		return;

	func = call_data->func;
	info = call_data->info;
	wait = call_data->wait;

	if (!wait)
		smp_mb__before_atomic_inc();

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	(*func)(info);
	if (wait) {
		smp_mb__before_atomic_inc();
		atomic_inc(&call_data->finished);
	}
}
extern unsigned long decr_overclock;
extern struct gettimeofday_struct do_gtod;

struct thread_info *current_set[NR_CPUS];

DECLARE_PER_CPU(unsigned int, pvr);

static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(pvr, id) = _get_PVR();
}
static void __init smp_create_idle(unsigned int cpu)
{
	struct pt_regs regs;
	struct task_struct *p;

	/* create a process for the processor */
	/* only regs.msr is actually used, and 0 is OK for it */
	memset(&regs, 0, sizeof(struct pt_regs));
	p = copy_process(CLONE_VM | CLONE_IDLETASK,
			 0, &regs, 0, NULL, NULL);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));

	wake_up_forked_process(p);
	init_idle(p, cpu);
	unhash_process(p);

	paca[cpu].xCurrent = p;
	current_set[cpu] = p->thread_info;
}
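
/*
 * Each secondary gets its idle task created here, on the boot cpu, before
 * it is ever kicked: paca[cpu].xCurrent and current_set[cpu] seed the
 * secondary's notion of 'current' so that it enters start_secondary()
 * with a valid task and stack.
 */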
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up, but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;
	paca[boot_cpuid].prof_counter = 1;
	paca[boot_cpuid].prof_multiplier = 1;

#ifndef CONFIG_PPC_ISERIES
	paca[boot_cpuid].next_jiffy_update_tb = tb_last_stamp = get_tb();

	/*
	 * Should update do_gtod.stamp_xsec.
	 * For now we leave it which means the time can be some
	 * number of msecs off until someone does a settimeofday()
	 */
	do_gtod.tb_orig_stamp = tb_last_stamp;
#endif

	look_for_more_cpus();

	max_cpus = smp_ops->probe();

	/* Backup CPU 0 state if necessary */
	__save_cpu_setup();

	smp_space_timers(max_cpus);

	for_each_cpu(cpu) {
		if (cpu != boot_cpuid)
			smp_create_idle(cpu);
	}
}
void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);

	/* cpu_possible is set up in prom.c */
	cpu_set(boot_cpuid, cpu_online_map);

	paca[boot_cpuid].xCurrent = current;
	current_set[boot_cpuid] = current->thread_info;
}
int __devinit __cpu_up(unsigned int cpu)
{
	int c;

	/* At boot, don't bother with non-present cpus -JSCHOPP */
	if (system_state == SYSTEM_BOOTING && !cpu_present_at_boot(cpu))
		return -ENOENT;

	paca[cpu].prof_counter = 1;
	paca[cpu].prof_multiplier = 1;
	paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;

	if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
		void *tmp;

		/* maximum of 48 CPUs on machines with a segment table */
		if (cpu >= 48)
			BUG();

		tmp = &stab_array[PAGE_SIZE * cpu];
		memset(tmp, 0, PAGE_SIZE);
		paca[cpu].xStab_data.virt = (unsigned long)tmp;
		paca[cpu].xStab_data.real = virt_to_abs(tmp);
	}

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	mb();

	/* wake up the cpu */
	smp_ops->kick_cpu(cpu);

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 */
	if (system_state == SYSTEM_BOOTING)
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 25; c && !cpu_callin_map[cpu]; c--) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(HZ/5);
		}
#endif

	if (!cpu_callin_map[cpu]) {
		printk("Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	printk("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();
	cpu_set(cpu, cpu_online_map);
	return 0;
}
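
/*
 * Bring-up handshake as implemented above and in start_secondary() below:
 * the boot cpu publishes the paca fields, kick_cpu() releases the held
 * secondary, the secondary announces itself by setting cpu_callin_map[],
 * the boot cpu then hands over the timebase if the platform needs it and
 * finally marks the cpu online.
 */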
extern unsigned int default_distrib_server;
/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(paca[cpu].default_decr);
	cpu_callin_map[cpu] = 1;

	smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	get_paca()->yielded = 0;

#ifdef CONFIG_PPC_PSERIES
	if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
		vpa_init(cpu);
	}

#ifdef CONFIG_IRQ_ALL_CPUS
	/* Put the calling processor into the GIQ.  This is really only
	 * necessary from a secondary thread as the OF start-cpu interface
	 * performs this function for us on primary threads.
	 */
	/* TODO: 9005 is #defined in rtas-proc.c -- move to a header */
	rtas_set_indicator(9005, default_distrib_server, 1);
#endif
#endif /* CONFIG_PPC_PSERIES */

	local_irq_enable();

	return cpu_idle(NULL);
}
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime,
	 * so we pin ourselves to CPU 0 for a short while
	 */
	old_mask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

	smp_ops->setup_cpu(boot_cpuid);

	/* XXX fix this, xics currently relies on it - Anton */
	smp_threads_ready = 1;

	set_cpus_allowed(current, old_mask);
}
#ifdef CONFIG_SCHED_SMT
#ifdef CONFIG_NUMA
static struct sched_group sched_group_cpus[NR_CPUS];
static struct sched_group sched_group_phys[NR_CPUS];
static struct sched_group sched_group_nodes[MAX_NUMNODES];
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
static DEFINE_PER_CPU(struct sched_domain, node_domains);

__init void arch_init_sched_domains(void)
{
	int i;
	struct sched_group *first = NULL, *last = NULL;

	/* Set up domains */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
		struct sched_domain *node_domain = &per_cpu(node_domains, i);
		int node = cpu_to_node(i);
		cpumask_t nodemask = node_to_cpumask(node);
		cpumask_t my_cpumask = cpumask_of_cpu(i);
		cpumask_t sibling_cpumask = cpumask_of_cpu(i ^ 0x1);

		*cpu_domain = SD_SIBLING_INIT;
		if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
			cpus_or(cpu_domain->span, my_cpumask, sibling_cpumask);
		else
			cpu_domain->span = my_cpumask;
		cpu_domain->parent = phys_domain;
		cpu_domain->groups = &sched_group_cpus[i];

		*phys_domain = SD_CPU_INIT;
		phys_domain->span = nodemask;
		phys_domain->parent = node_domain;
		phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];

		*node_domain = SD_NODE_INIT;
		node_domain->span = cpu_possible_map;
		node_domain->groups = &sched_group_nodes[node];
	}
	/* Set up CPU (sibling) groups */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		int j;

		first = last = NULL;

		if (i != first_cpu(cpu_domain->span))
			continue;

		for_each_cpu_mask(j, cpu_domain->span) {
			struct sched_group *cpu = &sched_group_cpus[j];

			cpus_clear(cpu->cpumask);
			cpu_set(j, cpu->cpumask);
			cpu->cpu_power = SCHED_LOAD_SCALE;

			if (!first)
				first = cpu;
			if (last)
				last->next = cpu;
			last = cpu;
		}
		last->next = first;
	}
	for (i = 0; i < MAX_NUMNODES; i++) {
		int j;
		cpumask_t nodemask;
		struct sched_group *node = &sched_group_nodes[i];
		cpumask_t node_cpumask = node_to_cpumask(i);
		cpus_and(nodemask, node_cpumask, cpu_possible_map);

		if (cpus_empty(nodemask))
			continue;

		first = last = NULL;
		/* Set up physical groups */
		for_each_cpu_mask(j, nodemask) {
			struct sched_domain *cpu_domain = &per_cpu(cpu_domains, j);
			struct sched_group *cpu = &sched_group_phys[j];

			if (j != first_cpu(cpu_domain->span))
				continue;

			cpu->cpumask = cpu_domain->span;
			/*
			 * Make each extra sibling increase power by 10% of
			 * the basic CPU. This is very arbitrary.
			 */
			cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;
			node->cpu_power += cpu->cpu_power;

			if (!first)
				first = cpu;
			if (last)
				last->next = cpu;
			last = cpu;
		}
		last->next = first;
	}
	/* Set up node groups */
	first = last = NULL;
	for (i = 0; i < MAX_NUMNODES; i++) {
		struct sched_group *cpu = &sched_group_nodes[i];
		cpumask_t nodemask;
		cpumask_t node_cpumask = node_to_cpumask(i);
		cpus_and(nodemask, node_cpumask, cpu_possible_map);

		if (cpus_empty(nodemask))
			continue;

		cpu->cpumask = nodemask;
		/* ->cpu_power already setup */

		if (!first)
			first = cpu;
		if (last)
			last->next = cpu;
		last = cpu;
	}
	last->next = first;

	mb();
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		cpu_attach_domain(cpu_domain, i);
	}
}
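
/*
 * Summary of the hierarchy built above: each cpu gets a three-level chain
 * of sched_domains -- an SMT sibling domain spanning the cpu and its
 * sibling thread (i ^ 0x1), a physical domain spanning its NUMA node, and
 * a node domain spanning every possible cpu -- with the matching
 * sched_group lists linked into rings at each level.  The non-NUMA
 * variant below builds the same structure minus the node level.
 */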
#else /* !CONFIG_NUMA */
static struct sched_group sched_group_cpus[NR_CPUS];
static struct sched_group sched_group_phys[NR_CPUS];
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_domain, phys_domains);

__init void arch_init_sched_domains(void)
{
	int i;
	struct sched_group *first = NULL, *last = NULL;

	/* Set up domains */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
		cpumask_t my_cpumask = cpumask_of_cpu(i);
		cpumask_t sibling_cpumask = cpumask_of_cpu(i ^ 0x1);

		*cpu_domain = SD_SIBLING_INIT;
		if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
			cpus_or(cpu_domain->span, my_cpumask, sibling_cpumask);
		else
			cpu_domain->span = my_cpumask;
		cpu_domain->parent = phys_domain;
		cpu_domain->groups = &sched_group_cpus[i];

		*phys_domain = SD_CPU_INIT;
		phys_domain->span = cpu_possible_map;
		phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];
	}

	/* Set up CPU (sibling) groups */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		int j;

		first = last = NULL;

		if (i != first_cpu(cpu_domain->span))
			continue;

		for_each_cpu_mask(j, cpu_domain->span) {
			struct sched_group *cpu = &sched_group_cpus[j];

			cpus_clear(cpu->cpumask);
			cpu_set(j, cpu->cpumask);
			cpu->cpu_power = SCHED_LOAD_SCALE;

			if (!first)
				first = cpu;
			if (last)
				last->next = cpu;
			last = cpu;
		}
		last->next = first;
	}

	first = last = NULL;
	/* Set up physical groups */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		struct sched_group *cpu = &sched_group_phys[i];

		if (i != first_cpu(cpu_domain->span))
			continue;

		cpu->cpumask = cpu_domain->span;
		/* See SMT+NUMA setup for comment */
		cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;

		if (!first)
			first = cpu;
		if (last)
			last->next = cpu;
		last = cpu;
	}
	last->next = first;

	mb();
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		cpu_attach_domain(cpu_domain, i);
	}
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_SCHED_SMT */