4 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
5 * deal of code from the sparc and intel versions.
7 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
9 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
10 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
18 #include <linux/config.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/sched.h>
22 #include <linux/smp.h>
23 #include <linux/smp_lock.h>
24 #include <linux/interrupt.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/spinlock.h>
29 #include <linux/cache.h>
30 #include <linux/err.h>
31 #include <linux/sysdev.h>
32 #include <linux/cpu.h>
34 #include <asm/ptrace.h>
35 #include <asm/atomic.h>
38 #include <asm/pgtable.h>
39 #include <asm/hardirq.h>
45 #include <asm/iSeries/LparData.h>
46 #include <asm/iSeries/HvCall.h>
47 #include <asm/iSeries/HvCallCfg.h>
49 #include <asm/ppcdebug.h>
51 #include <asm/machdep.h>
53 #include <asm/cputable.h>
54 #include <asm/system.h>
/* Set once all secondaries are up; xics code still keys off this. */
56 int smp_threads_ready;
/* Scheduler hint: ticks a migrated task's cache is assumed warm for. */
57 unsigned long cache_decay_ticks;
/* CPU bookkeeping masks; populated by the platform probe code below. */
59 cpumask_t cpu_possible_map = CPU_MASK_NONE;
60 cpumask_t cpu_online_map = CPU_MASK_NONE;
61 cpumask_t cpu_available_map = CPU_MASK_NONE;
62 cpumask_t cpu_present_at_boot = CPU_MASK_NONE;
64 EXPORT_SYMBOL(cpu_online_map);
65 EXPORT_SYMBOL(cpu_possible_map);
/* Platform-specific SMP operations, chosen in smp_init_iSeries/pSeries. */
67 struct smp_ops_t *smp_ops;
/* Non-zero entry means that cpu has called in (see __cpu_up polling). */
69 static volatile unsigned int cpu_callin_map[NR_CPUS];
71 extern unsigned char stab_array[];
73 extern int cpu_idle(void *unused);
74 void smp_call_function_interrupt(void);
75 extern long register_vpa(unsigned long flags, unsigned long proc,
78 /* Low level assembly function used to backup CPU 0 state */
79 extern void __save_cpu_setup(void);
81 #ifdef CONFIG_PPC_ISERIES
/* Per-cpu bitmask of pending IPI message types (bits 0..3). */
82 static unsigned long iSeries_smp_message[NR_CPUS];

/*
 * IPI receive handler: drain every pending message bit for this cpu
 * and dispatch each one through smp_message_recv(). Bails early when
 * fewer than two cpus are online (no one could have sent us an IPI).
 * NOTE(review): braces and the declaration of 'msg' are not visible
 * in this extract; code left untouched.
 */
84 void iSeries_smp_message_recv( struct pt_regs * regs )
86 int cpu = smp_processor_id();
89 if ( num_online_cpus() < 2 )
92 for ( msg = 0; msg < 4; ++msg )
93 if ( test_and_clear_bit( msg, &iSeries_smp_message[cpu] ) )
94 smp_message_recv( msg, regs );
/* Post a message bit for 'cpu', then kick it with a hypervisor IPI. */
97 static inline void smp_iSeries_do_message(int cpu, int msg)
99 set_bit(msg, &iSeries_smp_message[cpu]);
100 HvCall_sendIPI(&(paca[cpu]));
/*
 * smp_ops->message_pass for iSeries: deliver 'msg' to a single cpu
 * when 'target' is a cpu number, otherwise broadcast to every online
 * cpu, skipping ourselves for MSG_ALL_BUT_SELF.
 */
103 static void smp_iSeries_message_pass(int target, int msg)
107 if (target < NR_CPUS)
108 smp_iSeries_do_message(target, msg);
110 for_each_online_cpu(i) {
111 if (target == MSG_ALL_BUT_SELF
112 && i == smp_processor_id())
114 smp_iSeries_do_message(i, msg);
/*
 * Count usable processors in this partition: a cpu whose LP paca
 * reports xDynProcStatus < 2 is owned by us, and is marked in the
 * available/possible/present-at-boot masks.
 * NOTE(review): the counter increment and return are not visible in
 * this extract.
 */
119 static int smp_iSeries_numProcs(void)
122 struct ItLpPaca * lpPaca;
125 for (i=0; i < NR_CPUS; ++i) {
126 lpPaca = paca[i].xLpPacaPtr;
127 if ( lpPaca->xDynProcStatus < 2 ) {
128 cpu_set(i, cpu_available_map);
129 cpu_set(i, cpu_possible_map);
130 cpu_set(i, cpu_present_at_boot);
/*
 * smp_ops->probe for iSeries: walk the pacas and count cpus the
 * hypervisor reports usable (same xDynProcStatus < 2 test as above).
 * NOTE(review): the count accumulation/return is not visible here.
 */
137 static int smp_iSeries_probe(void)
141 struct ItLpPaca *lpPaca;
143 for (i=0; i < NR_CPUS; ++i) {
144 lpPaca = paca[i].xLpPacaPtr;
145 if (lpPaca->xDynProcStatus < 2) {
146 /*paca[i].active = 1;*/
/*
 * smp_ops->kick_cpu for iSeries: release a spinning secondary by
 * setting its paca xProcStart flag, after checking the partition
 * actually owns that processor.
 */
154 static void smp_iSeries_kick_cpu(int nr)
156 struct ItLpPaca *lpPaca;
158 BUG_ON(nr < 0 || nr >= NR_CPUS);
160 /* Verify that our partition has a processor nr */
161 lpPaca = paca[nr].xLpPacaPtr;
162 if (lpPaca->xDynProcStatus >= 2)
165 /* The processor is currently spinning, waiting
166 * for the xProcStart field to become non-zero
167 * After we set xProcStart, the processor will
168 * continue on to secondary_start in iSeries_head.S
170 paca[nr].xProcStart = 1;
/* smp_ops->setup_cpu: nothing to do per-cpu on iSeries. */
173 static void __devinit smp_iSeries_setup_cpu(int nr)

/* iSeries has no give/take_timebase; the hypervisor keeps TBs in sync. */
177 static struct smp_ops_t iSeries_smp_ops = {
178 .message_pass = smp_iSeries_message_pass,
179 .probe = smp_iSeries_probe,
180 .kick_cpu = smp_iSeries_kick_cpu,
181 .setup_cpu = smp_iSeries_setup_cpu,

184 /* This is called very early. */
185 void __init smp_init_iSeries(void)
187 smp_ops = &iSeries_smp_ops;
188 systemcfg->processorCount = smp_iSeries_numProcs();
192 #ifdef CONFIG_PPC_PSERIES
/*
 * smp_ops->message_pass via OpenPIC IPIs. The mask argument to
 * openpic_cause_IPI() selects the destination cpus: all, all-but-self,
 * or a single target.
 * NOTE(review): the switch statement and its MSG_ALL case label are
 * not visible in this extract.
 */
193 void smp_openpic_message_pass(int target, int msg)
195 /* make sure we're sending something that translates to an IPI */
197 printk("SMP %d: smp_message_pass: unknown msg %d\n",
198 smp_processor_id(), msg);
204 openpic_cause_IPI(msg, 0xffffffff);
206 case MSG_ALL_BUT_SELF:
207 openpic_cause_IPI(msg,
208 0xffffffff & ~(1 << smp_processor_id()));
211 openpic_cause_IPI(msg, 1<<target);
/*
 * smp_ops->probe for OpenPIC: number of possible cpus; requests the
 * IPI interrupt sources from the controller.
 */
216 static int __init smp_openpic_probe(void)
220 nr_cpus = cpus_weight(cpu_possible_map);
223 openpic_request_IPIs();

/* smp_ops->setup_cpu: per-cpu OpenPIC initialisation. */
228 static void __devinit smp_openpic_setup_cpu(int cpu)
230 do_openpic_setup_cpu();
233 #ifdef CONFIG_HOTPLUG_CPU
234 /* Get state of physical CPU.
236 * 0 - The processor is in the RTAS stopped state
237 * 1 - stop-self is in progress
238 * 2 - The processor is not in the RTAS stopped state
239 * -1 - Hardware Error
240 * -2 - Hardware Busy, Try again later.
/*
 * Thin wrapper around the RTAS "query-cpu-stopped-state" call.
 * NOTE(review): the 'cpu_status' declaration and the return path are
 * not visible in this extract.
 */
242 static int query_cpu_stopped(unsigned int pcpu)
245 int status, qcss_tok;
247 qcss_tok = rtas_token("query-cpu-stopped-state");
248 BUG_ON(qcss_tok == RTAS_UNKNOWN_SERVICE);
249 status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
252 "RTAS query-cpu-stopped-state failed: %i\n", status);
/*
 * Hotplug: take the current cpu out of service. Decrements the global
 * processor count, moves boot_cpuid off this cpu if necessary, and
 * migrates its interrupts away (xics-specific for now, per the FIXME).
 */
259 int __cpu_disable(void)
261 /* FIXME: go put this in a header somewhere */
262 extern void xics_migrate_irqs_away(void);
264 systemcfg->processorCount--;
266 /*fix boot_cpuid here*/
267 if (smp_processor_id() == boot_cpuid)
268 boot_cpuid = any_online_cpu(cpu_online_map);
270 /* FIXME: abstract this to not be platform specific later on */
271 xics_migrate_irqs_away();
/*
 * Hotplug: wait for a dying cpu to reach the RTAS stopped state,
 * polling query_cpu_stopped() up to 5 times with 1s sleeps between
 * tries, then clear its start flag and callin mark so a later
 * __cpu_up can detect a failed restart.
 */
275 void __cpu_die(unsigned int cpu)
279 unsigned int pcpu = get_hard_smp_processor_id(cpu);
281 for (tries = 0; tries < 5; tries++) {
282 cpu_status = query_cpu_stopped(pcpu);
/* Sleep ~1s between polls rather than busy-waiting. */
286 set_current_state(TASK_UNINTERRUPTIBLE);
287 schedule_timeout(HZ);
289 if (cpu_status != 0) {
290 printk("Querying DEAD? cpu %i (%i) shows %i\n",
291 cpu, pcpu, cpu_status);
294 /* Isolation and deallocation are definatly done by
295 * drslot_chrp_cpu. If they were not they would be
296 * done here. Change isolate state to Isolate and
297 * change allocation-state to Unusable.
299 paca[cpu].xProcStart = 0;
301 /* So we can recognize if it fails to come up next time. */
302 cpu_callin_map[cpu] = 0;
310 /* Should never get here... */
315 /* Search all cpu device nodes for an offline logical cpu. If a
316 * device node has a "ibm,my-drc-index" property (meaning this is an
317 * LPAR), paranoid-check whether we own the cpu. For each "thread"
318 * of a cpu, if it is offline and has the same hw index as before,
319 * grab that in preference.
/* NOTE(review): several lines (len/tid declarations, continue paths,
 * final return of 'best') are not visible in this extract. */
321 static unsigned int find_physical_cpu_to_start(unsigned int old_hwindex)
323 struct device_node *np = NULL;
324 unsigned int best = -1U;
326 while ((np = of_find_node_by_type(np, "cpu"))) {
328 u32 *index = (u32 *)get_property(np, "ibm,my-drc-index", NULL);
330 get_property(np, "ibm,ppc-interrupt-server#s", &len);
333 tid = (u32 *)get_property(np, "reg", &len);
338 /* If there is a drc-index, make sure that we own
/* Sensor 9003: DR entity ownership; state 1 means we own the cpu. */
343 int rc = rtas_get_sensor(9003, *index, &state);
344 if (rc != 0 || state != 1)
348 nr_threads = len / sizeof(u32);
/* Prefer a stopped thread whose hw index matches the old one. */
350 while (nr_threads--) {
351 if (0 == query_cpu_stopped(tid[nr_threads])) {
352 best = tid[nr_threads];
353 if (best == old_hwindex)
364 * smp_startup_cpu() - start the given cpu
366 * At boot time, there is nothing to do. At run-time, call RTAS with
367 * the appropriate start location, if the cpu is in the RTAS stopped
/* NOTE(review): the opening of the kernel-doc comment and the success
 * return path are not visible in this extract. */
374 static inline int __devinit smp_startup_cpu(unsigned int lcpu)
377 extern void (*pseries_secondary_smp_init)(unsigned int cpu);
378 unsigned long start_here = __pa(pseries_secondary_smp_init);
381 /* At boot time the cpus are already spinning in hold
382 * loops, so nothing to do. */
383 if (system_state == SYSTEM_BOOTING)
386 pcpu = find_physical_cpu_to_start(get_hard_smp_processor_id(lcpu));
388 printk(KERN_INFO "No more cpus available, failing\n");
392 /* Fixup atomic count: it exited inside IRQ handler. */
393 ((struct task_struct *)paca[lcpu].xCurrent)->thread_info->preempt_count
395 /* Fixup SLB round-robin so next segment (kernel) goes in segment 0 */
396 paca[lcpu].xStab_data.next_round_robin = 0;
398 /* At boot this is done in prom.c. */
399 paca[lcpu].xHwProcNum = pcpu;
401 status = rtas_call(rtas_token("start-cpu"), 3, 1, NULL,
402 pcpu, start_here, lcpu);
404 printk(KERN_ERR "start-cpu failed: %i\n", status);
/*
 * Read the RTAS "ibm,lrdr-capacity" property to learn how many cpus
 * this partition may grow to, clamp to NR_CPUS, and mark all of them
 * possible so they can be hot-added later.
 */
410 static inline void look_for_more_cpus(void)
412 int num_addr_cell, num_size_cell, len, i, maxcpus;
413 struct device_node *np;
416 /* Find the property which will tell us about how many CPUs
417 * we're allowed to have. */
418 if ((np = find_path_device("/rtas")) == NULL) {
419 printk(KERN_ERR "Could not find /rtas in device tree!");
422 num_addr_cell = prom_n_addr_cells(np);
423 num_size_cell = prom_n_size_cells(np);
425 ireg = (unsigned int *)get_property(np, "ibm,lrdr-capacity", &len);
427 /* FIXME: make sure not marked as lrdr_capable() */
/* maxcpus lives after the address/size cells in the property. */
431 maxcpus = ireg[num_addr_cell + num_size_cell];
432 /* DRENG need to account for threads here too */
434 if (maxcpus > NR_CPUS) {
436 "Partition configured for %d cpus, "
437 "operating system maximum is %d.\n", maxcpus, NR_CPUS);
440 printk(KERN_INFO "Partition configured for %d cpus.\n",
443 /* Make those cpus (which might appear later) possible too. */
444 for (i = 0; i < maxcpus; i++)
445 cpu_set(i, cpu_possible_map);
447 #else /* ... CONFIG_HOTPLUG_CPU */
/* Hotplug disabled: run-time cpu startup is a no-op. */
448 static inline int __devinit smp_startup_cpu(unsigned int lcpu)
452 static inline void look_for_more_cpus(void)
455 #endif /* CONFIG_HOTPLUG_CPU */
/*
 * smp_ops->kick_cpu for pSeries: (re)start the cpu via RTAS if
 * needed, then release it from its hold loop via paca xProcStart.
 */
457 static void smp_pSeries_kick_cpu(int nr)
459 BUG_ON(nr < 0 || nr >= NR_CPUS);
461 if (!smp_startup_cpu(nr))
464 /* The processor is currently spinning, waiting
465 * for the xProcStart field to become non-zero
466 * After we set xProcStart, the processor will
467 * continue on to secondary_start
469 paca[nr].xProcStart = 1;
471 #endif /* CONFIG_PPC_PSERIES */
/*
 * Stagger each cpu's next jiffy-update timebase target by an equal
 * slice of tb_ticks_per_jiffy so decrementer work is spread out
 * instead of all cpus ticking simultaneously.
 */
473 static void __init smp_space_timers(unsigned int max_cpus)
476 unsigned long offset = tb_ticks_per_jiffy / max_cpus;
477 unsigned long previous_tb = paca[boot_cpuid].next_jiffy_update_tb;
480 if (i != boot_cpuid) {
481 paca[i].next_jiffy_update_tb =
482 previous_tb + offset;
483 previous_tb = paca[i].next_jiffy_update_tb;
488 #ifdef CONFIG_PPC_PSERIES
/*
 * Register this cpu's Virtual Processor Area with the hypervisor
 * (shared-processor LPAR support). Flag bit 18 selects VPA
 * registration; the paca's embedded xLpPaca is used as the VPA.
 */
489 void vpa_init(int cpu)
493 /* Register the Virtual Processor Area (VPA) */
494 printk(KERN_INFO "register_vpa: cpu 0x%x\n", cpu);
495 flags = 1UL << (63 - 18);
496 paca[cpu].xLpPaca.xSLBCount = 64; /* SLB restore highwater mark */
497 register_vpa(flags, cpu, __pa((unsigned long)&(paca[cpu].xLpPaca)));
/* Post a message bit for 'cpu' in the xics IPI word.
 * NOTE(review): the actual IPI trigger call is not visible here. */
500 static inline void smp_xics_do_message(int cpu, int msg)
502 set_bit(msg, &xics_ipi_message[cpu].value);

/*
 * smp_ops->message_pass via xics: single target, or broadcast to all
 * online cpus (skipping self for MSG_ALL_BUT_SELF) — mirrors the
 * iSeries variant above.
 */
507 static void smp_xics_message_pass(int target, int msg)
511 if (target < NR_CPUS) {
512 smp_xics_do_message(target, msg);
514 for_each_online_cpu(i) {
515 if (target == MSG_ALL_BUT_SELF
516 && i == smp_processor_id())
518 smp_xics_do_message(i, msg);
523 extern void xics_request_IPIs(void);

/* smp_ops->probe for xics: number of possible cpus. */
525 static int __init smp_xics_probe(void)
531 return cpus_weight(cpu_possible_map);

/* smp_ops->setup_cpu: per-cpu xics setup, skipped on the boot cpu. */
534 static void __devinit smp_xics_setup_cpu(int cpu)
536 if (cpu != boot_cpuid)
/* Handshake state for timebase synchronisation on non-LPAR pSeries. */
540 static spinlock_t timebase_lock = SPIN_LOCK_UNLOCKED;
541 static unsigned long timebase = 0;

/*
 * Boot-cpu side: freeze the timebase via RTAS, publish our TB value
 * for the secondary, then thaw once the secondary has taken it.
 * NOTE(review): the store of get_tb() into 'timebase' and the wait
 * loop are not visible in this extract.
 */
543 static void __devinit pSeries_give_timebase(void)
545 spin_lock(&timebase_lock);
546 rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
548 spin_unlock(&timebase_lock);
552 rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);

/* Secondary side: copy the published timebase into our TB registers. */
555 static void __devinit pSeries_take_timebase(void)
559 spin_lock(&timebase_lock);
560 set_tb(timebase >> 32, timebase & 0xffffffff);
562 spin_unlock(&timebase_lock);
/* pSeries op tables: one per interrupt controller flavour. */
565 static struct smp_ops_t pSeries_openpic_smp_ops = {
566 .message_pass = smp_openpic_message_pass,
567 .probe = smp_openpic_probe,
568 .kick_cpu = smp_pSeries_kick_cpu,
569 .setup_cpu = smp_openpic_setup_cpu,

572 static struct smp_ops_t pSeries_xics_smp_ops = {
573 .message_pass = smp_xics_message_pass,
574 .probe = smp_xics_probe,
575 .kick_cpu = smp_pSeries_kick_cpu,
576 .setup_cpu = smp_xics_setup_cpu,

579 /* This is called very early */
580 void __init smp_init_pSeries(void)
583 if (naca->interrupt_controller == IC_OPEN_PIC)
584 smp_ops = &pSeries_openpic_smp_ops;
586 smp_ops = &pSeries_xics_smp_ops;
588 /* Non-lpar has additional take/give timebase */
589 if (systemcfg->platform == PLATFORM_PSERIES) {
590 smp_ops->give_timebase = pSeries_give_timebase;
591 smp_ops->take_timebase = pSeries_take_timebase;
/*
 * Per-cpu profiling tick: run update_process_times() once every
 * prof_multiplier decrementer interrupts, using prof_counter as the
 * countdown.
 */
596 void smp_local_timer_interrupt(struct pt_regs * regs)
598 if (!--(get_paca()->prof_counter)) {
599 update_process_times(user_mode(regs));
600 (get_paca()->prof_counter)=get_paca()->prof_multiplier;
/*
 * Dispatch a received IPI message. RESCHEDULE needs no action beyond
 * the interrupt itself; MIGRATE_TASK and DEBUGGER_BREAK handlers are
 * not visible in this extract.
 */
604 void smp_message_recv(int msg, struct pt_regs *regs)
607 case PPC_MSG_CALL_FUNCTION:
608 smp_call_function_interrupt();
610 case PPC_MSG_RESCHEDULE:
611 /* XXX Do we have to do this? */
615 case PPC_MSG_MIGRATE_TASK:
619 #ifdef CONFIG_DEBUGGER
620 case PPC_MSG_DEBUGGER_BREAK:
625 printk("SMP %d: smp_message_recv(): unknown msg %d\n",
626 smp_processor_id(), msg);
/* Ask 'cpu' to reschedule by sending it a RESCHEDULE IPI. */
631 void smp_send_reschedule(int cpu)
633 smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);

636 #ifdef CONFIG_DEBUGGER
/* Send a debugger-break IPI so 'cpu' drops into the kernel debugger. */
637 void smp_send_debugger_break(int cpu)
639 smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
/* IPI callback: halt the receiving cpu (body not visible here). */
643 static void stop_this_cpu(void *dummy)

/* Stop all other cpus, e.g. for panic/shutdown. */
650 void smp_send_stop(void)
652 smp_call_function(stop_this_cpu, NULL, 1, 0);
656 * Structure and data for smp_call_function(). This is designed to minimise
657 * static memory requirements. It also looks cleaner.
658 * Stolen from the i386 version.
/* Serialises concurrent smp_call_function() callers. */
660 static spinlock_t call_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

662 static struct call_data_struct {
663 void (*func) (void *info);

670 /* delay of at least 8 seconds on 1GHz cpu */
671 #define SMP_CALL_TIMEOUT (1UL << (30 + 3))

674 * This function sends a 'generic call function' IPI to all other CPUs
677 * [SUMMARY] Run a function on all other CPUs.
678 * <func> The function to run. This must be fast and non-blocking.
679 * <info> An arbitrary pointer to pass to the function.
680 * <nonatomic> currently unused.
681 * <wait> If true, wait (atomically) until function has completed on other CPUs.
682 * [RETURNS] 0 on success, else a negative status code. Does not return until
683 * remote CPUs are nearly ready to execute <<func>> or are or have executed.
685 * You must not call this function with disabled interrupts or from a
686 * hardware interrupt handler or from a bottom half handler.
688 int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
691 struct call_data_struct data;
693 unsigned long timeout;
697 atomic_set(&data.started, 0);
700 atomic_set(&data.finished, 0);
702 spin_lock(&call_lock);
703 /* Must grab online cpu count with preempt disabled, otherwise
705 cpus = num_online_cpus() - 1;
713 /* Send a message to all other CPUs and wait for them to respond */
714 smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);
716 /* Wait for response */
717 timeout = SMP_CALL_TIMEOUT;
718 while (atomic_read(&data.started) != cpus) {
720 if (--timeout == 0) {
721 printk("smp_call_function on cpu %d: other cpus not "
722 "responding (%d)\n", smp_processor_id(),
723 atomic_read(&data.started));
/* Second wait: only reached when wait==1; handlers must finish. */
730 timeout = SMP_CALL_TIMEOUT;
731 while (atomic_read(&data.finished) != cpus) {
733 if (--timeout == 0) {
734 printk("smp_call_function on cpu %d: other "
735 "cpus not finishing (%d/%d)\n",
737 atomic_read(&data.finished),
738 atomic_read(&data.started));
750 spin_unlock(&call_lock);
/*
 * IPI handler side of smp_call_function(): snapshot the shared
 * call_data, acknowledge via 'started', run the function, and — when
 * the sender asked to wait — signal completion via 'finished'.
 */
754 void smp_call_function_interrupt(void)
756 void (*func) (void *info);
760 /* call_data will be NULL if the sender timed out while
761 * waiting on us to receive the call.
766 func = call_data->func;
767 info = call_data->info;
768 wait = call_data->wait;
/* Order our reads of call_data before the 'started' increment. */
771 smp_mb__before_atomic_inc();
774 * Notify initiating CPU that I've grabbed the data and am
775 * about to execute the function
777 atomic_inc(&call_data->started);
779 * At this point the info structure may be out of scope unless wait==1
/* Order the function's effects before signalling completion. */
783 smp_mb__before_atomic_inc();
784 atomic_inc(&call_data->finished);
788 extern unsigned long decr_overclock;
789 extern struct gettimeofday_struct do_gtod;

/* Per-cpu idle thread_info pointers, filled by smp_create_idle(). */
791 struct thread_info *current_set[NR_CPUS];

793 DECLARE_PER_CPU(unsigned int, pvr);

/* Record cpu 'id's processor version register in its per-cpu slot. */
795 static void __devinit smp_store_cpu_info(int id)
797 per_cpu(pvr, id) = _get_PVR();
/*
 * Fork the idle task for a secondary cpu and stash its task/thread_info
 * pointers where the secondary will pick them up when it starts.
 */
800 static void __init smp_create_idle(unsigned int cpu)
803 struct task_struct *p;
805 /* create a process for the processor */
806 /* only regs.msr is actually used, and 0 is OK for it */
807 memset(&regs, 0, sizeof(struct pt_regs));
808 p = copy_process(CLONE_VM | CLONE_IDLETASK,
809 0, &regs, 0, NULL, NULL);
811 panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
813 wake_up_forked_process(p);
817 paca[cpu].xCurrent = (u64)p;
818 current_set[cpu] = p->thread_info;
/*
 * Boot-cpu preparation for bringing up secondaries: record boot-cpu
 * info, set timing parameters, probe the platform for the real cpu
 * count, space the per-cpu timers, and create idle tasks.
 */
821 void __init smp_prepare_cpus(unsigned int max_cpus)
826 * setup_cpu may need to be called on the boot cpu. We havent
827 * spun any cpus up but lets be paranoid.
829 BUG_ON(boot_cpuid != smp_processor_id());
832 smp_store_cpu_info(boot_cpuid);
833 cpu_callin_map[boot_cpuid] = 1;
834 paca[boot_cpuid].prof_counter = 1;
835 paca[boot_cpuid].prof_multiplier = 1;
/* 10ms cache-decay estimate; see cache_decay_ticks above. */
840 cache_decay_ticks = HZ/100;
842 #ifndef CONFIG_PPC_ISERIES
843 paca[boot_cpuid].next_jiffy_update_tb = tb_last_stamp = get_tb();
846 * Should update do_gtod.stamp_xsec.
847 * For now we leave it which means the time can be some
848 * number of msecs off until someone does a settimeofday()
850 do_gtod.tb_orig_stamp = tb_last_stamp;
852 look_for_more_cpus();
855 max_cpus = smp_ops->probe();
857 /* Backup CPU 0 state if necessary */
860 smp_space_timers(max_cpus);
863 if (cpu != boot_cpuid)
864 smp_create_idle(cpu);
/* Mark the boot cpu online and record its current task in the paca. */
867 void __devinit smp_prepare_boot_cpu(void)
869 BUG_ON(smp_processor_id() != boot_cpuid);
871 /* cpu_possible is set up in prom.c */
872 cpu_set(boot_cpuid, cpu_online_map);
874 paca[boot_cpuid].xCurrent = (u64)current;
875 current_set[boot_cpuid] = current->thread_info;
/*
 * Bring cpu 'cpu' online: initialise its paca fields (and, on
 * non-SLB machines, its segment table), kick it via smp_ops, poll
 * cpu_callin_map for its callin, sync timebases, and mark it online.
 */
878 int __devinit __cpu_up(unsigned int cpu)
882 /* At boot, don't bother with non-present cpus -JSCHOPP */
883 if (system_state == SYSTEM_BOOTING && !cpu_present_at_boot(cpu))
886 paca[cpu].prof_counter = 1;
887 paca[cpu].prof_multiplier = 1;
888 paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;
890 if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
893 /* maximum of 48 CPUs on machines with a segment table */
/* One zeroed page of stab_array per cpu serves as its STAB. */
897 tmp = &stab_array[PAGE_SIZE * cpu];
898 memset(tmp, 0, PAGE_SIZE);
899 paca[cpu].xStab_data.virt = (unsigned long)tmp;
900 paca[cpu].xStab_data.real = virt_to_abs(tmp);
903 /* The information for processor bringup must
904 * be written out to main store before we release
910 smp_ops->kick_cpu(cpu);
913 * wait to see if the cpu made a callin (is actually up).
914 * use this value that I found through experimentation.
917 for (c = 5000; c && !cpu_callin_map[cpu]; c--)
920 if (!cpu_callin_map[cpu]) {
921 printk("Processor %u is stuck.\n", cpu);
925 printk("Processor %u found.\n", cpu);
927 if (smp_ops->give_timebase)
928 smp_ops->give_timebase();
929 cpu_set(cpu, cpu_online_map);
933 extern unsigned int default_distrib_server;
934 /* Activate a secondary processor. */
/*
 * Entry point for a freshly-kicked secondary: adopt init_mm, record
 * cpu info, arm the decrementer, signal callin, run platform setup,
 * sync the timebase, then fall into the idle loop (never returns).
 */
935 int __devinit start_secondary(void *unused)
937 unsigned int cpu = smp_processor_id();
939 atomic_inc(&init_mm.mm_count);
940 current->active_mm = &init_mm;
942 smp_store_cpu_info(cpu);
943 set_dec(paca[cpu].default_decr);
944 cpu_callin_map[cpu] = 1;
946 smp_ops->setup_cpu(cpu);
947 if (smp_ops->take_timebase)
948 smp_ops->take_timebase();
950 get_paca()->yielded = 0;
952 #ifdef CONFIG_PPC_PSERIES
/* Shared-processor LPAR: register our VPA (call not visible here). */
953 if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
957 #ifdef CONFIG_IRQ_ALL_CPUS
958 /* Put the calling processor into the GIQ. This is really only
959 * necessary from a secondary thread as the OF start-cpu interface
960 * performs this function for us on primary threads.
962 /* TODO: 9005 is #defined in rtas-proc.c -- move to a header */
963 rtas_set_indicator(9005, default_distrib_server, 1);
969 return cpu_idle(NULL);
/* Profiling-rate hook (body not visible in this extract). */
972 int setup_profiling_timer(unsigned int multiplier)

/*
 * Final SMP bringup step: pin ourselves to the boot cpu long enough
 * to run its setup_cpu() there, then restore our affinity.
 */
977 void __init smp_cpus_done(unsigned int max_cpus)
981 /* We want the setup_cpu() here to be called from CPU 0, but our
982 * init thread may have been "borrowed" by another CPU in the meantime
983 * se we pin us down to CPU 0 for a short while
985 old_mask = current->cpus_allowed;
986 set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));
988 smp_ops->setup_cpu(boot_cpuid);
990 /* XXX fix this, xics currently relies on it - Anton */
991 smp_threads_ready = 1;
993 set_cpus_allowed(current, old_mask);