+/* Register "possible_cpus=" as an early kernel command-line parameter,
+ * handled by setup_possible_cpus() (defined above this hunk). */
+early_param("possible_cpus", setup_possible_cpus);
+
+/*
+ * __cpu_disable - take the calling cpu offline (CPU-hotplug teardown).
+ *
+ * Returns 0 on success, or -EBUSY if this cpu is marked reserved in
+ * smp_cpu_reserved[].  The whole sequence runs under smp_reserve_lock
+ * with interrupts disabled.
+ */
+int
+__cpu_disable(void)
+{
+ unsigned long flags;
+ struct ec_creg_mask_parms cr_parms;
+ int cpu = smp_processor_id();
+
+ spin_lock_irqsave(&smp_reserve_lock, flags);
+ /* A reserved cpu must not be taken offline. */
+ if (smp_cpu_reserved[cpu] != 0) {
+ spin_unlock_irqrestore(&smp_reserve_lock, flags);
+ return -EBUSY;
+ }
+ /* Remove this cpu from the online mask before tearing anything down. */
+ cpu_clear(cpu, cpu_online_map);
+
+ /* Disable pfault pseudo page faults on this cpu. */
+ pfault_fini();
+
+ /* Start from a no-op mask set: OR in nothing, AND against all ones;
+  * the andvals entries below then clear only the selected bits. */
+ memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
+ memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));
+
+ /* disable all external interrupts */
+ cr_parms.orvals[0] = 0;
+ cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
+ 1<<11 | 1<<10 | 1<< 6 | 1<< 4);
+ /* disable all I/O interrupts */
+ /* NOTE(review): 1<<31 shifts into the sign bit of int — relies on the
+  * usual two's-complement behavior rather than strictly portable C. */
+ cr_parms.orvals[6] = 0;
+ cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
+ 1<<27 | 1<<26 | 1<<25 | 1<<24);
+ /* disable most machine checks */
+ cr_parms.orvals[14] = 0;
+ cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
+
+ /* Apply the control-register bit changes (presumably on all cpus,
+  * given the smp_ prefix — confirm against smp_ctl_bit_callback). */
+ smp_ctl_bit_callback(&cr_parms);
+
+ spin_unlock_irqrestore(&smp_reserve_lock, flags);
+ return 0;
+}
+
+/*
+ * __cpu_die - called on a surviving cpu to wait for @cpu to stop.
+ *
+ * Busy-waits (with cpu_relax() between polls) until smp_cpu_not_running()
+ * reports the target cpu as no longer running, then logs completion.
+ * NOTE(review): the printk carries no KERN_* log level prefix.
+ */
+void
+__cpu_die(unsigned int cpu)
+{
+ /* Wait until target cpu is down */
+ while (!smp_cpu_not_running(cpu))
+ cpu_relax();
+ printk("Processor %d spun down\n", cpu);
+}
+
+/*
+ * cpu_die - final code executed on the dying cpu itself.
+ *
+ * Detaches from the idle task, then stops this cpu by sending itself a
+ * sigp stop order.  Never returns: BUG() fires if the stop order somehow
+ * did not take effect, with an infinite loop as the last-resort backstop.
+ */
+void
+cpu_die(void)
+{
+ idle_task_exit();
+ signal_processor(smp_processor_id(), sigp_stop);
+ BUG();
+ for(;;);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */