* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Heiko Carstens (heiko.carstens@de.ibm.com)
*
* based on other smp stuff by
* (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
cpumask_t cpu_possible_map;
unsigned long cache_decay_ticks = 0;
+static struct task_struct *current_set[NR_CPUS];
+
EXPORT_SYMBOL(cpu_online_map);
/*
struct call_data_struct data;
int cpus = num_online_cpus()-1;
- /* FIXME: get cpu lock -hc */
if (cpus <= 0)
return 0;
static inline void do_send_stop(void)
{
- unsigned long dummy;
int i, rc;
/* stop all processors */
if (!cpu_online(i) || smp_processor_id() == i)
continue;
do {
- rc = signal_processor_ps(&dummy, 0, i, sigp_stop);
+ rc = signal_processor(i, sigp_stop);
} while (rc == sigp_busy);
}
}
static inline void do_store_status(void)
{
- unsigned long low_core_addr;
- unsigned long dummy;
int i, rc;
/* store status of all processors in their lowcores (real 0) */
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i) || smp_processor_id() == i)
continue;
- low_core_addr = (unsigned long) lowcore_ptr[i];
do {
- rc = signal_processor_ps(&dummy, low_core_addr, i,
- sigp_store_status_at_address);
+ rc = signal_processor_p(
+ (__u32)(unsigned long) lowcore_ptr[i], i,
+ sigp_store_status_at_address);
} while(rc == sigp_busy);
}
}
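
Both stop loops above share the same busy-retry idiom: the sigp order is simply re-issued for as long as the target cpu answers sigp_busy. A minimal sketch of that idiom factored into a helper (the name sigp_retry is hypothetical and not part of this patch; signal_processor and the sigp_* types are the existing ones from asm/sigp.h):

    static sigp_ccode sigp_retry(__u16 cpu, sigp_order_code order)
    {
            sigp_ccode rc;

            /* sigp_busy only means the target cpu has not yet accepted
             * the previous order; spinning until it does is safe here. */
            do {
                    rc = signal_processor(cpu, order);
            } while (rc == sigp_busy);
            return rc;
    }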
static void do_machine_restart(void * __unused)
{
+ static atomic_t cpuid = ATOMIC_INIT(-1);
+
cpu_clear(smp_processor_id(), cpu_restart_map);
- if (smp_processor_id() == 0) {
+ if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
/* Wait for all other cpus to enter do_machine_restart. */
while (!cpus_empty(cpu_restart_map))
cpu_relax();
static void do_machine_halt(void * __unused)
{
- if (smp_processor_id() == 0) {
+ static atomic_t cpuid = ATOMIC_INIT(-1);
+
+ if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
smp_send_stop();
if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
cpcmd(vmhalt_cmd, NULL, 0);
static void do_machine_power_off(void * __unused)
{
- if (smp_processor_id() == 0) {
+ static atomic_t cpuid = ATOMIC_INIT(-1);
+
+ if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
smp_send_stop();
if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
cpcmd(vmpoff_cmd, NULL, 0);
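
The restart, halt and power-off handlers above no longer assume that cpu 0 does the final work: with cpu hotplug, cpu 0 may already be offline, so the first cpu through performs it instead. The election pattern in isolation (a sketch assuming, as the s390 atomic.h of this time defines it, that atomic_compare_and_swap() returns 0 when the swap from -1 succeeded):

    static void example_first_cpu_wins(void)
    {
            static atomic_t cpuid = ATOMIC_INIT(-1);

            /* exactly one caller swaps its cpu id into 'cpuid' and sees
             * 0 returned; every later caller skips the one-time work */
            if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
                    /* winner: run the shutdown sequence */
            }
    }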
* Let's check how many CPUs we have.
*/
-void __init smp_check_cpus(unsigned int max_cpus)
+#ifdef CONFIG_HOTPLUG_CPU
+
+void
+__init smp_check_cpus(unsigned int max_cpus)
+{
+ int cpu;
+
+ /*
+ * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
+ */
+ for (cpu = 1; cpu < max_cpus; cpu++)
+ cpu_set(cpu, cpu_possible_map);
+}
+
+#else /* CONFIG_HOTPLUG_CPU */
+
+void
+__init smp_check_cpus(unsigned int max_cpus)
{
int curr_cpu, num_cpus;
__u16 boot_cpu_addr;
printk("Boot cpu address %2X\n", boot_cpu_addr);
}
+#endif /* CONFIG_HOTPLUG_CPU */
+
/*
* Activate a secondary processor.
*/
extern void init_cpu_timer(void);
+extern void init_cpu_vtimer(void);
extern int pfault_init(void);
extern int pfault_token(void);
cpu_init();
/* init per CPU timer */
init_cpu_timer();
+#ifdef CONFIG_VIRT_TIMER
+ init_cpu_vtimer();
+#endif
#ifdef CONFIG_PFAULT
/* Enable pfault pseudo page faults on this cpu. */
pfault_init();
return cpu_idle(NULL);
}
-static struct task_struct *__devinit fork_by_hand(void)
+static void __init smp_create_idle(unsigned int cpu)
+{
+ struct task_struct *p;
+
+ /*
+ * don't care about the psw and regs settings since we'll never
+ * reschedule the forked task.
+ */
+ p = fork_idle(cpu);
+ if (IS_ERR(p))
+ panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
+ current_set[cpu] = p;
+}
+
+/* Reserving and releasing of CPUs */
+
+static spinlock_t smp_reserve_lock = SPIN_LOCK_UNLOCKED;
+static int smp_cpu_reserved[NR_CPUS];
+
+int
+smp_get_cpu(cpumask_t cpu_mask)
{
- struct pt_regs regs;
- /* don't care about the psw and regs settings since we'll never
- reschedule the forked task. */
- memset(&regs,0,sizeof(struct pt_regs));
- return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
+ unsigned long flags;
+ int cpu;
+
+ spin_lock_irqsave(&smp_reserve_lock, flags);
+ /* Try to find an already reserved cpu. */
+ for_each_cpu_mask(cpu, cpu_mask) {
+ if (smp_cpu_reserved[cpu] != 0) {
+ smp_cpu_reserved[cpu]++;
+ /* Found one. */
+ goto out;
+ }
+ }
+ /* Reserve a new cpu from cpu_mask. */
+ for_each_cpu_mask(cpu, cpu_mask) {
+ if (cpu_online(cpu)) {
+ smp_cpu_reserved[cpu]++;
+ goto out;
+ }
+ }
+ cpu = -ENODEV;
+out:
+ spin_unlock_irqrestore(&smp_reserve_lock, flags);
+ return cpu;
}
-int __cpu_up(unsigned int cpu)
+void
+smp_put_cpu(int cpu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&smp_reserve_lock, flags);
+ smp_cpu_reserved[cpu]--;
+ spin_unlock_irqrestore(&smp_reserve_lock, flags);
+}
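
A caller pairs the two reservation functions as sketched below (example_run_on_reserved_cpu is a hypothetical illustration, not part of this patch; CPU_MASK_ALL just allows any online cpu):

    int example_run_on_reserved_cpu(void)
    {
            int cpu;

            cpu = smp_get_cpu(CPU_MASK_ALL);
            if (cpu < 0)
                    return cpu;     /* -ENODEV: no online cpu in the mask */
            /* ... run work bound to 'cpu'; __cpu_disable() below returns
             * -EBUSY for any cpu whose reservation count is non-zero ... */
            smp_put_cpu(cpu);
            return 0;
    }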
+
+static inline int
+cpu_stopped(int cpu)
+{
+ __u32 status;
+
+ /* Check for stopped state */
+ if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
+ if (status & 0x40)
+ return 1;
+ }
+ return 0;
+}
+
+/* Upping and downing of CPUs */
+
+int
+__cpu_up(unsigned int cpu)
{
- struct task_struct *idle;
+ struct task_struct *idle;
struct _lowcore *cpu_lowcore;
+ struct stack_frame *sf;
sigp_ccode ccode;
+ int curr_cpu;
- /*
- * Set prefix page for new cpu
- */
+ /* Probe all possible cpu addresses until one answers the sense
+ * order as "stopped"; that address becomes this cpu's entry in
+ * the logical map. */
+ for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
+ __cpu_logical_map[cpu] = (__u16) curr_cpu;
+ if (cpu_stopped(cpu))
+ break;
+ }
- ccode = signal_processor_p((unsigned long)(lowcore_ptr[cpu]),
+ if (!cpu_stopped(cpu))
+ return -ENODEV;
+
+ ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
cpu, sigp_set_prefix);
if (ccode){
printk("sigp_set_prefix failed for cpu %d "
return -EIO;
}
- /* We can't use kernel_thread since we must _avoid_ to reschedule
- the child. */
- idle = fork_by_hand();
- if (IS_ERR(idle)){
- printk("failed fork for CPU %d", cpu);
- return -EIO;
- }
- wake_up_forked_process(idle);
-
- /*
- * We remove it from the pidhash and the runqueue
- * once we got the process:
- */
- init_idle(idle, cpu);
-
- unhash_process(idle);
-
+ idle = current_set[cpu];
cpu_lowcore = lowcore_ptr[cpu];
- cpu_lowcore->save_area[15] = idle->thread.ksp;
cpu_lowcore->kernel_stack = (unsigned long)
idle->thread_info + (THREAD_SIZE);
+ /* Carve an empty initial stack frame, below the pt_regs area, off
+ * the top of the new kernel stack and point the saved stack pointer
+ * at it, so the new cpu comes up with a valid frame. */
+ sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
+ - sizeof(struct pt_regs)
+ - sizeof(struct stack_frame));
+ memset(sf, 0, sizeof(struct stack_frame));
+ sf->gprs[9] = (unsigned long) sf;
+ cpu_lowcore->save_area[15] = (unsigned long) sf;
__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
__asm__ __volatile__("stam 0,15,0(%0)"
: : "a" (&cpu_lowcore->access_regs_save_area)
return 0;
}
+int
+__cpu_disable(void)
+{
+ unsigned long flags;
+ ec_creg_mask_parms cr_parms;
+
+ spin_lock_irqsave(&smp_reserve_lock, flags);
+ if (smp_cpu_reserved[smp_processor_id()] != 0) {
+ spin_unlock_irqrestore(&smp_reserve_lock, flags);
+ return -EBUSY;
+ }
+
+ /* disable all external interrupts */
+
+ cr_parms.start_ctl = 0;
+ cr_parms.end_ctl = 0;
+ cr_parms.orvals[0] = 0;
+ cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
+ 1<<11 | 1<<10 | 1<< 6 | 1<< 4);
+ smp_ctl_bit_callback(&cr_parms);
+
+ /* disable all I/O interrupts */
+
+ cr_parms.start_ctl = 6;
+ cr_parms.end_ctl = 6;
+ cr_parms.orvals[6] = 0;
+ cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
+ 1<<27 | 1<<26 | 1<<25 | 1<<24);
+ smp_ctl_bit_callback(&cr_parms);
+
+ /* disable most machine checks */
+
+ cr_parms.start_ctl = 14;
+ cr_parms.end_ctl = 14;
+ cr_parms.orvals[14] = 0;
+ cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
+ smp_ctl_bit_callback(&cr_parms);
+
+ spin_unlock_irqrestore(&smp_reserve_lock, flags);
+ return 0;
+}
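
For reference, smp_ctl_bit_callback() (defined earlier in this file) applies such a parameter block to the local cpu's control registers roughly as sketched here; this mirrors the existing helper and adds nothing new:

    static void example_ctl_apply(ec_creg_mask_parms *pp)
    {
            unsigned long cregs[16];
            int i;

            /* fetch the affected control registers, apply the AND/OR
             * masks, and load the result back */
            __ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
            for (i = pp->start_ctl; i <= pp->end_ctl; i++)
                    cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
            __ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
    }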
+
+void
+__cpu_die(unsigned int cpu)
+{
+ /* Wait until target cpu is down */
+ while (!cpu_stopped(cpu));
+ printk("Processor %d spun down\n", cpu);
+}
+
+void
+cpu_die(void)
+{
+ signal_processor(smp_processor_id(), sigp_stop);
+ BUG();
+ for(;;);
+}
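
Taken together, __cpu_disable(), __cpu_die() and cpu_die() form the architecture half of cpu_down(). A rough sketch of the ordering driven by the generic hotplug code in kernel/cpu.c (the function below is illustrative only; the steps actually run on different cpus, as the comments note):

    int example_cpu_down_flow(unsigned int cpu)
    {
            int rc;

            /* on the dying cpu: fails with -EBUSY while the cpu is
             * reserved, otherwise masks external, I/O and machine
             * check interrupts */
            rc = __cpu_disable();
            if (rc)
                    return rc;
            /* the dying cpu later enters cpu_die() from its idle path
             * and stops itself with sigp_stop; a surviving cpu polls
             * the sense order in __cpu_die() until it reads stopped */
            __cpu_die(cpu);
            return 0;
    }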
+
/*
* Cycle through the processors and setup structures.
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- unsigned long async_stack;
+ unsigned long stack;
+ unsigned int cpu;
int i;
/* request the 0x1202 external interrupt */
lowcore_ptr[i] = (struct _lowcore *)
__get_free_pages(GFP_KERNEL|GFP_DMA,
sizeof(void*) == 8 ? 1 : 0);
- async_stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER);
- if (lowcore_ptr[i] == NULL || async_stack == 0ULL)
+ stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER);
+ if (lowcore_ptr[i] == NULL || stack == 0ULL)
panic("smp_boot_cpus failed to allocate memory\n");
*(lowcore_ptr[i]) = S390_lowcore;
- lowcore_ptr[i]->async_stack = async_stack + (ASYNC_SIZE);
+ lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
+#ifdef CONFIG_CHECK_STACK
+ stack = __get_free_pages(GFP_KERNEL,0);
+ if (stack == 0ULL)
+ panic("smp_boot_cpus failed to allocate memory\n");
+ lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
+#endif
}
set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
+
+ for_each_cpu(cpu)
+ if (cpu != smp_processor_id())
+ smp_create_idle(cpu);
}
void __devinit smp_prepare_boot_cpu(void)
{
- cpu_set(smp_processor_id(), cpu_online_map);
- cpu_set(smp_processor_id(), cpu_possible_map);
- S390_lowcore.percpu_offset = __per_cpu_offset[smp_processor_id()];
+ BUG_ON(smp_processor_id() != 0);
+
+ cpu_set(0, cpu_online_map);
+ cpu_set(0, cpu_possible_map);
+ S390_lowcore.percpu_offset = __per_cpu_offset[0];
+ current_set[0] = current;
}
void smp_cpus_done(unsigned int max_cpus)
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);
+EXPORT_SYMBOL(smp_get_cpu);
+EXPORT_SYMBOL(smp_put_cpu);
+