/*
* arch/s390/kernel/smp.c
*
- * S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright (C) IBM Corp. 1999,2006
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Heiko Carstens (heiko.carstens@de.ibm.com)
*
* based on other smp stuff by
* (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
+#include <linux/cpu.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
-/* prototypes */
-extern int cpu_idle(void * unused);
-
extern volatile int __cpu_logical_map[];
/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
-cycles_t cacheflush_time=0;
-int smp_threads_ready=0; /* Set when the idlers are all forked. */
-cpumask_t cpu_online_map;
-cpumask_t cpu_possible_map;
-unsigned long cache_decay_ticks = 0;
+cpumask_t cpu_online_map = CPU_MASK_NONE;
+cpumask_t cpu_possible_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_online_map);
+static struct task_struct *current_set[NR_CPUS];
/*
* Reboot, halt and power_off routines for SMP.
extern char vmpoff_cmd[];
extern void reipl(unsigned long devno);
+extern void reipl_diag(void);
static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);
* Structure and data for smp_call_function(). This is designed to minimise
* static memory requirements. It also looks cleaner.
*/
-static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(call_lock);
struct call_data_struct {
void (*func) (void *info);
struct call_data_struct data;
int cpus = num_online_cpus()-1;
- /* FIXME: get cpu lock -hc */
if (cpus <= 0)
return 0;
+ /* Can deadlock when called with interrupts disabled */
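+	/* (two cpus calling this with interrupts off could each end up
+	 *  waiting forever for the other one's acknowledgement) */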
+ WARN_ON(irqs_disabled());
+
data.func = func;
data.info = info;
atomic_set(&data.started, 0);
static inline void do_send_stop(void)
{
- unsigned long dummy;
- int i, rc;
+ int cpu, rc;
/* stop all processors */
- for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_online(i) || smp_processor_id() == i)
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
continue;
do {
- rc = signal_processor_ps(&dummy, 0, i, sigp_stop);
+ rc = signal_processor(cpu, sigp_stop);
} while (rc == sigp_busy);
}
}
static inline void do_store_status(void)
{
- unsigned long low_core_addr;
- unsigned long dummy;
- int i, rc;
+ int cpu, rc;
/* store status of all processors in their lowcores (real 0) */
- for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_online(i) || smp_processor_id() == i)
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
continue;
- low_core_addr = (unsigned long) lowcore_ptr[i];
do {
- rc = signal_processor_ps(&dummy, low_core_addr, i,
- sigp_store_status_at_address);
+ rc = signal_processor_p(
+ (__u32)(unsigned long) lowcore_ptr[cpu], cpu,
+ sigp_store_status_at_address);
} while(rc == sigp_busy);
}
}
/*
* Reboot, halt and power_off routines for SMP.
*/
-static cpumask_t cpu_restart_map;
static void do_machine_restart(void * __unused)
{
- cpu_clear(smp_processor_id(), cpu_restart_map);
- if (smp_processor_id() == 0) {
- /* Wait for all other cpus to enter do_machine_restart. */
- while (!cpus_empty(cpu_restart_map))
+ int cpu;
+ static atomic_t cpuid = ATOMIC_INIT(-1);
+
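+	/* The cpu that wins the cmpxchg race below performs the re-IPL;
+	 * every other cpu stops itself via sigp_stop. */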
+ if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
+ signal_processor(smp_processor_id(), sigp_stop);
+
+ /* Wait for all other cpus to enter stopped state */
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
+ continue;
+		while (!smp_cpu_not_running(cpu))
cpu_relax();
- /* Store status of other cpus. */
- do_store_status();
- /*
- * Finally call reipl. Because we waited for all other
- * cpus to enter this function we know that they do
- * not hold any s390irq-locks (the cpus have been
- * interrupted by an external interrupt and s390irq
- * locks are always held disabled).
- */
- if (MACHINE_IS_VM)
- cpcmd ("IPL", NULL, 0);
- else
- reipl (0x10000 | S390_lowcore.ipl_device);
}
- signal_processor(smp_processor_id(), sigp_stop);
+
+ /* Store status of other cpus. */
+ do_store_status();
+
+ /*
+ * Finally call reipl. Because we waited for all other
+ * cpus to enter this function we know that they do
+ * not hold any s390irq-locks (the cpus have been
+ * interrupted by an external interrupt and s390irq
+ * locks are always held disabled).
+ */
+ reipl_diag();
+
+	if (MACHINE_IS_VM)
+		cpcmd("IPL", NULL, 0, NULL);
+	else
+		reipl(0x10000 | S390_lowcore.ipl_device);
}
void machine_restart_smp(char * __unused)
{
- cpu_restart_map = cpu_online_map;
on_each_cpu(do_machine_restart, NULL, 0, 0);
}
static void do_machine_halt(void * __unused)
{
- if (smp_processor_id() == 0) {
+ static atomic_t cpuid = ATOMIC_INIT(-1);
+
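+	/* Only the first cpu to claim cpuid performs the halt sequence. */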
+ if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
smp_send_stop();
if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
- cpcmd(vmhalt_cmd, NULL, 0);
+ cpcmd(vmhalt_cmd, NULL, 0, NULL);
signal_processor(smp_processor_id(),
sigp_stop_and_store_status);
}
static void do_machine_power_off(void * __unused)
{
- if (smp_processor_id() == 0) {
+ static atomic_t cpuid = ATOMIC_INIT(-1);
+
+ if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
smp_send_stop();
if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
- cpcmd(vmpoff_cmd, NULL, 0);
+ cpcmd(vmpoff_cmd, NULL, 0, NULL);
signal_processor(smp_processor_id(),
sigp_stop_and_store_status);
}
* Set signaling bit in lowcore of target cpu and kick it
*/
set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
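+	/* sigp_busy only means the order could not be accepted yet -
+	 * keep retrying until the emergency signal goes through. */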
- while(signal_processor(cpu, sigp_external_call) == sigp_busy)
+	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
udelay(10);
}
*/
static void smp_ext_bitcall_others(ec_bit_sig sig)
{
- int i;
+ int cpu;
- for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_online(i) || smp_processor_id() == i)
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
continue;
/*
* Set signaling bit in lowcore of target cpu and kick it
*/
- set_bit(sig, (unsigned long *) &lowcore_ptr[i]->ext_call_fast);
- while (signal_processor(i, sigp_external_call) == sigp_busy)
+ set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
+ while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
udelay(10);
}
}
-#ifndef CONFIG_ARCH_S390X
+#ifndef CONFIG_64BIT
/*
* this function sends a 'purge tlb' signal to another CPU.
*/
on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
-#endif /* ! CONFIG_ARCH_S390X */
+#endif /* ! CONFIG_64BIT */
/*
* this function sends a 'reschedule' IPI to another CPU.
 * Let's check how many CPUs we have.
*/
-void __init smp_check_cpus(unsigned int max_cpus)
+static unsigned int
+__init smp_count_cpus(void)
{
- int curr_cpu, num_cpus;
+ unsigned int cpu, num_cpus;
__u16 boot_cpu_addr;
+ /*
+ * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
+ */
+
boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
- current_thread_info()->cpu = 0;
- num_cpus = 1;
- for (curr_cpu = 0;
- curr_cpu <= 65535 && num_cpus < max_cpus; curr_cpu++) {
- if ((__u16) curr_cpu == boot_cpu_addr)
- continue;
- __cpu_logical_map[num_cpus] = (__u16) curr_cpu;
- if (signal_processor(num_cpus, sigp_sense) ==
- sigp_not_operational)
- continue;
- cpu_set(num_cpus, cpu_possible_map);
- num_cpus++;
- }
- printk("Detected %d CPU's\n",(int) num_cpus);
- printk("Boot cpu address %2X\n", boot_cpu_addr);
+ current_thread_info()->cpu = 0;
+ num_cpus = 1;
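+	/*
+	 * Logical cpu 1 serves as a scratch slot while sensing: each
+	 * candidate address is mapped there and probed with sigp_sense.
+	 * The real logical-to-physical mapping is set up later, when
+	 * __cpu_up rescans for a stopped cpu.
+	 */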
+ for (cpu = 0; cpu <= 65535; cpu++) {
+ if ((__u16) cpu == boot_cpu_addr)
+ continue;
+ __cpu_logical_map[1] = (__u16) cpu;
+ if (signal_processor(1, sigp_sense) ==
+ sigp_not_operational)
+ continue;
+ num_cpus++;
+ }
+
+	printk("Detected %d CPUs\n", (int) num_cpus);
+ printk("Boot cpu address %2X\n", boot_cpu_addr);
+
+ return num_cpus;
}
/*
* Activate a secondary processor.
*/
extern void init_cpu_timer(void);
+extern void init_cpu_vtimer(void);
extern int pfault_init(void);
-extern int pfault_token(void);
+extern void pfault_fini(void);
int __devinit start_secondary(void *cpuvoid)
{
/* Setup the cpu */
cpu_init();
+ preempt_disable();
/* init per CPU timer */
init_cpu_timer();
+#ifdef CONFIG_VIRT_TIMER
+ init_cpu_vtimer();
+#endif
#ifdef CONFIG_PFAULT
/* Enable pfault pseudo page faults on this cpu. */
- pfault_init();
+ if (MACHINE_IS_VM)
+ pfault_init();
#endif
/* Mark this cpu as online */
cpu_set(smp_processor_id(), cpu_online_map);
/* Print info about this processor */
print_cpu_info(&S390_lowcore.cpu_data);
/* cpu_idle will call schedule for us */
- return cpu_idle(NULL);
+ cpu_idle();
+ return 0;
}
-static struct task_struct *__devinit fork_by_hand(void)
+static void __init smp_create_idle(unsigned int cpu)
{
- struct pt_regs regs;
- /* don't care about the psw and regs settings since we'll never
- reschedule the forked task. */
-	memset(&regs,0,sizeof(struct pt_regs));
-	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
+ struct task_struct *p;
+
+ /*
+ * don't care about the psw and regs settings since we'll never
+ * reschedule the forked task.
+ */
+ p = fork_idle(cpu);
+ if (IS_ERR(p))
+ panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
+ current_set[cpu] = p;
+}
+
+/* Reserving and releasing of CPUs */
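+/*
+ * A cpu returned by smp_get_cpu is pinned online via a reservation
+ * count: __cpu_disable fails with -EBUSY while the count is non-zero,
+ * until a matching smp_put_cpu releases the cpu again.
+ */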
+
+static DEFINE_SPINLOCK(smp_reserve_lock);
+static int smp_cpu_reserved[NR_CPUS];
+
+int
+smp_get_cpu(cpumask_t cpu_mask)
+{
+ unsigned long flags;
+ int cpu;
+
+ spin_lock_irqsave(&smp_reserve_lock, flags);
+ /* Try to find an already reserved cpu. */
+ for_each_cpu_mask(cpu, cpu_mask) {
+ if (smp_cpu_reserved[cpu] != 0) {
+ smp_cpu_reserved[cpu]++;
+ /* Found one. */
+ goto out;
+ }
+ }
+ /* Reserve a new cpu from cpu_mask. */
+ for_each_cpu_mask(cpu, cpu_mask) {
+ if (cpu_online(cpu)) {
+ smp_cpu_reserved[cpu]++;
+ goto out;
+ }
+ }
+ cpu = -ENODEV;
+out:
+ spin_unlock_irqrestore(&smp_reserve_lock, flags);
+ return cpu;
+}
+
+void
+smp_put_cpu(int cpu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&smp_reserve_lock, flags);
+ smp_cpu_reserved[cpu]--;
+ spin_unlock_irqrestore(&smp_reserve_lock, flags);
+}
+
+static inline int
+cpu_stopped(int cpu)
+{
+ __u32 status;
+
+ /* Check for stopped state */
+ if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
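+		/* bit 0x40 of the status word indicates stopped state */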
+ if (status & 0x40)
+ return 1;
+ }
+ return 0;
}
-int __cpu_up(unsigned int cpu)
+/* Upping and downing of CPUs */
+
+int
+__cpu_up(unsigned int cpu)
{
- struct task_struct *idle;
+ struct task_struct *idle;
struct _lowcore *cpu_lowcore;
+ struct stack_frame *sf;
sigp_ccode ccode;
+ int curr_cpu;
- /*
- * Set prefix page for new cpu
- */
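+	/* Find a physical cpu address in the stopped state and bind it
+	 * to logical cpu number 'cpu'. */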
+ for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
+ __cpu_logical_map[cpu] = (__u16) curr_cpu;
+ if (cpu_stopped(cpu))
+ break;
+ }
+
+ if (!cpu_stopped(cpu))
+ return -ENODEV;
- ccode = signal_processor_p((unsigned long)(lowcore_ptr[cpu]),
+ ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
cpu, sigp_set_prefix);
if (ccode){
printk("sigp_set_prefix failed for cpu %d "
return -EIO;
}
- /* We can't use kernel_thread since we must _avoid_ to reschedule
- the child. */
- idle = fork_by_hand();
- if (IS_ERR(idle)){
- printk("failed fork for CPU %d", cpu);
- return -EIO;
- }
- wake_up_forked_process(idle);
-
- /*
- * We remove it from the pidhash and the runqueue
- * once we got the process:
- */
- init_idle(idle, cpu);
-
- unhash_process(idle);
-
+ idle = current_set[cpu];
cpu_lowcore = lowcore_ptr[cpu];
- cpu_lowcore->save_area[15] = idle->thread.ksp;
cpu_lowcore->kernel_stack = (unsigned long)
- idle->thread_info + (THREAD_SIZE);
+ task_stack_page(idle) + (THREAD_SIZE);
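+	/* Build an initial stack frame below room for a pt_regs area;
+	 * the zeroed back chain terminates unwinding and save_area[15]
+	 * hands the frame address to the restarted cpu as its stack
+	 * pointer. */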
+ sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
+ - sizeof(struct pt_regs)
+ - sizeof(struct stack_frame));
+ memset(sf, 0, sizeof(struct stack_frame));
+ sf->gprs[9] = (unsigned long) sf;
+ cpu_lowcore->save_area[15] = (unsigned long) sf;
__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
__asm__ __volatile__("stam 0,15,0(%0)"
: : "a" (&cpu_lowcore->access_regs_save_area)
eieio();
signal_processor(cpu,sigp_restart);
- while (!cpu_online(cpu));
+ while (!cpu_online(cpu))
+ cpu_relax();
+ return 0;
+}
+
+static unsigned int __initdata additional_cpus;
+static unsigned int __initdata possible_cpus;
+
+void __init smp_setup_cpu_possible_map(void)
+{
+ unsigned int phy_cpus, pos_cpus, cpu;
+
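+	/*
+	 * The possible map covers the detected cpus plus
+	 * "additional_cpus", unless overridden by "possible_cpus";
+	 * both variants are capped at NR_CPUS. Only physically
+	 * detected cpus become present here.
+	 */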
+ phy_cpus = smp_count_cpus();
+ pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);
+
+ if (possible_cpus)
+ pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);
+
+ for (cpu = 0; cpu < pos_cpus; cpu++)
+ cpu_set(cpu, cpu_possible_map);
+
+ phy_cpus = min(phy_cpus, pos_cpus);
+
+ for (cpu = 0; cpu < phy_cpus; cpu++)
+ cpu_set(cpu, cpu_present_map);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int __init setup_additional_cpus(char *s)
+{
+ additional_cpus = simple_strtoul(s, NULL, 0);
+ return 0;
+}
+early_param("additional_cpus", setup_additional_cpus);
+
+static int __init setup_possible_cpus(char *s)
+{
+ possible_cpus = simple_strtoul(s, NULL, 0);
+ return 0;
+}
+early_param("possible_cpus", setup_possible_cpus);
+
+int
+__cpu_disable(void)
+{
+ unsigned long flags;
+ ec_creg_mask_parms cr_parms;
+ int cpu = smp_processor_id();
+
+ spin_lock_irqsave(&smp_reserve_lock, flags);
+ if (smp_cpu_reserved[cpu] != 0) {
+ spin_unlock_irqrestore(&smp_reserve_lock, flags);
+ return -EBUSY;
+ }
+ cpu_clear(cpu, cpu_online_map);
+
+#ifdef CONFIG_PFAULT
+ /* Disable pfault pseudo page faults on this cpu. */
+ if (MACHINE_IS_VM)
+ pfault_fini();
+#endif
+
+ /* disable all external interrupts */
+
+ cr_parms.start_ctl = 0;
+ cr_parms.end_ctl = 0;
+ cr_parms.orvals[0] = 0;
+ cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
+ 1<<11 | 1<<10 | 1<< 6 | 1<< 4);
+ smp_ctl_bit_callback(&cr_parms);
+
+ /* disable all I/O interrupts */
+
+ cr_parms.start_ctl = 6;
+ cr_parms.end_ctl = 6;
+ cr_parms.orvals[6] = 0;
+ cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
+ 1<<27 | 1<<26 | 1<<25 | 1<<24);
+ smp_ctl_bit_callback(&cr_parms);
+
+ /* disable most machine checks */
+
+ cr_parms.start_ctl = 14;
+ cr_parms.end_ctl = 14;
+ cr_parms.orvals[14] = 0;
+ cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
+ smp_ctl_bit_callback(&cr_parms);
+
+ spin_unlock_irqrestore(&smp_reserve_lock, flags);
return 0;
}
+void
+__cpu_die(unsigned int cpu)
+{
+ /* Wait until target cpu is down */
+ while (!smp_cpu_not_running(cpu))
+ cpu_relax();
+ printk("Processor %d spun down\n", cpu);
+}
+
+void
+cpu_die(void)
+{
+ idle_task_exit();
+ signal_processor(smp_processor_id(), sigp_stop);
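+	/* The sigp above stops this cpu; BUG() and the loop are
+	 * safety nets that should never be reached. */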
+ BUG();
+ for(;;);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
/*
* Cycle through the processors and setup structures.
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- unsigned long async_stack;
+ unsigned long stack;
+ unsigned int cpu;
int i;
- /* request the 0x1202 external interrupt */
- if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
- panic("Couldn't request external interrupt 0x1202");
- smp_check_cpus(max_cpus);
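+	/* An emergency signal (0x1201) is used instead of an external
+	 * call (0x1202): one emergency signal may be pending per
+	 * sending cpu, while only a single external call can be
+	 * pending per target cpu. */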
+ /* request the 0x1201 emergency signal external interrupt */
+ if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
+ panic("Couldn't request external interrupt 0x1201");
memset(lowcore_ptr,0,sizeof(lowcore_ptr));
/*
* Initialize prefix pages and stacks for all possible cpus
lowcore_ptr[i] = (struct _lowcore *)
__get_free_pages(GFP_KERNEL|GFP_DMA,
sizeof(void*) == 8 ? 1 : 0);
- async_stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER);
- if (lowcore_ptr[i] == NULL || async_stack == 0ULL)
+ stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER);
+ if (lowcore_ptr[i] == NULL || stack == 0ULL)
panic("smp_boot_cpus failed to allocate memory\n");
*(lowcore_ptr[i]) = S390_lowcore;
- lowcore_ptr[i]->async_stack = async_stack + (ASYNC_SIZE);
+ lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
+ stack = __get_free_pages(GFP_KERNEL,0);
+ if (stack == 0ULL)
+ panic("smp_boot_cpus failed to allocate memory\n");
+ lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
+#ifndef CONFIG_64BIT
+ if (MACHINE_HAS_IEEE) {
+ lowcore_ptr[i]->extended_save_area_addr =
+ (__u32) __get_free_pages(GFP_KERNEL,0);
+ if (lowcore_ptr[i]->extended_save_area_addr == 0)
+ panic("smp_boot_cpus failed to "
+ "allocate memory\n");
+ }
+#endif
}
+#ifndef CONFIG_64BIT
+ if (MACHINE_HAS_IEEE)
+ ctl_set_bit(14, 29); /* enable extended save area */
+#endif
set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
+
+ for_each_cpu(cpu)
+ if (cpu != smp_processor_id())
+ smp_create_idle(cpu);
}
void __devinit smp_prepare_boot_cpu(void)
{
- cpu_set(smp_processor_id(), cpu_online_map);
- cpu_set(smp_processor_id(), cpu_possible_map);
- S390_lowcore.percpu_offset = __per_cpu_offset[smp_processor_id()];
+ BUG_ON(smp_processor_id() != 0);
+
+ cpu_set(0, cpu_online_map);
+ S390_lowcore.percpu_offset = __per_cpu_offset[0];
+ current_set[0] = current;
}
void smp_cpus_done(unsigned int max_cpus)
{
+ cpu_present_map = cpu_possible_map;
}
/*
return 0;
}
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static int __init topology_init(void)
+{
+ int cpu;
+ int ret;
+
+ for_each_cpu(cpu) {
+ ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
+ if (ret)
+ printk(KERN_WARNING "topology_init: register_cpu %d "
+ "failed (%d)\n", cpu, ret);
+ }
+ return 0;
+}
+
+subsys_initcall(topology_init);
+
+EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);
+EXPORT_SYMBOL(smp_get_cpu);
+EXPORT_SYMBOL(smp_put_cpu);
+