*/
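+/* one lowcore (prefix page) pointer per possible cpu */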
struct _lowcore *lowcore_ptr[NR_CPUS];
-cycles_t cacheflush_time=0;
-int smp_threads_ready=0; /* Set when the idlers are all forked. */
cpumask_t cpu_online_map;
cpumask_t cpu_possible_map;
-unsigned long cache_decay_ticks = 0;
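+/* tasks currently running on each cpu */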
static struct task_struct *current_set[NR_CPUS];
static inline void do_send_stop(void)
{
- int i, rc;
+ int cpu, rc;
/* stop all processors */
- for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_online(i) || smp_processor_id() == i)
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
continue;
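+		/* retry: the sigp order is answered with "busy" while the
+		 * target cpu is still processing a previous order */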
do {
- rc = signal_processor(i, sigp_stop);
+ rc = signal_processor(cpu, sigp_stop);
} while (rc == sigp_busy);
}
}
static inline void do_store_status(void)
{
- int i, rc;
+ int cpu, rc;
/* store status of all processors in their lowcores (real 0) */
- for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_online(i) || smp_processor_id() == i)
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
continue;
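+		/* lowcores reside below 2GB, so the address fits into the
+		 * 32-bit parameter of the sigp order */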
do {
rc = signal_processor_p(
- (__u32)(unsigned long) lowcore_ptr[i], i,
+ (__u32)(unsigned long) lowcore_ptr[cpu], cpu,
sigp_store_status_at_address);
} while(rc == sigp_busy);
	}
}
/*
* Reboot, halt and power_off routines for SMP.
*/
-static cpumask_t cpu_restart_map;
static void do_machine_restart(void * __unused)
{
+ int cpu;
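+	/* the first cpu through here claims cpuid and drives the restart */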
static atomic_t cpuid = ATOMIC_INIT(-1);
- cpu_clear(smp_processor_id(), cpu_restart_map);
- if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
- /* Wait for all other cpus to enter do_machine_restart. */
- while (!cpus_empty(cpu_restart_map))
+ if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
+ signal_processor(smp_processor_id(), sigp_stop);
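+	/* only the cpu that won the race above continues from here on */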
+
+ /* Wait for all other cpus to enter stopped state */
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
+ continue;
+		while (!smp_cpu_not_running(cpu))
cpu_relax();
- /* Store status of other cpus. */
- do_store_status();
- /*
- * Finally call reipl. Because we waited for all other
- * cpus to enter this function we know that they do
- * not hold any s390irq-locks (the cpus have been
- * interrupted by an external interrupt and s390irq
- * locks are always held disabled).
- */
- if (MACHINE_IS_VM)
- cpcmd ("IPL", NULL, 0);
- else
- reipl (0x10000 | S390_lowcore.ipl_device);
}
- signal_processor(smp_processor_id(), sigp_stop);
+
+	/* Store status of the other cpus; the sigp store-status
+	 * order requires its target cpu to be stopped. */
+ do_store_status();
+
+	/*
+	 * Finally call reipl. Because we waited for all other
+	 * cpus to enter the stopped state we know that they do
+	 * not hold any s390irq-locks (each cpu was interrupted
+	 * by an external interrupt, and s390irq locks are always
+	 * taken with interrupts disabled).
+	 */
+	if (MACHINE_IS_VM)
+		cpcmd("IPL", NULL, 0);
+	else
+		reipl(0x10000 | S390_lowcore.ipl_device);
}
void machine_restart_smp(char * __unused)
{
- cpu_restart_map = cpu_online_map;
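+	/* run do_machine_restart on every online cpu, this one included;
+	 * wait == 0 because the stopped cpus never return */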
on_each_cpu(do_machine_restart, NULL, 0, 0);
}
*/
static void smp_ext_bitcall_others(ec_bit_sig sig)
{
- int i;
+ int cpu;
- for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_online(i) || smp_processor_id() == i)
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
continue;
/*
* Set signaling bit in lowcore of target cpu and kick it
*/
- set_bit(sig, (unsigned long *) &lowcore_ptr[i]->ext_call_fast);
- while (signal_processor(i, sigp_external_call) == sigp_busy)
+ set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
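+		/* sigp external call makes the target cpu take an external
+		 * interrupt and check the bit set above; retry while busy */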
+ while (signal_processor(cpu, sigp_external_call) == sigp_busy)
udelay(10);
}
}
*/
boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
- __cpu_logical_map[0] = boot_cpu_addr;
current_thread_info()->cpu = 0;
num_cpus = 1;
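+	/* the boot cpu is already counted; probe all other cpu addresses */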
for (cpu = 0; cpu <= 65535 && num_cpus < max_cpus; cpu++) {
__cpu_die(unsigned int cpu)
{
/* Wait until target cpu is down */
- while (!cpu_stopped(cpu))
+ while (!smp_cpu_not_running(cpu))
cpu_relax();
printk("Processor %d spun down\n", cpu);
}
BUG_ON(smp_processor_id() != 0);
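+	/* the boot cpu is online, present and possible by definition */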
cpu_set(0, cpu_online_map);
+ cpu_set(0, cpu_present_map);
cpu_set(0, cpu_possible_map);
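+	/* record the per-cpu offset of the boot cpu in its lowcore */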
S390_lowcore.percpu_offset = __per_cpu_offset[0];
current_set[0] = current;