/*
 *  arch/s390/kernel/smp.c
 *
 *  Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (obtained from STAP) are the sigp
 * functions. For all other functions we use the identity mapping,
 * i.e. cpu_number_map[i] == i for every cpu. cpu_number_map is used
 * e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the
 * physical one, which is what causes all the confusion with
 * __cpu_logical_map and cpu_number_map in other architectures.
 */
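/*
 * Illustration only (not part of the original file): the sigp wrappers are
 * the single place where a logical cpu number is translated to the physical
 * cpu address via __cpu_logical_map; everything else indexes per-cpu arrays
 * with the logical number directly. A minimal sketch, assuming a
 * hypothetical raw_sigp() primitive that takes the physical address:
 *
 *	static sigp_ccode example_sigp(int logical_cpu, sigp_order_code order)
 *	{
 *		__u16 phys_addr = __cpu_logical_map[logical_cpu];
 *
 *		return raw_sigp(phys_addr, order);
 *	}
 */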
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <asm/pgalloc.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
extern int cpu_idle(void * unused);

extern volatile int __cpu_logical_map[];
/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];

cycles_t cacheflush_time = 0;
int smp_threads_ready = 0;	/* Set when the idlers are all forked. */

cpumask_t cpu_online_map;
cpumask_t cpu_possible_map;
unsigned long cache_decay_ticks = 0;

EXPORT_SYMBOL(cpu_online_map);
/*
 * Reboot, halt and power_off routines for SMP.
 */
extern char vmhalt_cmd[];
extern char vmpoff_cmd[];

extern void reipl(unsigned long devno);

static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct * call_data;
/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	atomic_inc(&call_data->started);
	(*func)(info);
	if (wait)
		atomic_inc(&call_data->finished);
}
/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other
 * CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return
 * until remote CPUs are nearly ready to execute <<func>> or have already
 * executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half handler.
 */
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	/* FIXME: get cpu lock -hc */
	if (cpus <= 0)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	call_data = &data;
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ext_bitcall_others(ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}
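/*
 * Usage sketch (illustration only, not part of the original file): run a
 * fast, non-blocking callback on every other cpu and wait (wait = 1) until
 * all of them have finished. example_cb and example_count_others are
 * hypothetical names.
 *
 *	static void example_cb(void *info)
 *	{
 *		atomic_inc((atomic_t *) info);
 *	}
 *
 *	static int example_count_others(void)
 *	{
 *		atomic_t hits = ATOMIC_INIT(0);
 *
 *		smp_call_function(example_cb, &hits, 0, 1);
 *		return atomic_read(&hits);
 *	}
 */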
/*
 * Call a function on one CPU
 * cpu : the CPU the function should be executed on
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. You may call it from a bottom half.
 *
 * It is guaranteed that the called function runs on the specified CPU;
 * preemption is disabled while it runs.
 */
int smp_call_function_on(void (*func) (void *info), void *info,
			 int nonatomic, int wait, int cpu)
{
	struct call_data_struct data;
	int curr_cpu;

	if (!cpu_online(cpu))
		return -EINVAL;

	/* disable preemption for local function call */
	curr_cpu = get_cpu();

	if (curr_cpu == cpu) {
		/* direct call to function */
		func(info);
		put_cpu();
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);
	call_data = &data;
	smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != 1)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != 1)
			cpu_relax();

	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}

EXPORT_SYMBOL(smp_call_function_on);
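/*
 * Usage sketch (illustration only): execute a short function on one
 * specific cpu and wait for the result; thanks to the direct-call path
 * above it is also safe to name the current cpu. example_get_cpu_nr is a
 * hypothetical name.
 *
 *	static void example_get_cpu_nr(void *info)
 *	{
 *		*(unsigned int *) info = smp_processor_id();
 *	}
 *
 *	unsigned int nr;
 *	smp_call_function_on(example_get_cpu_nr, &nr, 0, 1, cpu);
 */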
static inline void do_send_stop(void)
{
	__u32 dummy;
	int i, rc;

	/* stop all processors */
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i) || smp_processor_id() == i)
			continue;
		do {
			rc = signal_processor_ps(&dummy, 0, i, sigp_stop);
		} while (rc == sigp_busy);
	}
}
static inline void do_store_status(void)
{
	unsigned long low_core_addr;
	__u32 dummy;
	int i, rc;

	/* store status of all processors in their lowcores (real 0) */
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i) || smp_processor_id() == i)
			continue;
		low_core_addr = (unsigned long) lowcore_ptr[i];
		do {
			rc = signal_processor_ps(&dummy, low_core_addr, i,
						 sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}
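/*
 * Note (descriptive addition): sigp_store_status_at_address makes the
 * addressed cpu write its architected status - general and control
 * registers, psw, prefix and timers - to the given real address, here the
 * lowcore page of each stopped cpu, where a dump tool can pick it up.
 */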
/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* store status of other processors. */
	do_store_status();
}
/*
 * Reboot, halt and power_off routines for SMP.
 */
static cpumask_t cpu_restart_map;

static void do_machine_restart(void * __unused)
{
	cpu_clear(smp_processor_id(), cpu_restart_map);
	if (smp_processor_id() == 0) {
		/* Wait for all other cpus to enter do_machine_restart. */
		while (!cpus_empty(cpu_restart_map))
			cpu_relax();
		/* Store status of other cpus. */
		do_store_status();
		/*
		 * Finally call reipl. Because we waited for all other
		 * cpus to enter this function we know that they do
		 * not hold any s390irq-locks (the cpus have been
		 * interrupted by an external interrupt and s390irq
		 * locks are always held disabled).
		 */
		if (MACHINE_IS_VM)
			cpcmd ("IPL", NULL, 0);
		else
			reipl (0x10000 | S390_lowcore.ipl_device);
	}
	signal_processor(smp_processor_id(), sigp_stop);
}
void machine_restart_smp(char * __unused)
{
	cpu_restart_map = cpu_online_map;
	on_each_cpu(do_machine_restart, NULL, 0, 0);
}
static void do_wait_for_stop(void)
{
	unsigned long cr[16];

	__ctl_store(cr, 0, 15);
	cr[0] &= ~0xffff;	/* disable all external interrupt subclasses */
	cr[6] = 0;		/* disable all I/O interrupt subclasses */
	__ctl_load(cr, 0, 15);
	for (;;)
		enabled_wait();
}
static void do_machine_halt(void * __unused)
{
	if (smp_processor_id() == 0) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
			cpcmd(vmhalt_cmd, NULL, 0);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_halt_smp(void)
{
	on_each_cpu(do_machine_halt, NULL, 0, 0);
}
static void do_machine_power_off(void * __unused)
{
	if (smp_processor_id() == 0) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
			cpcmd(vmpoff_cmd, NULL, 0);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_power_off_smp(void)
{
	on_each_cpu(do_machine_power_off, NULL, 0, 0);
}
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}
/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_external_call) == sigp_busy)
		udelay(10);
}
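/*
 * Note (descriptive addition) on the pairing with do_ext_call_interrupt()
 * above: the sender sets a bit in the target's lowcore and raises the
 * 0x1202 external call; the receiver consumes all pending bits atomically
 * with xchg(), so several signals sent back-to-back may be folded into a
 * single interrupt - which is fine, since each bit is only a "work is
 * pending" flag.
 */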
/*
 * Send an external call sigp to every other cpu in the system and
 * return without waiting for its completion.
 */
static void smp_ext_bitcall_others(ec_bit_sig sig)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i) || smp_processor_id() == i)
			continue;
		/*
		 * Set signaling bit in lowcore of target cpu and kick it
		 */
		set_bit(sig, (unsigned long *) &lowcore_ptr[i]->ext_call_fast);
		while (signal_processor(i, sigp_external_call) == sigp_busy)
			udelay(10);
	}
}
#ifndef CONFIG_ARCH_S390X
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_ARCH_S390X */
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
typedef struct
{
	__u16 start_ctl;
	__u16 end_ctl;
	unsigned long orvals[16];
	unsigned long andvals[16];
} ec_creg_mask_parms;

/*
 * callback for setting/clearing control bits
 */
void smp_ctl_bit_callback(void *info) {
	ec_creg_mask_parms *pp;
	unsigned long cregs[16];
	int i;

	pp = (ec_creg_mask_parms *) info;
	__ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
	for (i = pp->start_ctl; i <= pp->end_ctl; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
}
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit) {
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 1 << bit;
	parms.andvals[cr] = -1L;
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_set_bit(cr, bit);
}

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit) {
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 0;
	parms.andvals[cr] = ~(1L << bit);
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_clear_bit(cr, bit);
}
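/*
 * Usage sketch (illustration only): the or/and mask pairs let the single
 * callback above both set and clear bits, since it applies
 * cregs[i] = (cregs[i] & andvals[i]) | orvals[i]. Setting and clearing
 * bit 17 of control register 0 on every cpu would look like:
 *
 *	smp_ctl_set_bit(0, 17);		(cr0 |= 1UL << 17 everywhere)
 *	smp_ctl_clear_bit(0, 17);	(cr0 &= ~(1UL << 17) everywhere)
 *
 * Set uses andvals = -1L so no bit is cleared; clear uses orvals = 0 so
 * no bit is set.
 */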
/*
 * Let's check how many CPUs we have.
 */
void __init smp_check_cpus(unsigned int max_cpus)
{
	int curr_cpu, num_cpus;
	__u16 boot_cpu_addr;

	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (curr_cpu = 0;
	     curr_cpu <= 65535 && num_cpus < max_cpus; curr_cpu++) {
		if ((__u16) curr_cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[num_cpus] = (__u16) curr_cpu;
		if (signal_processor(num_cpus, sigp_sense) ==
		    sigp_not_operational)
			continue;
		cpu_set(num_cpus, cpu_possible_map);
		num_cpus++;
	}
	printk("Detected %d CPUs\n", num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);
}
/*
 * Activate a secondary processor.
 */
extern void init_cpu_timer(void);
extern int pfault_init(void);
extern int pfault_token(void);

int __devinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	/* init per CPU timer */
	init_cpu_timer();
#ifdef CONFIG_PFAULT
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();
#endif
	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	return cpu_idle(NULL);
}
static struct task_struct *__devinit fork_by_hand(void)
{
	struct pt_regs regs;
	/* don't care about the psw and regs settings since we'll never
	   reschedule the forked task. */
	memset(&regs, 0, sizeof(struct pt_regs));
	return copy_process(CLONE_VM | CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
}
int __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	sigp_ccode ccode;

	/*
	 * Set prefix page for new cpu
	 */
	ccode = signal_processor_p((unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	/* We can't use kernel_thread since we must avoid rescheduling
	   the forked task. */
	idle = fork_by_hand();
	if (IS_ERR(idle)) {
		printk("failed fork for CPU %d\n", cpu);
		return -EIO;
	}
	wake_up_forked_process(idle);

	/*
	 * We remove it from the pidhash and the runqueue
	 * once we got the process:
	 */
	init_idle(idle, cpu);

	unhash_process(idle);

	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->save_area[15] = idle->thread.ksp;
	cpu_lowcore->kernel_stack = (unsigned long)
		idle->thread_info + (THREAD_SIZE);
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	__asm__ __volatile__("stam 0,15,0(%0)"
			     : : "a" (&cpu_lowcore->access_regs_save_area)
			     : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	signal_processor(cpu, sigp_restart);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
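/*
 * Bring-up summary (descriptive addition): __cpu_up points the new cpu's
 * prefix register at its private lowcore, hand-forks an idle task, seeds
 * the lowcore with that task's kernel stack, control and access registers
 * and per-cpu offset, and then delivers sigp_restart; the new cpu comes up
 * in start_secondary() and finally sets itself in cpu_online_map, which is
 * what the spin loop above waits for.
 */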
/*
 * Cycle through the processors and setup structures.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long async_stack;
	int i;

	/* request the 0x1202 external interrupt */
	if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1202");
	smp_check_cpus(max_cpus);
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));

	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))
			continue;
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void *) == 8 ? 1 : 0);
		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (lowcore_ptr[i] == NULL || async_stack == 0ULL)
			panic("smp_prepare_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = async_stack + (ASYNC_SIZE);
	}
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
}
void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_possible_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[smp_processor_id()];
}
void smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu;
	int ret;

	for_each_cpu(cpu) {
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
		if (ret)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, ret);
	}
	return 0;
}

subsys_initcall(topology_init);
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);