/*
 *  arch/s390/kernel/smp.c
 *
 *  Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 *  We work with logical cpu numbering everywhere we can. The only
 *  functions using the real cpu address (got from STAP) are the sigp
 *  functions. For all other functions we use the identity mapping.
 *  That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 *  used e.g. to find the idle task belonging to a logical cpu. Every array
 *  in the kernel is sorted by the logical cpu number and not by the physical
 *  one which is causing all the confusion with __cpu_logical_map and
 *  cpu_number_map in other architectures.
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>

#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>

#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
/* prototypes */
extern int cpu_idle(void * unused);

extern volatile int __cpu_logical_map[];

/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
cycles_t         cacheflush_time = 0;
int              smp_threads_ready = 0;  /* Set when the idlers are all forked. */

cpumask_t cpu_online_map;
cpumask_t cpu_possible_map;
unsigned long    cache_decay_ticks = 0;

EXPORT_SYMBOL(cpu_online_map);
/*
 * Reboot, halt and power_off routines for SMP.
 */
extern char vmhalt_cmd[];
extern char vmpoff_cmd[];

extern void reipl(unsigned long devno);

static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
};

static struct call_data_struct * call_data;
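
/*
 * call_data points at the sender's on-stack call_data_struct. It is
 * only valid while the sender holds call_lock; receivers pick it up
 * in do_call_function() below.
 */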
/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        atomic_inc(&call_data->started);
        (*func)(info);
        if (wait)
                atomic_inc(&call_data->finished);
}
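
/*
 * started is incremented before the function runs, so the sender can
 * tell that every cpu has picked up call_data; finished is only
 * incremented when the sender asked to wait for completion.
 */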
/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
                        int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
{
        struct call_data_struct data;
        int cpus = num_online_cpus() - 1;

        if (cpus <= 0)
                return 0;

        /* FIXME: get cpu lock -hc */

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock(&call_lock);
        call_data = &data;
        /* Send a message to all other CPUs and wait for them to respond */
        smp_ext_bitcall_others(ec_call_function);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        cpu_relax();
        spin_unlock(&call_lock);

        return 0;
}
/*
 * Call a function on one CPU
 * cpu : the CPU the function should be executed on
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. You may call it from a bottom half.
 *
 * It is guaranteed that the called function runs on the specified CPU,
 * preemption is disabled.
 */
int smp_call_function_on(void (*func) (void *info), void *info,
                         int nonatomic, int wait, int cpu)
{
        struct call_data_struct data;
        int curr_cpu;

        if (!cpu_online(cpu))
                return -EINVAL;

        /* disable preemption for local function call */
        curr_cpu = get_cpu();

        if (curr_cpu == cpu) {
                /* direct call to function */
                func(info);
                put_cpu();
                return 0;
        }

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock_bh(&call_lock);
        call_data = &data;
        smp_ext_bitcall(cpu, ec_call_function);

        /* Wait for response */
        while (atomic_read(&data.started) != 1)
                cpu_relax();

        if (wait)
                while (atomic_read(&data.finished) != 1)
                        cpu_relax();

        spin_unlock_bh(&call_lock);
        put_cpu();
        return 0;
}

EXPORT_SYMBOL(smp_call_function_on);
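
/*
 * A (hypothetical) usage example: run a fast, non-blocking setup
 * function on cpu 1 and wait for it to complete:
 *
 *      smp_call_function_on(my_setup_func, NULL, 0, 1, 1);
 *
 * where my_setup_func is a void (*)(void *) supplied by the caller.
 */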
static inline void do_send_stop(void)
{
        unsigned long dummy;
        int i, rc;

        /* stop all processors */
        for (i = 0; i < NR_CPUS; i++) {
                if (!cpu_online(i) || smp_processor_id() == i)
                        continue;
                do {
                        rc = signal_processor_ps(&dummy, 0, i, sigp_stop);
                } while (rc == sigp_busy);
        }
}
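
/*
 * Note: a sigp order may be rejected with condition code "busy" while
 * the addressed cpu is still processing a previous order; the loops
 * above and below therefore retry until the order is accepted.
 */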
static inline void do_store_status(void)
{
        unsigned long low_core_addr;
        unsigned long dummy;
        int i, rc;

        /* store status of all processors in their lowcores (real 0) */
        for (i = 0; i < NR_CPUS; i++) {
                if (!cpu_online(i) || smp_processor_id() == i)
                        continue;
                low_core_addr = (unsigned long) lowcore_ptr[i];
                do {
                        rc = signal_processor_ps(&dummy, low_core_addr, i,
                                                 sigp_store_status_at_address);
                } while (rc == sigp_busy);
        }
}
/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
        /* write magic number to zero page (absolute 0) */
        lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

        /* stop other processors. */
        do_send_stop();

        /* store status of other processors. */
        do_store_status();
}
/*
 * Reboot, halt and power_off routines for SMP.
 */
static cpumask_t cpu_restart_map;

static void do_machine_restart(void * __unused)
{
        cpu_clear(smp_processor_id(), cpu_restart_map);
        if (smp_processor_id() == 0) {
                /* Wait for all other cpus to enter do_machine_restart. */
                while (!cpus_empty(cpu_restart_map))
                        cpu_relax();
                /* Store status of other cpus. */
                do_store_status();
                /*
                 * Finally call reipl. Because we waited for all other
                 * cpus to enter this function we know that they do
                 * not hold any s390irq-locks (the cpus have been
                 * interrupted by an external interrupt and s390irq
                 * locks are always held disabled).
                 */
                if (MACHINE_IS_VM)
                        cpcmd ("IPL", NULL, 0);
                else
                        reipl (0x10000 | S390_lowcore.ipl_device);
        }
        signal_processor(smp_processor_id(), sigp_stop);
}
void machine_restart_smp(char * __unused)
{
        cpu_restart_map = cpu_online_map;
        on_each_cpu(do_machine_restart, NULL, 0, 0);
}
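
/*
 * Park a cpu: clear the external (CR0) and I/O (CR6) interruption
 * subclass masks, then wait in an enabled wait state until the cpu
 * is stopped via sigp.
 */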
static void do_wait_for_stop(void)
{
        unsigned long cr[16];

        __ctl_store(cr, 0, 15);
        cr[0] &= ~0xffff;
        cr[6] = 0;
        __ctl_load(cr, 0, 15);
        for (;;)
                enabled_wait();
}
static void do_machine_halt(void * __unused)
{
        if (smp_processor_id() == 0) {
                smp_send_stop();
                if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
                        cpcmd(vmhalt_cmd, NULL, 0);
                signal_processor(smp_processor_id(),
                                 sigp_stop_and_store_status);
        }
        do_wait_for_stop();
}

void machine_halt_smp(void)
{
        on_each_cpu(do_machine_halt, NULL, 0, 0);
}
static void do_machine_power_off(void * __unused)
{
        if (smp_processor_id() == 0) {
                smp_send_stop();
                if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
                        cpcmd(vmpoff_cmd, NULL, 0);
                signal_processor(smp_processor_id(),
                                 sigp_stop_and_store_status);
        }
        do_wait_for_stop();
}

void machine_power_off_smp(void)
{
        on_each_cpu(do_machine_power_off, NULL, 0, 0);
}
/*
 * This is the main routine where commands issued by other
 * cpus should be handled.
 */
void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
{
        unsigned long bits;

        /*
         * handle bit signal external calls
         *
         * For the ec_schedule signal we have to do nothing. All the work
         * is done automatically when we return from the interrupt.
         */
        bits = xchg(&S390_lowcore.ext_call_fast, 0);

        if (test_bit(ec_call_function, &bits))
                do_call_function();
}
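
/*
 * The signal bits live in the ext_call_fast word in the target cpu's
 * lowcore, so several senders may coalesce into a single external call
 * interrupt; the receiver collects all pending bits with one xchg().
 */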
/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
        /*
         * Set signaling bit in lowcore of target cpu and kick it
         */
        set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
        while (signal_processor(cpu, sigp_external_call) == sigp_busy)
                udelay(10);
}
374 * Send an external call sigp to every other cpu in the system and
375 * return without waiting for its completion.
377 static void smp_ext_bitcall_others(ec_bit_sig sig)
381 for (i = 0; i < NR_CPUS; i++) {
382 if (!cpu_online(i) || smp_processor_id() == i)
385 * Set signaling bit in lowcore of target cpu and kick it
387 set_bit(sig, (unsigned long *) &lowcore_ptr[i]->ext_call_fast);
388 while (signal_processor(i, sigp_external_call) == sigp_busy)
#ifndef CONFIG_ARCH_S390X
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
        local_flush_tlb();
}

void smp_ptlb_all(void)
{
        on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_ARCH_S390X */
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        smp_ext_bitcall(cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
typedef struct
{
        __u16 start_ctl;
        __u16 end_ctl;
        unsigned long orvals[16];
        unsigned long andvals[16];
} ec_creg_mask_parms;
/*
 * callback for setting/clearing control bits
 */
void smp_ctl_bit_callback(void *info) {
        ec_creg_mask_parms *pp;
        unsigned long cregs[16];
        int i;

        pp = (ec_creg_mask_parms *) info;
        __ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
        for (i = pp->start_ctl; i <= pp->end_ctl; i++)
                cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
        __ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
}
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit) {
        ec_creg_mask_parms parms;

        parms.start_ctl = cr;
        parms.end_ctl = cr;
        parms.orvals[cr] = 1 << bit;
        parms.andvals[cr] = -1L;
        preempt_disable();
        smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
        __ctl_set_bit(cr, bit);
        preempt_enable();
}
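
/*
 * Example (hypothetical): smp_ctl_set_bit(0, 13) ORs the value 1 << 13
 * into control register 0 on every cpu, including the current one.
 */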
/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit) {
        ec_creg_mask_parms parms;

        parms.start_ctl = cr;
        parms.end_ctl = cr;
        parms.orvals[cr] = 0;
        parms.andvals[cr] = ~(1L << bit);
        preempt_disable();
        smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
        __ctl_clear_bit(cr, bit);
        preempt_enable();
}
/*
 * Let's check how many CPUs we have.
 */
void __init smp_check_cpus(unsigned int max_cpus)
{
        int curr_cpu, num_cpus;
        __u16 boot_cpu_addr;

        boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
        current_thread_info()->cpu = 0;
        num_cpus = 1;
        for (curr_cpu = 0;
             curr_cpu <= 65535 && num_cpus < max_cpus; curr_cpu++) {
                if ((__u16) curr_cpu == boot_cpu_addr)
                        continue;
                __cpu_logical_map[num_cpus] = (__u16) curr_cpu;
                if (signal_processor(num_cpus, sigp_sense) ==
                    sigp_not_operational)
                        continue;
                cpu_set(num_cpus, cpu_possible_map);
                num_cpus++;
        }
        printk("Detected %d CPUs\n", (int) num_cpus);
        printk("Boot cpu address %2X\n", boot_cpu_addr);
}
/*
 * Activate a secondary processor.
 */
extern void init_cpu_timer(void);
extern int pfault_init(void);
extern int pfault_token(void);

int __devinit start_secondary(void *cpuvoid)
{
        /* Setup the cpu */
        cpu_init();
        /* init per CPU timer */
        init_cpu_timer();
#ifdef CONFIG_PFAULT
        /* Enable pfault pseudo page faults on this cpu. */
        pfault_init();
#endif
        /* Mark this cpu as online */
        cpu_set(smp_processor_id(), cpu_online_map);
        /* Switch on interrupts */
        local_irq_enable();
        /* Print info about this processor */
        print_cpu_info(&S390_lowcore.cpu_data);
        /* cpu_idle will call schedule for us */
        return cpu_idle(NULL);
}
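
/*
 * fork_by_hand() creates the idle task for a new cpu. copy_process()
 * is called directly (instead of kernel_thread()) because the child
 * must never be scheduled in the usual way; CLONE_IDLETASK marks it
 * as an idle task.
 */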
static struct task_struct *__devinit fork_by_hand(void)
{
        struct pt_regs regs;

        /* don't care about the psw and regs settings since we'll never
           reschedule the forked task. */
        memset(&regs, 0, sizeof(struct pt_regs));
        return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
}
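
/*
 * Bring a secondary cpu up: point its prefix register at the private
 * lowcore prepared in smp_prepare_cpus(), seed that lowcore with the
 * idle task's stack and register state, and kick the cpu with sigp
 * restart so that it enters the kernel through the restart new PSW.
 */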
int __cpu_up(unsigned int cpu)
{
        struct task_struct *idle;
        struct _lowcore    *cpu_lowcore;
        sigp_ccode ccode;

        /*
         * Set prefix page for new cpu
         */
        ccode = signal_processor_p((unsigned long)(lowcore_ptr[cpu]),
                                   cpu, sigp_set_prefix);
        if (ccode) {
                printk("sigp_set_prefix failed for cpu %d "
                       "with condition code %d\n",
                       (int) cpu, (int) ccode);
                return -EIO;
        }

        /* We can't use kernel_thread since we must _avoid_ to reschedule
           the child. */
        idle = fork_by_hand();
        if (IS_ERR(idle)) {
                printk("failed fork for CPU %d", cpu);
                return -EIO;
        }
        wake_up_forked_process(idle);

        /*
         * We remove it from the pidhash and the runqueue
         * once we got the process:
         */
        init_idle(idle, cpu);

        unhash_process(idle);

        cpu_lowcore = lowcore_ptr[cpu];
        cpu_lowcore->save_area[15] = idle->thread.ksp;
        cpu_lowcore->kernel_stack = (unsigned long)
                idle->thread_info + (THREAD_SIZE);
        __ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
        __asm__ __volatile__("stam  0,15,0(%0)"
                             : : "a" (&cpu_lowcore->access_regs_save_area)
                             : "memory");
        cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
        cpu_lowcore->current_task = (unsigned long) idle;
        cpu_lowcore->cpu_data.cpu_nr = cpu;

        signal_processor(cpu, sigp_restart);

        while (!cpu_online(cpu));
        return 0;
}
/*
 * Cycle through the processors and setup structures.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned long async_stack;
        int i;

        /* request the 0x1202 external interrupt */
        if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
                panic("Couldn't request external interrupt 0x1202");
        smp_check_cpus(max_cpus);
        memset(lowcore_ptr, 0, sizeof(lowcore_ptr));

        /*
         * Initialize prefix pages and stacks for all possible cpus
         */
        print_cpu_info(&S390_lowcore.cpu_data);

        for (i = 0; i < NR_CPUS; i++) {
                if (!cpu_possible(i))
                        continue;
                lowcore_ptr[i] = (struct _lowcore *)
                        __get_free_pages(GFP_KERNEL|GFP_DMA,
                                         sizeof(void*) == 8 ? 1 : 0);
                async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
                if (lowcore_ptr[i] == NULL || async_stack == 0ULL)
                        panic("smp_boot_cpus failed to allocate memory\n");

                *(lowcore_ptr[i]) = S390_lowcore;
                lowcore_ptr[i]->async_stack = async_stack + (ASYNC_SIZE);
        }
        set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
}
void __devinit smp_prepare_boot_cpu(void)
{
        cpu_set(smp_processor_id(), cpu_online_map);
        cpu_set(smp_processor_id(), cpu_possible_map);
        S390_lowcore.percpu_offset = __per_cpu_offset[smp_processor_id()];
}
void smp_cpus_done(unsigned int max_cpus)
{
}
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);