2 * arch/ppc64/kernel/xics.c
4 * Copyright 2000 IBM Corporation.
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
11 #include <linux/config.h>
12 #include <linux/types.h>
13 #include <linux/threads.h>
14 #include <linux/kernel.h>
15 #include <linux/irq.h>
16 #include <linux/smp.h>
17 #include <linux/interrupt.h>
18 #include <linux/signal.h>
19 #include <linux/init.h>
20 #include <linux/gfp.h>
21 #include <linux/radix-tree.h>
22 #include <linux/cpu.h>
25 #include <asm/pgtable.h>
30 #include <asm/ppcdebug.h>
31 #include <asm/hvcall.h>
32 #include <asm/machdep.h>
36 static unsigned int xics_startup(unsigned int irq);
37 static void xics_enable_irq(unsigned int irq);
38 static void xics_disable_irq(unsigned int irq);
39 static void xics_mask_and_ack_irq(unsigned int irq);
40 static void xics_end_irq(unsigned int irq);
41 static void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask);
/*
 * irq handler ops for interrupts delivered natively through XICS.
 * NOTE(review): source is truncated here -- the .typename and .end
 * initializers and the closing "};" are not visible in this chunk.
 */
43 struct hw_interrupt_type xics_pic = {
45 .startup = xics_startup,
46 .enable = xics_enable_irq,
47 .disable = xics_disable_irq,
48 .ack = xics_mask_and_ack_irq,
50 .set_affinity = xics_set_affinity
/*
 * irq handler ops for legacy ISA interrupts cascaded through an i8259
 * behind XICS.  .enable/.disable are filled in at runtime from
 * i8259_pic in xics_init_IRQ(); only .ack goes through XICS here.
 * NOTE(review): initializer is truncated -- closing "};" not visible.
 */
53 struct hw_interrupt_type xics_8259_pic = {
54 .typename = " XICS/8259",
55 .ack = xics_mask_and_ack_irq,
58 /* This is used to map real irq numbers to virtual */
59 static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);
62 #define XICS_IRQ_SPURIOUS 0
64 /* Want a priority other than 0. Various HW issues require this. */
65 #define DEFAULT_PRIORITY 5
68 * Mark IPIs as higher priority so we can take them inside interrupts that
69 * aren't marked SA_INTERRUPT
71 #define IPI_PRIORITY 4
/* Per-cpu mapping of the XICS presentation layer registers (set up in
 * xics_init_IRQ via __ioremap). */
89 static struct xics_ipl *xics_per_cpu[NR_CPUS];
/* i8259 cascade irq: virtual number and real (hardware) number;
 * both set to -1 when no ISA interrupt controller is found. */
91 static int xics_irq_8259_cascade = 0;
92 static int xics_irq_8259_cascade_real = 0;
/* Interrupt-server numbers discovered from the device tree for the
 * boot cpu (single delivery vs. distributed delivery). */
93 static unsigned int default_server = 0xFF;
94 /* also referenced in smp.c... */
95 unsigned int default_distrib_server = 0;
98 * XICS only has a single IPI, so encode the messages per CPU
100 struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
102 /* RTAS service tokens */
/* NOTE(review): the lines below appear to be the members of the
 * xics_ops structure; its opening declaration is not visible in this
 * chunk.  They abstract XIRR/CPPR/QIRR access so the same code works
 * for native MMIO (pSeries_*) and hypervisor calls (pSeriesLP_*). */
109 int (*xirr_info_get)(int cpu);
110 void (*xirr_info_set)(int cpu, int val);
111 void (*cppr_info)(int cpu, u8 val);
112 void (*qirr_info)(int cpu, u8 val);
/*
 * Native (non-LPAR) accessors: read/write the XICS presentation
 * registers directly through the per-cpu MMIO mapping.
 * NOTE(review): function bodies are truncated in this chunk (braces
 * not visible); only the single access statement of each is shown.
 */
/* Read the XIRR (external interrupt request register) word. */
118 static int pSeries_xirr_info_get(int n_cpu)
120 return xics_per_cpu[n_cpu]->xirr.word;
/* Write the XIRR word (EOI plus restored priority). */
123 static void pSeries_xirr_info_set(int n_cpu, int value)
125 xics_per_cpu[n_cpu]->xirr.word = value;
/* Set the CPPR (current processor priority register) byte. */
128 static void pSeries_cppr_info(int n_cpu, u8 value)
130 xics_per_cpu[n_cpu]->xirr.bytes[0] = value;
/* Set the QIRR byte -- used to trigger/clear the IPI. */
133 static void pSeries_qirr_info(int n_cpu, u8 value)
135 xics_per_cpu[n_cpu]->qirr.bytes[0] = value;
/* Default ops table: direct MMIO access (switched to pSeriesLP_ops at
 * init time when running under an LPAR hypervisor).
 * NOTE(review): initializer truncated -- cppr/qirr entries and closing
 * brace not visible. */
138 static xics_ops pSeries_ops = {
139 pSeries_xirr_info_get,
140 pSeries_xirr_info_set,
145 static xics_ops *ops = &pSeries_ops;
/*
 * Thin wrappers around the pSeries hypervisor calls used by the LPAR
 * flavour of the accessors below.  Each returns the hcall status.
 * NOTE(review): bodies truncated (braces and, for plpar_xirr, the
 * "unsigned long dummy" declaration are not visible).
 */
/* H_EOI: signal end-of-interrupt for the given xirr value. */
150 static inline long plpar_eoi(unsigned long xirr)
152 return plpar_hcall_norets(H_EOI, xirr);
/* H_CPPR: set the current processor priority. */
155 static inline long plpar_cppr(unsigned long cppr)
157 return plpar_hcall_norets(H_CPPR, cppr);
/* H_IPI: write mfrr for the given server, raising/clearing an IPI. */
160 static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
162 return plpar_hcall_norets(H_IPI, servernum, mfrr);
/* H_XIRR: fetch the pending interrupt; result stored via xirr_ret. */
165 static inline long plpar_xirr(unsigned long *xirr_ret)
168 return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy);
/*
 * LPAR accessors: same contract as the pSeries_* MMIO versions above,
 * but routed through hypervisor calls.  All of them panic on a
 * non-H_Success return, since a failed hcall here means interrupt
 * delivery is broken beyond recovery.
 * NOTE(review): bodies are truncated in this chunk (braces and some
 * statements not visible).
 */
171 static int pSeriesLP_xirr_info_get(int n_cpu)
173 unsigned long lpar_rc;
174 unsigned long return_value;
176 lpar_rc = plpar_xirr(&return_value);
177 if (lpar_rc != H_Success)
178 panic(" bad return code xirr - rc = %lx \n", lpar_rc);
179 return (int)return_value;
182 static void pSeriesLP_xirr_info_set(int n_cpu, int value)
184 unsigned long lpar_rc;
/* Only the low 32 bits are meaningful to H_EOI. */
185 unsigned long val64 = value & 0xffffffff;
187 lpar_rc = plpar_eoi(val64);
188 if (lpar_rc != H_Success)
189 panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
193 static void pSeriesLP_cppr_info(int n_cpu, u8 value)
195 unsigned long lpar_rc;
197 lpar_rc = plpar_cppr(value);
198 if (lpar_rc != H_Success)
199 panic("bad return code cppr - rc = %lx\n", lpar_rc);
202 static void pSeriesLP_qirr_info(int n_cpu , u8 value)
204 unsigned long lpar_rc;
/* H_IPI wants the hardware (server) cpu number, not the logical one. */
206 lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
207 if (lpar_rc != H_Success)
208 panic("bad return code qirr - rc = %lx\n", lpar_rc);
/* Ops table installed by xics_init_IRQ when platform == PSERIES_LPAR.
 * NOTE(review): initializer truncated -- cppr/qirr entries and closing
 * brace not visible. */
211 xics_ops pSeriesLP_ops = {
212 pSeriesLP_xirr_info_get,
213 pSeriesLP_xirr_info_set,
/*
 * .startup hook: record the real->virtual irq mapping in the radix
 * tree so xics_get_irq() can translate incoming vectors quickly.
 * Insertion failure is only logged (lookup then falls back to the
 * slowpath); the return value is ignored by the irq core.
 */
218 static unsigned int xics_startup(unsigned int virq)
220 virq = irq_offset_down(virq);
221 if (radix_tree_insert(&irq_map, virt_irq_to_real(virq),
222 &virt_irq_to_real_map[virq]) == -ENOMEM)
223 printk(KERN_CRIT "Out of memory creating real -> virtual"
224 " IRQ mapping for irq %u (real 0x%x)\n",
225 virq, virt_irq_to_real(virq));
226 return 0; /* return value is ignored */
/*
 * Fast real->virtual translation: look the real irq up in the radix
 * tree and derive the virtual number from the entry's offset within
 * virt_irq_to_real_map.  NOTE(review): truncated -- the NULL-lookup
 * handling between these lines is not visible in this chunk.
 */
229 static unsigned int real_irq_to_virt(unsigned int real_irq)
233 ptr = radix_tree_lookup(&irq_map, real_irq);
236 return ptr - virt_irq_to_real_map;
/*
 * Pick the XICS interrupt server for an irq from its affinity mask:
 * deliver to all cpus (distributed server) or to a single cpu; fall
 * back to the boot-time default server otherwise.
 * NOTE(review): truncated -- several branches/braces and the final
 * "return server;" are not visible; the UP (#else) variant at the end
 * simply returns default_server.
 */
240 static int get_irq_server(unsigned int irq)
242 cpumask_t cpumask = irq_affinity[irq];
243 cpumask_t tmp = CPU_MASK_NONE;
246 #ifdef CONFIG_IRQ_ALL_CPUS
247 /* For the moment only implement delivery to all cpus or one cpu */
248 if (smp_threads_ready) {
249 if (cpus_equal(cpumask, CPU_MASK_ALL)) {
250 server = default_distrib_server;
252 cpus_and(tmp, cpu_online_map, cpumask);
/* (presumably: empty intersection -> distribute; TODO confirm) */
255 server = default_distrib_server;
257 server = get_hard_smp_processor_id(first_cpu(tmp));
260 server = default_server;
263 server = default_server;
/* UP / !CONFIG_IRQ_ALL_CPUS variant: always the default server. */
269 static int get_irq_server(unsigned int irq)
271 return default_server;
/*
 * Enable a virtual irq: program its XIVE (server + DEFAULT_PRIORITY)
 * via the ibm,set-xive RTAS call, then unmask it with ibm,int-on.
 * Failures are logged and the enable is abandoned.
 * NOTE(review): truncated -- local declarations, braces and the
 * early-return for XICS_IPI are not visible in this chunk.
 */
275 static void xics_enable_irq(unsigned int virq)
281 irq = virt_irq_to_real(irq_offset_down(virq));
285 server = get_irq_server(virq);
286 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
288 if (call_status != 0) {
289 printk(KERN_ERR "xics_enable_irq: irq=%x: ibm_set_xive "
290 "returned %x\n", irq, call_status);
294 /* Now unmask the interrupt (often a no-op) */
295 call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
296 if (call_status != 0) {
297 printk(KERN_ERR "xics_enable_irq: irq=%x: ibm_int_on "
298 "returned %x\n", irq, call_status);
/*
 * Disable a real (hardware) irq: mask it with ibm,int-off, then set
 * its XIVE priority to 0xff (least favored) so the source slot can be
 * reclaimed.  Failures are logged and abort the sequence.
 * NOTE(review): truncated -- local declarations and braces not
 * visible in this chunk.
 */
303 static void xics_disable_real_irq(unsigned int irq)
311 call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
312 if (call_status != 0) {
313 printk(KERN_ERR "xics_disable_real_irq: irq=%x: "
314 "ibm_int_off returned %x\n", irq, call_status);
318 server = get_irq_server(irq);
319 /* Have to set XIVE to 0xff to be able to remove a slot */
320 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
321 if (call_status != 0) {
322 printk(KERN_ERR "xics_disable_irq: irq=%x: ibm_set_xive(0xff)"
323 " returned %x\n", irq, call_status);
/* .disable hook: translate the virtual irq to its real number and
 * defer to xics_disable_real_irq(). */
328 static void xics_disable_irq(unsigned int virq)
332 irq = virt_irq_to_real(irq_offset_down(virq));
333 xics_disable_real_irq(irq);
/*
 * .end hook: write the XIRR back with priority 0xff in the top byte
 * and the real irq number in the low bits, signalling EOI and
 * restoring the cpu priority.
 */
336 static void xics_end_irq(unsigned int irq)
338 int cpu = smp_processor_id();
341 ops->xirr_info_set(cpu, ((0xff << 24) |
342 (virt_irq_to_real(irq_offset_down(irq)))));
/*
 * .ack hook.  For low (8259-range) irqs, ack the i8259 and EOI the
 * cascade vector on XICS.  NOTE(review): truncated -- the i8259 ack
 * call and closing braces are not visible in this chunk.
 */
346 static void xics_mask_and_ack_irq(unsigned int irq)
348 int cpu = smp_processor_id();
350 if (irq < irq_offset_value()) {
353 ops->xirr_info_set(cpu, ((0xff<<24) |
354 xics_irq_8259_cascade_real));
359 extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
/*
 * Fetch and decode the pending interrupt from XICS.  Handles three
 * cases: the 8259 cascade (poll the i8259, ack the cascade if it was
 * spurious), a spurious XICS vector, and a normal vector translated
 * real->virtual (radix-tree fastpath, then slowpath; unknown vectors
 * are disabled).  NOTE(review): truncated -- the vector masking, the
 * NO_IRQ handling and the final return are not visible in this chunk.
 */
361 int xics_get_irq(struct pt_regs *regs)
363 unsigned int cpu = smp_processor_id();
367 vec = ops->xirr_info_get(cpu);
368 /* (vec >> 24) == old priority */
371 /* for sanity, this had better be < NR_IRQS - 16 */
372 if (vec == xics_irq_8259_cascade_real) {
373 irq = i8259_irq(cpu);
375 /* Spurious cascaded interrupt. Still must ack xics */
376 xics_end_irq(irq_offset_up(xics_irq_8259_cascade));
380 } else if (vec == XICS_IRQ_SPURIOUS) {
383 irq = real_irq_to_virt(vec);
385 irq = real_irq_to_virt_slowpath(vec);
387 printk(KERN_ERR "Interrupt 0x%x (real) is invalid,"
388 " disabling it.\n", vec);
389 xics_disable_real_irq(vec);
391 irq = irq_offset_up(irq);
398 extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
/*
 * IPI handler.  Clear the QIRR first (0xff = lowest priority) so a
 * new IPI raised while we drain can re-trigger, then process every
 * message bit set for this cpu.  NOTE(review): truncated -- braces,
 * memory barriers and the final return are not visible in this chunk.
 */
400 irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
402 int cpu = smp_processor_id();
404 ops->qirr_info(cpu, 0xff);
406 WARN_ON(cpu_is_offline(cpu));
408 while (xics_ipi_message[cpu].value) {
409 if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
410 &xics_ipi_message[cpu].value)) {
412 smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
414 if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
415 &xics_ipi_message[cpu].value)) {
417 smp_message_recv(PPC_MSG_RESCHEDULE, regs);
420 if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
421 &xics_ipi_message[cpu].value)) {
423 smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
426 #ifdef CONFIG_DEBUGGER
427 if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
428 &xics_ipi_message[cpu].value)) {
430 smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
/* Raise an IPI on the target cpu by writing IPI_PRIORITY to its QIRR. */
437 void xics_cause_IPI(int cpu)
439 ops->qirr_info(cpu, IPI_PRIORITY);
/* Per-cpu bringup: open this cpu's CPPR to the lowest priority (0xff)
 * so it can accept all interrupts. */
442 void xics_setup_cpu(void)
444 int cpu = smp_processor_id();
446 ops->cppr_info(cpu, 0xff);
450 #endif /* CONFIG_SMP */
/*
 * Boot-time XICS initialisation:
 *  - look up the RTAS tokens used throughout this file,
 *  - walk the "PowerPC-External-Interrupt-Presentation" device-tree
 *    nodes collecting each node's presentation-register addr/size,
 *  - find the boot cpu's interrupt-server numbers,
 *  - discover the ISA i8259 cascade irq (if any),
 *  - ioremap the per-cpu presentation areas (native) or switch to the
 *    hcall-based ops (LPAR),
 *  - install the xics/xics_8259 handlers for every irq descriptor and
 *    open the boot cpu's priority.
 * NOTE(review): this chunk is heavily truncated -- many declarations,
 * braces, labels (e.g. "nextnode:") and error paths are not visible;
 * the comments below only annotate the surviving lines.
 */
452 void xics_init_IRQ(void)
455 unsigned long intr_size = 0;
456 struct device_node *np;
457 uint *ireg, ilen, indx = 0;
458 unsigned long intr_base = 0;
459 struct xics_interrupt_node {
464 ppc64_boot_msg(0x20, "XICS Init");
/* RTAS tokens cached for the enable/disable/affinity paths. */
466 ibm_get_xive = rtas_token("ibm,get-xive");
467 ibm_set_xive = rtas_token("ibm,set-xive");
468 ibm_int_on = rtas_token("ibm,int-on");
469 ibm_int_off = rtas_token("ibm,int-off");
471 np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation");
473 printk(KERN_WARNING "Can't find Interrupt Presentation\n");
474 udbg_printf("Can't find Interrupt Presentation\n");
478 ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", 0);
481 * set node starting index for this node
486 ireg = (uint *)get_property(np, "reg", &ilen);
488 printk(KERN_WARNING "Can't find Interrupt Reg Property\n");
489 udbg_printf("Can't find Interrupt Reg Property\n");
/* "reg" holds 64-bit addr/size pairs as hi/lo 32-bit cells. */
494 inodes[indx].addr = (unsigned long long)*ireg++ << 32;
495 ilen -= sizeof(uint);
496 inodes[indx].addr |= *ireg++;
497 ilen -= sizeof(uint);
498 inodes[indx].size = (unsigned long long)*ireg++ << 32;
499 ilen -= sizeof(uint);
500 inodes[indx].size |= *ireg++;
501 ilen -= sizeof(uint);
503 if (indx >= NR_CPUS) break;
506 np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation");
507 if ((indx < NR_CPUS) && np) goto nextnode;
509 /* Find the server numbers for the boot cpu. */
510 for (np = of_find_node_by_type(NULL, "cpu");
512 np = of_find_node_by_type(np, "cpu")) {
513 ireg = (uint *)get_property(np, "reg", &ilen);
514 if (ireg && ireg[0] == hard_smp_processor_id()) {
515 ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
516 i = ilen / sizeof(int);
518 default_server = ireg[0];
519 default_distrib_server = ireg[i-1]; /* take last element */
526 intr_base = inodes[0].addr;
527 intr_size = (ulong)inodes[0].size;
/* ISA bridge: record the 8259 cascade irq, or -1 when absent. */
529 np = of_find_node_by_type(NULL, "interrupt-controller");
531 printk(KERN_WARNING "xics: no ISA Interrupt Controller\n");
532 xics_irq_8259_cascade_real = -1;
533 xics_irq_8259_cascade = -1;
535 ireg = (uint *) get_property(np, "interrupts", 0);
537 printk(KERN_WARNING "Can't find ISA Interrupts Property\n");
538 udbg_printf("Can't find ISA Interrupts Property\n");
541 xics_irq_8259_cascade_real = *ireg;
542 xics_irq_8259_cascade
543 = virt_irq_create_mapping(xics_irq_8259_cascade_real);
/* Native pSeries: map each present cpu's presentation registers. */
547 if (systemcfg->platform == PLATFORM_PSERIES) {
550 /* FIXME: Do this dynamically! --RR */
551 if (!cpu_present_at_boot(i))
553 xics_per_cpu[i] = __ioremap((ulong)inodes[get_hard_smp_processor_id(i)].addr,
554 (ulong)inodes[get_hard_smp_processor_id(i)].size,
/* UP fallback: map only the boot node's registers. */
558 xics_per_cpu[0] = __ioremap((ulong)intr_base, intr_size,
560 #endif /* CONFIG_SMP */
561 #ifdef CONFIG_PPC_PSERIES
562 /* actually iSeries does not use any of xics...but it has link dependencies
563 * for now, except this new one...
/* LPAR: no MMIO -- route everything through hypervisor calls. */
565 } else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
566 ops = &pSeriesLP_ops;
/* Borrow the i8259 enable/disable for the cascaded ISA range. */
570 xics_8259_pic.enable = i8259_pic.enable;
571 xics_8259_pic.disable = i8259_pic.disable;
572 for (i = 0; i < 16; ++i)
573 get_irq_desc(i)->handler = &xics_8259_pic;
574 for (; i < NR_IRQS; ++i)
575 get_irq_desc(i)->handler = &xics_pic;
/* Open the boot cpu to all interrupt priorities. */
577 ops->cppr_info(boot_cpuid, 0xff);
580 ppc64_boot_msg(0x21, "XICS Done");
584 * We cant do this in init_IRQ because we need the memory subsystem up for
/*
 * Late-init hook: request the 8259 cascade irq discovered in
 * xics_init_IRQ(), provided this machine actually uses XICS and an
 * ISA controller was found.  NOTE(review): truncated -- braces and
 * the return statement are not visible in this chunk.
 */
587 static int __init xics_setup_i8259(void)
589 if (naca->interrupt_controller == IC_PPC_XIC &&
590 xics_irq_8259_cascade != -1) {
591 if (request_irq(irq_offset_up(xics_irq_8259_cascade),
592 no_action, 0, "8259 cascade", 0))
593 printk(KERN_ERR "xics_init_IRQ: couldn't get 8259 cascade\n");
598 arch_initcall(xics_setup_i8259);
/*
 * Register the single XICS IPI: identity-map its virtual/real number,
 * request it with SA_INTERRUPT, and mark it per-cpu so the irq core
 * never migrates it.
 */
601 void xics_request_IPIs(void)
603 virt_irq_to_real_map[XICS_IPI] = XICS_IPI;
605 /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
606 request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, SA_INTERRUPT,
608 get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU;
/*
 * .set_affinity hook: re-target a virtual irq.  Reads the current
 * XIVE (to preserve its priority), maps the cpumask to either the
 * distributed server (all cpus) or one online cpu's hardware id, and
 * writes the XIVE back.  IPIs and unmapped irqs are skipped.
 * NOTE(review): truncated -- local declarations, braces and some
 * status checks are not visible in this chunk.
 */
612 static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
617 unsigned long newmask;
618 cpumask_t tmp = CPU_MASK_NONE;
620 irq = virt_irq_to_real(irq_offset_down(virq));
621 if (irq == XICS_IPI || irq == NO_IRQ)
624 status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
627 printk(KERN_ERR "xics_set_affinity: irq=%d ibm,get-xive "
628 "returns %d\n", irq, status);
632 /* For the moment only implement delivery to all cpus or one cpu */
633 if (cpus_equal(cpumask, CPU_MASK_ALL)) {
634 newmask = default_distrib_server;
636 cpus_and(tmp, cpu_online_map, cpumask);
639 newmask = get_hard_smp_processor_id(first_cpu(tmp));
/* xics_status[1] carries the priority read back by get-xive. */
642 status = rtas_call(ibm_set_xive, 3, 1, NULL,
643 irq, newmask, xics_status[1]);
646 printk(KERN_ERR "xics_set_affinity irq=%d ibm,set-xive "
647 "returns %d\n", irq, status);
652 #ifdef CONFIG_HOTPLUG_CPU
654 /* Interrupts are disabled. */
/*
 * CPU-hotplug helper: move every migratable irq off the current cpu.
 * Sequence: reject queued interrupts (CPPR 0), remove this cpu from
 * the Global Interrupt Queue via the set-indicator RTAS call, re-open
 * the CPPR enough for IPIs, then for each irq whose XIVE targets this
 * cpu specifically, reset its server to the distributed default.
 * NOTE(review): truncated -- loop header, several braces/continues
 * and unlock paths are not fully visible in this chunk.
 */
655 void xics_migrate_irqs_away(void)
657 int set_indicator = rtas_token("set-indicator");
658 const unsigned int giqs = 9005UL; /* Global Interrupt Queue Server */
660 unsigned int irq, cpu = smp_processor_id();
664 BUG_ON(set_indicator == RTAS_UNKNOWN_SERVICE);
666 /* Reject any interrupt that was queued to us... */
667 ops->cppr_info(cpu, 0);
670 /* Refuse any new interrupts... */
671 rtas_call(set_indicator, 3, 1, &status, giqs,
672 hard_smp_processor_id(), 0);
673 WARN_ON(status != 0);
675 /* Allow IPIs again... */
676 ops->cppr_info(cpu, DEFAULT_PRIORITY);
679 printk(KERN_WARNING "HOTPLUG: Migrating IRQs away\n");
681 irq_desc_t *desc = get_irq_desc(irq);
683 /* We need to get IPIs still. */
684 if (irq_offset_down(irq) == XICS_IPI)
687 /* We only need to migrate enabled IRQS */
688 if (desc == NULL || desc->handler == NULL
689 || desc->action == NULL
690 || desc->handler->set_affinity == NULL)
693 spin_lock_irqsave(&desc->lock, flags);
695 status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
697 printk(KERN_ERR "migrate_irqs_away: irq=%d "
698 "ibm,get-xive returns %d\n",
704 * We only support delivery to all cpus or to one cpu.
705 * The irq has to be migrated only in the single cpu
/* xics_status[0] = current server; skip irqs not bound to us. */
708 if (xics_status[0] != get_hard_smp_processor_id(cpu))
711 printk(KERN_WARNING "IRQ %d affinity broken off cpu %u\n",
714 /* Reset affinity to all cpus */
715 xics_status[0] = default_distrib_server;
717 status = rtas_call(ibm_set_xive, 3, 1, NULL,
718 irq, xics_status[0], xics_status[1]);
720 printk(KERN_ERR "migrate_irqs_away irq=%d "
721 "ibm,set-xive returns %d\n",
725 spin_unlock_irqrestore(&desc->lock, flags);