/*
 * arch/ppc64/kernel/xics.c
 *
 * Copyright 2000 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
11 #include <linux/config.h>
12 #include <linux/types.h>
13 #include <linux/threads.h>
14 #include <linux/kernel.h>
15 #include <linux/irq.h>
16 #include <linux/smp.h>
17 #include <linux/interrupt.h>
18 #include <linux/signal.h>
19 #include <linux/init.h>
20 #include <linux/gfp.h>
21 #include <linux/radix-tree.h>
22 #include <linux/cpu.h>
25 #include <asm/pgtable.h>
30 #include <asm/ppcdebug.h>
31 #include <asm/hvcall.h>
32 #include <asm/machdep.h>
/* Forward declarations for the xics_pic interrupt-controller callbacks below. */
36 static unsigned int xics_startup(unsigned int irq);
37 static void xics_enable_irq(unsigned int irq);
38 static void xics_disable_irq(unsigned int irq);
39 static void xics_mask_and_ack_irq(unsigned int irq);
40 static void xics_end_irq(unsigned int irq);
41 static void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask);
/*
 * Primary XICS interrupt-controller operations, installed for all
 * non-8259 irqs in xics_init_IRQ().
 * NOTE(review): the initializer appears truncated in this excerpt (no
 * .typename/.end members or closing brace visible) -- confirm upstream.
 */
43 struct hw_interrupt_type xics_pic = {
45 .startup = xics_startup,
46 .enable = xics_enable_irq,
47 .disable = xics_disable_irq,
48 .ack = xics_mask_and_ack_irq,
50 .set_affinity = xics_set_affinity
/*
 * Controller ops for the first 16 (legacy 8259) irqs.  enable/disable are
 * copied from i8259_pic in xics_init_IRQ(); ack still goes through XICS.
 * NOTE(review): initializer appears truncated here (no closing brace).
 */
53 struct hw_interrupt_type xics_8259_pic = {
54 .typename = " XICS/8259",
55 .ack = xics_mask_and_ack_irq,
58 /* This is used to map real irq numbers to virtual */
59 static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);
/* Real vector 0 means "nothing pending" on XICS. */
62 #define XICS_IRQ_SPURIOUS 0
64 /* Want a priority other than 0. Various HW issues require this. */
65 #define DEFAULT_PRIORITY 5
68 * Mark IPIs as higher priority so we can take them inside interrupts that
69 * aren't marked SA_INTERRUPT
71 #define IPI_PRIORITY 4
/* Per-cpu mapping of the XICS presentation area (MMIO), set up at init. */
89 static struct xics_ipl *xics_per_cpu[NR_CPUS];
/* Virtual and real irq numbers of the 8259 cascade (-1 if absent). */
91 static int xics_irq_8259_cascade = 0;
92 static int xics_irq_8259_cascade_real = 0;
/* Interrupt server numbers discovered from the boot cpu's device node. */
93 static unsigned int default_server = 0xFF;
94 /* also referenced in smp.c... */
95 unsigned int default_distrib_server = 0;
98 * XICS only has a single IPI, so encode the messages per CPU
100 struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
102 /* RTAS service tokens */
/*
 * Access-method hooks; bare-metal MMIO (pSeries_ops) or hypervisor calls
 * (pSeriesLP_ops).  NOTE(review): these four pointers are presumably the
 * members of an xics_ops struct whose "typedef struct {" and closing
 * "} xics_ops;" lines are not visible in this excerpt -- confirm.
 */
109 int (*xirr_info_get)(int cpu);
110 void (*xirr_info_set)(int cpu, int val);
111 void (*cppr_info)(int cpu, u8 val);
112 void (*qirr_info)(int cpu, u8 val);
118 static int pSeries_xirr_info_get(int n_cpu)
120 return xics_per_cpu[n_cpu]->xirr.word;
123 static void pSeries_xirr_info_set(int n_cpu, int value)
125 xics_per_cpu[n_cpu]->xirr.word = value;
128 static void pSeries_cppr_info(int n_cpu, u8 value)
130 xics_per_cpu[n_cpu]->xirr.bytes[0] = value;
133 static void pSeries_qirr_info(int n_cpu, u8 value)
135 xics_per_cpu[n_cpu]->qirr.bytes[0] = value;
/*
 * Bare-metal (MMIO) access methods.
 * NOTE(review): only the first two initializers are visible here; the
 * cppr/qirr entries and closing brace appear elided -- confirm upstream.
 */
138 static xics_ops pSeries_ops = {
139 pSeries_xirr_info_get,
140 pSeries_xirr_info_set,
/* Active method table; switched to pSeriesLP_ops on LPAR in xics_init_IRQ(). */
145 static xics_ops *ops = &pSeries_ops;
150 static inline long plpar_eoi(unsigned long xirr)
152 return plpar_hcall_norets(H_EOI, xirr);
155 static inline long plpar_cppr(unsigned long cppr)
157 return plpar_hcall_norets(H_CPPR, cppr);
160 static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
162 return plpar_hcall_norets(H_IPI, servernum, mfrr);
165 static inline long plpar_xirr(unsigned long *xirr_ret)
168 return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy);
171 static int pSeriesLP_xirr_info_get(int n_cpu)
173 unsigned long lpar_rc;
174 unsigned long return_value;
176 lpar_rc = plpar_xirr(&return_value);
177 if (lpar_rc != H_Success)
178 panic(" bad return code xirr - rc = %lx \n", lpar_rc);
179 return (int)return_value;
182 static void pSeriesLP_xirr_info_set(int n_cpu, int value)
184 unsigned long lpar_rc;
185 unsigned long val64 = value & 0xffffffff;
187 lpar_rc = plpar_eoi(val64);
188 if (lpar_rc != H_Success)
189 panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
193 static void pSeriesLP_cppr_info(int n_cpu, u8 value)
195 unsigned long lpar_rc;
197 lpar_rc = plpar_cppr(value);
198 if (lpar_rc != H_Success)
199 panic("bad return code cppr - rc = %lx\n", lpar_rc);
202 static void pSeriesLP_qirr_info(int n_cpu , u8 value)
204 unsigned long lpar_rc;
206 lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
207 if (lpar_rc != H_Success)
208 panic("bad return code qirr - rc = %lx\n", lpar_rc);
/*
 * LPAR (hypervisor-call) access methods, selected in xics_init_IRQ() when
 * systemcfg->platform == PLATFORM_PSERIES_LPAR.
 * NOTE(review): the cppr/qirr initializers and closing brace appear elided
 * from this excerpt -- confirm upstream.
 */
211 xics_ops pSeriesLP_ops = {
212 pSeriesLP_xirr_info_get,
213 pSeriesLP_xirr_info_set,
218 static unsigned int xics_startup(unsigned int virq)
220 virq = irq_offset_down(virq);
221 if (radix_tree_insert(&irq_map, virt_irq_to_real(virq),
222 &virt_irq_to_real_map[virq]) == -ENOMEM)
223 printk(KERN_CRIT "Out of memory creating real -> virtual"
224 " IRQ mapping for irq %u (real 0x%x)\n",
225 virq, virt_irq_to_real(virq));
226 return 0; /* return value is ignored */
/*
 * Translate a real irq number back to its virtual irq using the radix
 * tree populated by xics_startup().
 * NOTE(review): the declaration of 'ptr' and (presumably) a NULL check on
 * the lookup result are not visible in this excerpt -- confirm upstream.
 */
229 static unsigned int real_irq_to_virt(unsigned int real_irq)
233 ptr = radix_tree_lookup(&irq_map, real_irq);
/* Pointer arithmetic recovers the index into virt_irq_to_real_map. */
236 return ptr - virt_irq_to_real_map;
/*
 * Choose the XICS delivery server for 'irq' from its affinity mask:
 * either the global distribution server (deliver to all cpus) or the
 * hard id of one online cpu in the mask.
 * NOTE(review): declarations, else arms and closing braces are elided
 * from this excerpt; control flow is partially inferred -- confirm.
 */
240 static int get_irq_server(unsigned int irq)
242 cpumask_t cpumask = irq_affinity[irq];
243 cpumask_t allcpus = CPU_MASK_ALL;
244 cpumask_t tmp = CPU_MASK_NONE;
247 #ifdef CONFIG_IRQ_ALL_CPUS
248 /* For the moment only implement delivery to all cpus or one cpu */
249 if (smp_threads_ready) {
/* Full mask: use the global distribution queue server. */
250 if (cpus_equal(cpumask, allcpus)) {
251 server = default_distrib_server;
253 cpus_and(tmp, cpu_online_map, cpumask);
256 server = default_distrib_server;
/* Single-cpu case: first online cpu in the requested mask. */
258 server = get_hard_smp_processor_id(first_cpu(tmp));
261 server = default_server;
264 server = default_server;
/*
 * Non-distributed variant: always deliver to the default server.
 * NOTE(review): presumably the #else arm of a CONFIG_IRQ_ALL_CPUS (or
 * CONFIG_SMP) conditional whose directives are elided here -- confirm.
 */
270 static int get_irq_server(unsigned int irq)
272 return default_server;
/*
 * ->enable hook: program the irq's XIVE (server + priority) via RTAS
 * ibm,set-xive, then turn the interrupt on with ibm,int-on.  Error paths
 * only log; there is no recovery.
 * NOTE(review): local declarations, an early-return check and the
 * priority argument of the first rtas_call are elided in this excerpt.
 */
276 static void xics_enable_irq(unsigned int virq)
282 irq = virt_irq_to_real(irq_offset_down(virq));
286 server = get_irq_server(virq);
287 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
289 if (call_status != 0) {
290 printk(KERN_ERR "xics_enable_irq: irq=%x: ibm_set_xive "
291 "returned %lx\n", irq, call_status);
295 /* Now unmask the interrupt (often a no-op) */
296 call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
297 if (call_status != 0) {
298 printk(KERN_ERR "xics_enable_irq: irq=%x: ibm_int_on "
299 "returned %lx\n", irq, call_status);
/*
 * Mask a real irq: ibm,int-off it, then reprogram its XIVE with priority
 * 0xff (required before the slot can be removed -- see comment below).
 * NOTE(review): local declarations and closing braces are elided here.
 */
304 static void xics_disable_real_irq(unsigned int irq)
312 call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
313 if (call_status != 0) {
314 printk(KERN_ERR "xics_disable_real_irq: irq=%x: "
315 "ibm_int_off returned %lx\n", irq, call_status);
319 server = get_irq_server(irq);
320 /* Have to set XIVE to 0xff to be able to remove a slot */
321 call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
322 if (call_status != 0) {
323 printk(KERN_ERR "xics_disable_irq: irq=%x: ibm_set_xive(0xff)"
324 " returned %lx\n", irq, call_status);
/*
 * ->disable hook: translate the virtual irq to its real number and mask it.
 * NOTE(review): the declaration of 'irq' (and possibly a spurious-irq
 * check) is elided from this excerpt -- confirm upstream.
 */
329 static void xics_disable_irq(unsigned int virq)
333 irq = virt_irq_to_real(irq_offset_down(virq));
334 xics_disable_real_irq(irq);
/*
 * ->end hook: write (0xff << 24) | real_irq back to this cpu's XIRR,
 * signalling end-of-interrupt for the vector.
 * NOTE(review): intervening lines (brace, possibly a sync) are elided.
 */
337 static void xics_end_irq(unsigned int irq)
339 int cpu = smp_processor_id();
342 ops->xirr_info_set(cpu, ((0xff << 24) |
343 (virt_irq_to_real(irq_offset_down(irq)))));
/*
 * ->ack hook.  Irqs below the offset are 8259-cascaded: the XICS side is
 * EOId with the cascade's real vector.
 * NOTE(review): the branch bodies and the non-cascade path are largely
 * elided from this excerpt -- confirm upstream before relying on flow.
 */
347 static void xics_mask_and_ack_irq(unsigned int irq)
349 int cpu = smp_processor_id();
351 if (irq < irq_offset_value()) {
354 ops->xirr_info_set(cpu, ((0xff<<24) |
355 xics_irq_8259_cascade_real));
360 extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
/*
 * Fetch and decode the next pending interrupt for this cpu from XIRR.
 * Handles the 8259 cascade, spurious vectors, and the real->virtual
 * translation (with a slow path for vectors not yet in the radix tree).
 * NOTE(review): several intermediate lines (vector masking, braces,
 * return statements) are elided from this excerpt.
 */
362 int xics_get_irq(struct pt_regs *regs)
364 unsigned int cpu = smp_processor_id();
368 vec = ops->xirr_info_get(cpu);
369 /* (vec >> 24) == old priority */
372 /* for sanity, this had better be < NR_IRQS - 16 */
/* 8259 cascade: poll the legacy controller for the actual source irq. */
373 if (vec == xics_irq_8259_cascade_real) {
374 irq = i8259_irq(cpu);
376 /* Spurious cascaded interrupt. Still must ack xics */
377 xics_end_irq(irq_offset_up(xics_irq_8259_cascade));
381 } else if (vec == XICS_IRQ_SPURIOUS) {
384 irq = real_irq_to_virt(vec);
386 irq = real_irq_to_virt_slowpath(vec);
/* Unknown real vector: mask it at the source so it cannot recur. */
388 printk(KERN_ERR "Interrupt 0x%x (real) is invalid,"
389 " disabling it.\n", vec);
390 xics_disable_real_irq(vec);
392 irq = irq_offset_up(irq);
399 extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
/*
 * IPI handler: acknowledge this cpu's QIRR, then drain every message bit
 * other cpus have set in xics_ipi_message[cpu].value.
 * NOTE(review): the line-number gaps suggest memory-barrier and closing
 * lines are elided between the test_and_clear_bit() calls -- confirm.
 */
401 irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
403 int cpu = smp_processor_id();
/* Clear the incoming IPI at the source before processing messages. */
405 ops->qirr_info(cpu, 0xff);
407 WARN_ON(cpu_is_offline(cpu));
409 while (xics_ipi_message[cpu].value) {
410 if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
411 &xics_ipi_message[cpu].value)) {
413 smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
415 if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
416 &xics_ipi_message[cpu].value)) {
418 smp_message_recv(PPC_MSG_RESCHEDULE, regs);
421 if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
422 &xics_ipi_message[cpu].value)) {
424 smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
427 #ifdef CONFIG_DEBUGGER
428 if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
429 &xics_ipi_message[cpu].value)) {
431 smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
438 void xics_cause_IPI(int cpu)
440 ops->qirr_info(cpu, IPI_PRIORITY);
/*
 * Per-cpu XICS bringup: set this cpu's CPPR to 0xff.
 * NOTE(review): trailing lines (e.g. a sync before returning) appear to
 * be elided from this excerpt -- confirm upstream.
 */
443 void xics_setup_cpu(void)
445 int cpu = smp_processor_id();
447 ops->cppr_info(cpu, 0xff);
451 #endif /* CONFIG_SMP */
/*
 * Boot-time XICS initialization: cache the RTAS service tokens, walk the
 * device tree for interrupt-presentation nodes, determine the boot cpu's
 * interrupt servers, locate the ISA (8259) cascade, map the per-cpu
 * presentation areas (or switch to hypervisor-call ops on LPAR), and
 * install the irq handlers.
 * NOTE(review): many lines (braces, else arms, labels, error gotos) are
 * elided from this excerpt; the visible lines are kept verbatim.
 */
453 void xics_init_IRQ(void)
456 unsigned long intr_size = 0;
457 struct device_node *np;
458 uint *ireg, ilen, indx = 0;
459 unsigned long intr_base = 0;
460 struct xics_interrupt_node {
465 ppc64_boot_msg(0x20, "XICS Init");
/* Cache the RTAS tokens used throughout this file. */
467 ibm_get_xive = rtas_token("ibm,get-xive");
468 ibm_set_xive = rtas_token("ibm,set-xive");
469 ibm_int_on = rtas_token("ibm,int-on");
470 ibm_int_off = rtas_token("ibm,int-off");
/* Walk the interrupt-presentation nodes, collecting reg addr/size pairs. */
472 np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation");
474 printk(KERN_WARNING "Can't find Interrupt Presentation\n");
475 udbg_printf("Can't find Interrupt Presentation\n");
479 ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", 0);
482 * set node starting index for this node
487 ireg = (uint *)get_property(np, "reg", &ilen);
489 printk(KERN_WARNING "Can't find Interrupt Reg Property\n");
490 udbg_printf("Can't find Interrupt Reg Property\n");
/* Decode each 64-bit addr/size as two 32-bit cells. */
495 inodes[indx].addr = (unsigned long long)*ireg++ << 32;
496 ilen -= sizeof(uint);
497 inodes[indx].addr |= *ireg++;
498 ilen -= sizeof(uint);
499 inodes[indx].size = (unsigned long long)*ireg++ << 32;
500 ilen -= sizeof(uint);
501 inodes[indx].size |= *ireg++;
502 ilen -= sizeof(uint);
504 if (indx >= NR_CPUS) break;
507 np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation");
508 if ((indx < NR_CPUS) && np) goto nextnode;
510 /* Find the server numbers for the boot cpu. */
511 for (np = of_find_node_by_type(NULL, "cpu");
513 np = of_find_node_by_type(np, "cpu")) {
514 ireg = (uint *)get_property(np, "reg", &ilen);
515 if (ireg && ireg[0] == hard_smp_processor_id()) {
516 ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
517 i = ilen / sizeof(int);
519 default_server = ireg[0];
520 default_distrib_server = ireg[i-1]; /* take last element */
527 intr_base = inodes[0].addr;
528 intr_size = (ulong)inodes[0].size;
/* Locate the legacy ISA interrupt controller and its cascade irq. */
530 np = of_find_node_by_type(NULL, "interrupt-controller");
532 printk(KERN_WARNING "xics: no ISA Interrupt Controller\n");
533 xics_irq_8259_cascade_real = -1;
534 xics_irq_8259_cascade = -1;
536 ireg = (uint *) get_property(np, "interrupts", 0);
538 printk(KERN_WARNING "Can't find ISA Interrupts Property\n");
539 udbg_printf("Can't find ISA Interrupts Property\n");
542 xics_irq_8259_cascade_real = *ireg;
543 xics_irq_8259_cascade
544 = virt_irq_create_mapping(xics_irq_8259_cascade_real);
/* Bare-metal pSeries: map each boot-present cpu's presentation area. */
548 if (systemcfg->platform == PLATFORM_PSERIES) {
551 /* FIXME: Do this dynamically! --RR */
552 if (!cpu_present_at_boot(i))
554 xics_per_cpu[i] = __ioremap((ulong)inodes[get_hard_smp_processor_id(i)].addr,
555 (ulong)inodes[get_hard_smp_processor_id(i)].size,
559 xics_per_cpu[0] = __ioremap((ulong)intr_base, intr_size,
561 #endif /* CONFIG_SMP */
562 #ifdef CONFIG_PPC_PSERIES
563 /* actually iSeries does not use any of xics...but it has link dependencies
564 * for now, except this new one...
/* LPAR: use hypervisor-call access methods instead of MMIO. */
566 } else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
567 ops = &pSeriesLP_ops;
/* Install handlers: first 16 irqs belong to the 8259, the rest to XICS. */
571 xics_8259_pic.enable = i8259_pic.enable;
572 xics_8259_pic.disable = i8259_pic.disable;
573 for (i = 0; i < 16; ++i)
574 get_irq_desc(i)->handler = &xics_8259_pic;
575 for (; i < NR_IRQS; ++i)
576 get_irq_desc(i)->handler = &xics_pic;
/* Initialize the boot cpu's CPPR. */
578 ops->cppr_info(boot_cpuid, 0xff);
581 ppc64_boot_msg(0x21, "XICS Done");
585 * We can't do this in init_IRQ because we need the memory subsystem up for
/*
 * Late init (arch_initcall): register a no-op handler for the 8259
 * cascade irq once request_irq() is usable; failure only logs.
 * NOTE(review): braces and the return statement are elided here.
 */
588 static int __init xics_setup_i8259(void)
590 if (naca->interrupt_controller == IC_PPC_XIC &&
591 xics_irq_8259_cascade != -1) {
592 if (request_irq(irq_offset_up(xics_irq_8259_cascade),
593 no_action, 0, "8259 cascade", 0))
594 printk(KERN_ERR "xics_init_IRQ: couldn't get 8259 cascade\n");
599 arch_initcall(xics_setup_i8259);
/*
 * Register the XICS IPI handler.  The IPI's virtual irq maps to itself,
 * and its descriptor is flagged IRQ_PER_CPU.
 * NOTE(review): the request_irq() call's trailing arguments (its name
 * string continuation) are elided from this excerpt.
 */
602 void xics_request_IPIs(void)
604 virt_irq_to_real_map[XICS_IPI] = XICS_IPI;
606 /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
607 request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, SA_INTERRUPT,
609 get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU;
/*
 * ->set_affinity hook: reprogram the irq's XIVE server while preserving
 * its current priority (xics_status[1], read back via ibm,get-xive).
 * Only "all cpus" or "one cpu" delivery is supported.
 * NOTE(review): declarations, early returns and closing braces are
 * elided from this excerpt.
 */
613 static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
617 unsigned long xics_status[2];
618 unsigned long newmask;
619 cpumask_t allcpus = CPU_MASK_ALL;
620 cpumask_t tmp = CPU_MASK_NONE;
622 irq = virt_irq_to_real(irq_offset_down(virq));
/* The IPI and unmapped irqs must not be re-targeted. */
623 if (irq == XICS_IPI || irq == NO_IRQ)
626 status = rtas_call(ibm_get_xive, 1, 3, (void *)&xics_status, irq);
629 printk(KERN_ERR "xics_set_affinity: irq=%d ibm,get-xive "
630 "returns %ld\n", irq, status);
634 /* For the moment only implement delivery to all cpus or one cpu */
635 if (cpus_equal(cpumask, allcpus)) {
636 newmask = default_distrib_server;
638 cpus_and(tmp, cpu_online_map, cpumask);
641 newmask = get_hard_smp_processor_id(first_cpu(tmp));
644 status = rtas_call(ibm_set_xive, 3, 1, NULL,
645 irq, newmask, xics_status[1]);
648 printk(KERN_ERR "xics_set_affinity irq=%d ibm,set-xive "
649 "returns %ld\n", irq, status);
654 #ifdef CONFIG_HOTPLUG_CPU
656 /* Interrupts are disabled. */
657 void xics_migrate_irqs_away(void)
659 int set_indicator = rtas_token("set-indicator");
660 const unsigned long giqs = 9005UL; /* Global Interrupt Queue Server */
661 unsigned long status = 0;
662 unsigned int irq, cpu = smp_processor_id();
663 unsigned long xics_status[2];
666 BUG_ON(set_indicator == RTAS_UNKNOWN_SERVICE);
668 /* Reject any interrupt that was queued to us... */
669 ops->cppr_info(cpu, 0);
672 /* Refuse any new interrupts... */
673 rtas_call(set_indicator, 3, 1, &status, giqs,
674 hard_smp_processor_id(), 0UL);
675 WARN_ON(status != 0);
677 /* Allow IPIs again... */
678 ops->cppr_info(cpu, DEFAULT_PRIORITY);
681 printk(KERN_WARNING "HOTPLUG: Migrating IRQs away\n");
683 irq_desc_t *desc = get_irq_desc(irq);
685 /* We need to get IPIs still. */
686 if (irq_offset_down(irq) == XICS_IPI)
689 /* We only need to migrate enabled IRQS */
690 if (desc == NULL || desc->handler == NULL
691 || desc->action == NULL
692 || desc->handler->set_affinity == NULL)
695 spin_lock_irqsave(&desc->lock, flags);
697 status = rtas_call(ibm_get_xive, 1, 3, (void *)&xics_status,
700 printk(KERN_ERR "migrate_irqs_away: irq=%d "
701 "ibm,get-xive returns %ld\n",
707 * We only support delivery to all cpus or to one cpu.
708 * The irq has to be migrated only in the single cpu
711 if (xics_status[0] != get_hard_smp_processor_id(cpu))
714 printk(KERN_WARNING "IRQ %d affinity broken off cpu %u\n",
717 /* Reset affinity to all cpus */
718 xics_status[0] = default_distrib_server;
720 status = rtas_call(ibm_set_xive, 3, 1, NULL,
721 irq, xics_status[0], xics_status[1]);
723 printk(KERN_ERR "migrate_irqs_away irq=%d "
724 "ibm,set-xive returns %ld\n",
728 spin_unlock_irqrestore(&desc->lock, flags);