2 * Platform dependent support for SGI SN
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
8 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
11 #include <linux/init.h>
12 #include <linux/sched.h>
13 #include <linux/vmalloc.h>
14 #include <linux/irq.h>
15 #include <linux/interrupt.h>
16 #include <linux/slab.h>
17 #include <linux/bootmem.h>
18 #include <linux/cpumask.h>
20 #include <asm/pgtable.h>
21 #include <asm/sn/sgi.h>
22 #include <asm/sn/hcl.h>
23 #include <asm/sn/types.h>
24 #include <asm/sn/pci/pciio.h>
25 #include <asm/sn/pci/pciio_private.h>
26 #include <asm/sn/pci/pcibr.h>
27 #include <asm/sn/pci/pcibr_private.h>
28 #include <asm/sn/sn_cpuid.h>
29 #include <asm/sn/io.h>
30 #include <asm/sn/intr.h>
31 #include <asm/sn/addrs.h>
32 #include <asm/sn/driver.h>
33 #include <asm/sn/arch.h>
34 #include <asm/sn/pda.h>
35 #include <asm/processor.h>
36 #include <asm/system.h>
37 #include <asm/bitops.h>
38 #include <asm/sn/sn2/shub_mmr.h>
/* Forward declarations / externs for the SN interrupt-replay helpers below. */
40 static void force_interrupt(int irq);
41 extern void pcibr_force_interrupt(pcibr_intr_t intr);
42 extern int sn_force_interrupt_flag;
43 struct irq_desc * sn_irq_desc(unsigned int irq);
/*
 * Per-IRQ singly-linked list node recording the pcibr interrupt
 * structures registered on a given IRQ number.
 * NOTE(review): only the 'next' member is visible in this view; the
 * 'intr' member used elsewhere (e.g. p->intr) is presumably declared
 * in the missing lines -- confirm against the full source.
 */
45 struct sn_intr_list_t {
46 struct sn_intr_list_t *next;
/* One list head per possible IRQ. */
50 static struct sn_intr_list_t *sn_intr_list[NR_IRQS];
/* hw_interrupt_type .startup hook for irq_type_sn; body not visible in this view. */
54 sn_startup_irq(unsigned int irq)
/* hw_interrupt_type .shutdown hook for irq_type_sn; body not visible in this view. */
60 sn_shutdown_irq(unsigned int irq)
/* hw_interrupt_type .disable hook for irq_type_sn; body not visible in this view. */
65 sn_disable_irq(unsigned int irq)
/* hw_interrupt_type .enable hook for irq_type_sn; body not visible in this view. */
70 sn_enable_irq(unsigned int irq)
/*
 * hw_interrupt_type .ack hook: read the local node's SHub EVENT_OCCURRED
 * register, build a mask of the event sources found set (UART, IPI,
 * II_INT0, II_INT1), write that mask to the ALIAS register to clear
 * them, and mark this vector as in service in the per-cpu pda.
 * NOTE(review): several lines (closing braces, declaration of 'nasid')
 * are missing from this view; comments only, code untouched.
 */
75 sn_ack_irq(unsigned int irq)
77 unsigned long event_occurred, mask = 0;
/* Read the event-occurred MMR on this cpu's node. */
81 nasid = smp_physical_node_id();
82 event_occurred = HUB_L( (unsigned long *)GLOBAL_MMR_ADDR(nasid,SH_EVENT_OCCURRED) );
/* Accumulate a bit for every event source currently asserted. */
83 if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
84 mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
86 if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) {
87 mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT);
89 if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) {
90 mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT);
92 if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
93 mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
/* A store to the ALIAS register clears the corresponding event bits. */
95 HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS), mask );
/* Record this vector as in service on the current cpu. */
96 __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
/*
 * hw_interrupt_type .end hook: for the UART vector, re-read the SHub
 * event register and self-IPI if the UART bit is still set so a missed
 * UART interrupt is not lost; then clear the in-service mark and, if
 * sn_force_interrupt_flag is set, replay the interrupt.
 * NOTE(review): the declarations of 'ivec' and 'nasid' are not visible
 * in this view (presumably ivec = SN_IVEC_FROM_IRQ(irq)) -- confirm.
 */
100 sn_end_irq(unsigned int irq)
104 unsigned long event_occurred;
105 irq_desc_t *desc = sn_irq_desc(irq);
106 unsigned int status = desc->status;
109 if (ivec == SGI_UART_VECTOR) {
110 nasid = smp_physical_node_id();
111 event_occurred = HUB_L( (unsigned long *)GLOBAL_MMR_ADDR(nasid,SH_EVENT_OCCURRED) );
112 // If the UART bit is set here, we may have received an interrupt from the
113 // UART that the driver missed. To make sure, we IPI ourselves to force us
115 if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
116 platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR, IA64_IPI_DM_INT, 0);
/* Vector is no longer in service on this cpu. */
119 __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
/* Replay only when the irq is neither disabled nor mid-handling. */
120 if (sn_force_interrupt_flag)
121 if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))
122 force_interrupt(irq);
/*
 * hw_interrupt_type .set_affinity hook: redirect the pcibr interrupt
 * registered on this irq to the first cpu in 'mask', then publish the
 * new affinity via set_irq_affinity_info.
 * NOTE(review): the opening #ifdef CONFIG_SMP and the declarations of
 * 'intr', 'cpu' and 'redir' are not visible in this view -- confirm.
 */
126 sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
131 struct sn_intr_list_t *p = sn_intr_list[irq];
133 extern void sn_shub_redirect_intr(pcibr_intr_t intr, unsigned long cpu);
134 extern void sn_tio_redirect_intr(pcibr_intr_t intr, unsigned long cpu);
/* Route the interrupt to the first cpu of the requested mask. */
144 cpu = first_cpu(mask);
145 sn_shub_redirect_intr(intr, cpu);
146 irq = irq & 0xff; /* strip off redirect bit, if someone stuck it on. */
147 (void) set_irq_affinity_info(irq, cpu_physical_id(intr->bi_cpu), redir);
148 #endif /* CONFIG_SMP */
/*
 * SN interrupt-controller ops table; the initializer fields (the
 * startup/shutdown/enable/disable/ack/end/set_affinity hooks defined
 * above) are not visible in this view.
 */
152 struct hw_interrupt_type irq_type_sn = {
/*
 * Return the irq descriptor for 'irq' after reducing it to its bare
 * interrupt vector with SN_IVEC_FROM_IRQ (drops any extra encoding
 * carried in the upper bits).
 */
165 sn_irq_desc(unsigned int irq)
168 irq = SN_IVEC_FROM_IRQ(irq);
170 return(_irq_desc + irq);
/* Map an irq number to its interrupt vector; body not visible in this view. */
174 sn_irq_to_vector(u8 irq)
/* Map a vector delivered on the current cpu back to its irq number. */
180 sn_local_vector_to_irq(u8 vector)
182 return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
/*
 * Install the SN controller (irq_type_sn) on every irq descriptor that
 * still carries the default no-op handler (no_irq_type).
 * NOTE(review): the enclosing function's signature (likely sn_irq_init)
 * is not visible in this view -- confirm against the full source.
 */
189 irq_desc_t *base_desc = _irq_desc;
191 for (i=0; i<NR_IRQS; i++) {
192 if (base_desc[i].handler == &no_irq_type) {
193 base_desc[i].handler = &irq_type_sn;
/*
 * Record that pcibr interrupt 'intr' is attached to 'irq': widen the
 * owning cpu's [sn_first_irq, sn_last_irq] scan window (used by
 * sn_lb_int_war_check) and append a new node to sn_intr_list[irq].
 * NOTE(review): the node-initialization lines between the list walk and
 * the head assignment are not visible in this view.
 */
199 register_pcibr_intr(int irq, pcibr_intr_t intr)
201 struct sn_intr_list_t *p = kmalloc(sizeof(struct sn_intr_list_t), GFP_KERNEL);
202 struct sn_intr_list_t *list;
203 int cpu = intr->bi_cpu;
/* Track the highest and lowest irq registered on this cpu. */
205 if (pdacpu(cpu)->sn_last_irq < irq) {
206 pdacpu(cpu)->sn_last_irq = irq;
208 if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) pdacpu(cpu)->sn_first_irq = irq;
/* Allocation failure here is treated as fatal. */
209 if (!p) panic("Could not allocate memory for sn_intr_list_t\n");
/* Append to the tail of an existing list, else start a new one. */
210 if ((list = sn_intr_list[irq])) {
211 while (list->next) list = list->next;
216 sn_intr_list[irq] = p;
/*
 * Remove 'intr' from sn_intr_list[irq]; if the list becomes empty,
 * recompute the owning cpu's sn_first_irq/sn_last_irq bounds so the
 * LB-int-war scan stays tight.
 * NOTE(review): the list-unlink statements, loop bodies, and the
 * declaration of 'i' are not visible in this view.
 */
223 unregister_pcibr_intr(int irq, pcibr_intr_t intr)
226 struct sn_intr_list_t **prev, *curr;
227 int cpu = intr->bi_cpu;
/* Nothing registered on this irq -- nothing to do. */
230 if (sn_intr_list[irq] == NULL)
233 prev = &sn_intr_list[irq];
234 curr = sn_intr_list[irq];
236 if (curr->intr == intr) {
/* List now empty: shrink this cpu's last/first irq bounds. */
247 if (!sn_intr_list[irq]) {
248 if (pdacpu(cpu)->sn_last_irq == irq) {
249 for (i = pdacpu(cpu)->sn_last_irq - 1; i; i--)
252 pdacpu(cpu)->sn_last_irq = i;
255 if (pdacpu(cpu)->sn_first_irq == irq) {
256 pdacpu(cpu)->sn_first_irq = 0;
257 for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS; i++)
259 pdacpu(cpu)->sn_first_irq = i;
/*
 * Replay every pcibr interrupt registered on every irq -- scans all of
 * sn_intr_list[] and forces each attached interrupt.
 * NOTE(review): the inner list-walk lines are not visible in this view.
 */
266 force_polled_int(void)
269 struct sn_intr_list_t *p;
271 for (i=0; i<NR_IRQS;i++) {
275 pcibr_force_interrupt(p->intr);
/*
 * Replay every pcibr interrupt registered on a single irq.
 * NOTE(review): the list-walk control lines are not visible in this view.
 */
283 force_interrupt(int irq)
285 struct sn_intr_list_t *p = sn_intr_list[irq];
289 pcibr_force_interrupt(p->intr);
296 Check for lost interrupts. If the PIC int_status reg. says that
297 an interrupt has been sent, but not handled, and the interrupt
298 is not pending in either the cpu irr regs or in the soft irr regs,
299 and the interrupt is not in service, then the interrupt may have
300 been lost. Force an interrupt on that pin. It is possible that
301 the interrupt is in flight, so we may generate a spurious interrupt,
302 but we should never miss a real lost interrupt.
/*
 * Lost-interrupt detector (see the prose comment above): if the PIC's
 * int_status register says an interrupt was asserted, but the vector is
 * pending neither in the cpu IRR registers nor in the soft irr bits,
 * and it is not currently in service, force the interrupt again.
 * NOTE(review): the switch case labels/breaks, closing braces, and the
 * declarations of irr_reg_num/irr_bit are not visible in this view.
 */
306 sn_check_intr(int irq, pcibr_intr_t intr)
308 unsigned long regval;
311 unsigned long irr_reg;
/* Bridge's view of which interrupt pins it has asserted. */
314 regval = pcireg_intr_status_get(intr->bi_soft);
/* Locate this vector's bit within the four 64-bit cpu IRR registers. */
315 irr_reg_num = irq_to_vector(irq) / 64;
316 irr_bit = irq_to_vector(irq) % 64;
317 switch (irr_reg_num) {
319 irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
322 irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
325 irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
328 irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
/* Pending nowhere and not in service: the interrupt may have been lost. */
331 if (!test_bit(irr_bit, &irr_reg) ) {
332 if (!test_bit(irq, pda->sn_soft_irr) ) {
333 if (!test_bit(irq, pda->sn_in_service_ivecs) ) {
/* Only force pins asserted both now and on the previous scan. */
335 if (intr->bi_ibits & regval & intr->bi_last_intr) {
336 regval &= ~(intr->bi_ibits & regval);
337 pcibr_force_interrupt(intr);
/* Remember what was asserted, for comparison on the next scan. */
342 intr->bi_last_intr = regval;
/*
 * Periodic scan for the lost-interrupt workaround: walk every irq in
 * this cpu's [sn_first_irq, sn_last_irq] window and re-check each
 * pcibr interrupt registered on it via sn_check_intr().
 * NOTE(review): the inner list-walk lines and closing braces are not
 * visible in this view.
 */
346 sn_lb_int_war_check(void)
/* No interrupts registered for this cpu yet. */
350 if (pda->sn_first_irq == 0) return;
351 for (i=pda->sn_first_irq;
352 i <= pda->sn_last_irq; i++) {
353 struct sn_intr_list_t *p = sn_intr_list[i];
358 sn_check_intr(i, p->intr);