2 * Platform dependent support for SGI SN
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
8 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
11 #include <linux/init.h>
12 #include <linux/sched.h>
13 #include <linux/vmalloc.h>
14 #include <linux/irq.h>
15 #include <linux/interrupt.h>
16 #include <linux/slab.h>
17 #include <linux/bootmem.h>
18 #include <linux/cpumask.h>
20 #include <asm/pgtable.h>
21 #include <asm/sn/sgi.h>
22 #include <asm/sn/hcl.h>
23 #include <asm/sn/types.h>
24 #include <asm/sn/pci/pciio.h>
25 #include <asm/sn/pci/pciio_private.h>
26 #include <asm/sn/pci/pcibr.h>
27 #include <asm/sn/pci/pcibr_private.h>
28 #include <asm/sn/sn_cpuid.h>
29 #include <asm/sn/io.h>
30 #include <asm/sn/intr.h>
31 #include <asm/sn/addrs.h>
32 #include <asm/sn/driver.h>
33 #include <asm/sn/arch.h>
34 #include <asm/sn/pda.h>
35 #include <asm/processor.h>
36 #include <asm/system.h>
37 #include <asm/bitops.h>
38 #include <asm/sn/sn2/shub_mmr.h>
/* Re-raise the interrupt for every device registered on `irq' (defined below). */
static void force_interrupt(int irq);
/* PCI bridge helper that re-triggers a device interrupt; defined in pcibr code. */
extern void pcibr_force_interrupt(pcibr_intr_t intr);
/* Tunable: when nonzero, sn_end_irq() checks for and replays lost interrupts. */
extern int sn_force_interrupt_flag;
/* Map an irq number to its irq_desc entry (defined below). */
struct irq_desc * sn_irq_desc(unsigned int irq);
/* Per-irq mask of CPUs with a pending affinity change; presumably maintained
 * by the generic irq migration code — TODO confirm against kernel/irq. */
extern cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
/*
 * Node in a per-irq singly linked list of registered PCI bridge
 * interrupts.  NOTE(review): the remaining members are not visible in
 * this chunk; an `intr' field (pcibr_intr_t) is read by the functions
 * below.
 */
struct sn_intr_list_t {
	struct sn_intr_list_t *next;
/* One list head per irq; entries are added by register_pcibr_intr(). */
static struct sn_intr_list_t *sn_intr_list[NR_IRQS];
/* hw_interrupt_type "startup" hook for irq_type_sn.
 * NOTE(review): return-type lines and bodies of these four hooks are
 * not visible in this chunk. */
sn_startup_irq(unsigned int irq)
/* "shutdown" hook — body not visible in this chunk. */
sn_shutdown_irq(unsigned int irq)
/* "disable" hook — body not visible in this chunk. */
sn_disable_irq(unsigned int irq)
/* "enable" hook — body not visible in this chunk. */
sn_enable_irq(unsigned int irq)
/*
 * Apply a pending affinity change for `irq', if any: AND the pending
 * mask with the online CPUs and, when at least one target is online,
 * call the handler's set_affinity; the pending mask is cleared in
 * either case.  Caller holds desc->lock (see comment below).
 * NOTE(review): the declaration of `tmp' and the closing braces are
 * not visible in this chunk.
 */
static inline void sn_move_irq(int irq)
	/* note - we hold desc->lock */
	irq_desc_t *desc = irq_descp(irq);
	if (!cpus_empty(pending_irq_cpumask[irq])) {
		cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
		if (unlikely(!cpus_empty(tmp))) {
			/* Note: the full pending mask is passed, not the ANDed tmp. */
			desc->handler->set_affinity(irq, pending_irq_cpumask[irq]);
		cpus_clear(pending_irq_cpumask[irq]);
/*
 * irq "ack" hook: read the SHub EVENT_OCCURRED register for this node,
 * build a mask of the event bits that are currently set (UART, IPI,
 * II int0/int1), write that mask to the EVENT_OCCURRED alias register
 * (presumably a write-to-clear alias — TODO confirm against the SHub
 * MMR spec), then mark the vector in-service in the per-cpu pda bitmap.
 * NOTE(review): the declaration of `nasid' and the closing braces are
 * not visible in this chunk.
 */
sn_ack_irq(unsigned int irq)
	unsigned long event_occurred, mask = 0;
	nasid = smp_physical_node_id();
	event_occurred = HUB_L( (unsigned long *)GLOBAL_MMR_ADDR(nasid,SH_EVENT_OCCURRED) );
	/* Accumulate only the event bits that are actually asserted. */
	if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
	if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT);
	if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT);
	if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
	HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS), mask );
	/* Record this vector as being serviced; cleared again in sn_end_irq(). */
	__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
/*
 * irq "end" hook.  For the UART vector, re-read the SHub EVENT_OCCURRED
 * register and self-IPI if the UART bit is still set, so an interrupt
 * the driver missed gets re-delivered.  Then clear the vector's
 * in-service bit and, when sn_force_interrupt_flag is set and the irq
 * is neither disabled nor in progress, force-replay the interrupt.
 * NOTE(review): the declarations of `ivec' and `nasid', and the
 * closing braces, are not visible in this chunk.
 */
sn_end_irq(unsigned int irq)
	unsigned long event_occurred;
	irq_desc_t *desc = sn_irq_desc(irq);
	unsigned int status = desc->status;
	if (ivec == SGI_UART_VECTOR) {
		nasid = smp_physical_node_id();
		event_occurred = HUB_L( (unsigned long *)GLOBAL_MMR_ADDR(nasid,SH_EVENT_OCCURRED) );
		// If the UART bit is set here, we may have received an interrupt from the
		// UART that the driver missed. To make sure, we IPI ourselves to force us
		if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
			platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR, IA64_IPI_DM_INT, 0);
	/* Vector no longer being serviced (set in sn_ack_irq()). */
	__clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
	if (sn_force_interrupt_flag)
		if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))
			force_interrupt(irq);
/*
 * irq "set_affinity" hook: redirect the interrupt to the first CPU in
 * `mask' via sn_shub_redirect_intr(), then record the new affinity.
 * NOTE(review): most of the body (use of `p', the `intr'/`cpu'/`redir'
 * declarations, any loop over the intr list, and the TIO path using
 * sn_tio_redirect_intr) is not visible in this chunk; the visible code
 * ends an #ifdef CONFIG_SMP region.
 */
sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
	struct sn_intr_list_t *p = sn_intr_list[irq];
	extern void sn_shub_redirect_intr(pcibr_intr_t intr, unsigned long cpu);
	extern void sn_tio_redirect_intr(pcibr_intr_t intr, unsigned long cpu);
	cpu = first_cpu(mask);
	sn_shub_redirect_intr(intr, cpu);
	irq = irq & 0xff; /* strip off redirect bit, if someone stuck it on. */
	(void) set_irq_affinity_info(irq, cpu_physical_id(intr->bi_cpu), redir);
#endif /* CONFIG_SMP */
/*
 * SN platform interrupt type, installed into irq_desc entries by the
 * init loop below.  NOTE(review): the initializer members (presumably
 * the sn_*_irq hooks above) are not visible in this chunk.
 */
struct hw_interrupt_type irq_type_sn = {
/*
 * Map an irq number to its irq_desc entry, first reducing the irq to
 * its interrupt vector with SN_IVEC_FROM_IRQ().
 * NOTE(review): the return-type line and braces are not visible here.
 */
sn_irq_desc(unsigned int irq)
	irq = SN_IVEC_FROM_IRQ(irq);
	return(_irq_desc + irq);
/* Convert an irq number to its interrupt vector.
 * NOTE(review): return-type line and body are not visible in this chunk. */
sn_irq_to_vector(unsigned int irq)
/*
 * Convert a vector on the current CPU back to an irq number.
 * NOTE(review): return-type line and braces are not visible here.
 */
sn_local_vector_to_irq(u8 vector)
	return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
	/*
	 * Install irq_type_sn on every irq that still carries the generic
	 * no_irq_type handler.
	 * NOTE(review): the enclosing function's signature, the declaration
	 * of `i', and the closing braces are not visible in this chunk.
	 */
	irq_desc_t *base_desc = _irq_desc;
	for (i=0; i<NR_IRQS; i++) {
		if (base_desc[i].handler == &no_irq_type) {
			base_desc[i].handler = &irq_type_sn;
/*
 * Record `intr' on irq's sn_intr_list and widen the owning CPU's
 * [sn_first_irq, sn_last_irq] window in its pda — that window is the
 * range scanned by sn_lb_int_war_check().
 * NOTE(review): the return-type line, the list-append assignments and
 * several braces are not visible in this chunk.
 */
register_pcibr_intr(int irq, pcibr_intr_t intr)
	struct sn_intr_list_t *p = kmalloc(sizeof(struct sn_intr_list_t), GFP_KERNEL);
	struct sn_intr_list_t *list;
	int cpu = intr->bi_cpu;
	/* Grow the per-cpu irq window to include this irq. */
	if (pdacpu(cpu)->sn_last_irq < irq) {
		pdacpu(cpu)->sn_last_irq = irq;
	if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) pdacpu(cpu)->sn_first_irq = irq;
	if (!p) panic("Could not allocate memory for sn_intr_list_t\n");
	/* Walk to the tail; presumably the new node is appended there —
	 * the append itself is not visible in this chunk. */
	if ((list = sn_intr_list[irq])) {
		while (list->next) list = list->next;
	sn_intr_list[irq] = p;
/*
 * Remove `intr' from irq's sn_intr_list; if that leaves the list
 * empty, shrink the owning CPU's [sn_first_irq, sn_last_irq] window
 * in its pda.
 * NOTE(review): the unlink code, loop bodies (what each scan loop
 * tests/breaks on is not visible), the declaration of `i', and several
 * braces are missing from this chunk.
 */
unregister_pcibr_intr(int irq, pcibr_intr_t intr)
	struct sn_intr_list_t **prev, *curr;
	int cpu = intr->bi_cpu;
	/* Nothing registered on this irq: nothing to do. */
	if (sn_intr_list[irq] == NULL)
	prev = &sn_intr_list[irq];
	curr = sn_intr_list[irq];
	if (curr->intr == intr) {
	/* List now empty: tighten this cpu's scan window. */
	if (!sn_intr_list[irq]) {
		if (pdacpu(cpu)->sn_last_irq == irq) {
			/* Scan downward — presumably for the next irq that still
			 * has entries; loop body not visible, TODO confirm. */
			for (i = pdacpu(cpu)->sn_last_irq - 1; i; i--)
			pdacpu(cpu)->sn_last_irq = i;
		if (pdacpu(cpu)->sn_first_irq == irq) {
			pdacpu(cpu)->sn_first_irq = 0;
			/* Scan upward — presumably for the first irq that still
			 * has entries; loop body not visible, TODO confirm. */
			for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS; i++)
			pdacpu(cpu)->sn_first_irq = i;
/*
 * Force an interrupt for every entry on every irq's sn_intr_list.
 * NOTE(review): the return-type line, the declaration of `i', the
 * list-walk lines and braces are not visible in this chunk.
 */
force_polled_int(void)
	struct sn_intr_list_t *p;
	for (i=0; i<NR_IRQS;i++) {
		pcibr_force_interrupt(p->intr);
/*
 * Re-raise the interrupt for the pcibr entries registered on `irq'
 * (called from sn_end_irq() when sn_force_interrupt_flag is set).
 * NOTE(review): the list-walk loop and braces are not visible here.
 */
force_interrupt(int irq)
	struct sn_intr_list_t *p = sn_intr_list[irq];
	pcibr_force_interrupt(p->intr);
313 Check for lost interrupts. If the PIC int_status reg. says that
314 an interrupt has been sent, but not handled, and the interrupt
315 is not pending in either the cpu irr regs or in the soft irr regs,
316 and the interrupt is not in service, then the interrupt may have
317 been lost. Force an interrupt on that pin. It is possible that
318 the interrupt is in flight, so we may generate a spurious interrupt,
319 but we should never miss a real lost interrupt.
/*
 * Lost-interrupt detector (see the comment block above): read the PIC
 * interrupt status register, locate the irq's vector bit in the
 * matching cpu IRR control register, and if the interrupt is pending
 * nowhere — not in the IRR, not in the soft irr, not in service — but
 * the status register shows it asserted both now and at the previous
 * check, force it.
 * NOTE(review): the `case' labels of the switch, the declarations of
 * `irr_reg_num'/`irr_bit', and several braces are not visible in this
 * chunk.
 */
sn_check_intr(int irq, pcibr_intr_t intr)
	unsigned long regval;
	unsigned long irr_reg;
	regval = pcireg_intr_status_get(intr->bi_soft);
	/* Each of the four 64-bit IRR registers covers 64 vectors. */
	irr_reg_num = irq_to_vector(irq) / 64;
	irr_bit = irq_to_vector(irq) % 64;
	switch (irr_reg_num) {
		irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
		irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
		irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
		irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
	if (!test_bit(irr_bit, &irr_reg) ) {
		if (!test_bit(irq, pda->sn_soft_irr) ) {
			if (!test_bit(irq, pda->sn_in_service_ivecs) ) {
				/* Asserted at this check and the previous one:
				 * assume it was lost and re-raise it. */
				if (intr->bi_ibits & regval & intr->bi_last_intr) {
					regval &= ~(intr->bi_ibits & regval);
					pcibr_force_interrupt(intr);
	/* Remember the (possibly adjusted) status for the next check. */
	intr->bi_last_intr = regval;
/*
 * Sweep this cpu's registered irqs and run the lost-interrupt check on
 * each entry.  NOTE(review): "lb int war" presumably names a hardware
 * interrupt workaround — confirm; the return-type line, the
 * declaration of `i', the list walk and braces are not visible in
 * this chunk.
 */
sn_lb_int_war_check(void)
	/* Window is [sn_first_irq, sn_last_irq]; 0 means nothing registered. */
	if (pda->sn_first_irq == 0) return;
	for (i=pda->sn_first_irq;
	     i <= pda->sn_last_irq; i++) {
		struct sn_intr_list_t *p = sn_intr_list[i];
		sn_check_intr(i, p->intr);