X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fia64%2Fsn%2Fkernel%2Firq.c;h=8d2a1bfbfe7c03f4d4b7e687fbf1a978d5dd1c71;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=88f0bbaed5d75d460c5f1630ccc3e41c35d09861;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index 88f0bbaed..8d2a1bfbf 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c @@ -5,358 +5,464 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved. */ -#include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include #include -#include #include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include static void force_interrupt(int irq); -extern void pcibr_force_interrupt(pcibr_intr_t intr); -extern int sn_force_interrupt_flag; -struct irq_desc * sn_irq_desc(unsigned int irq); +static void register_intr_pda(struct sn_irq_info *sn_irq_info); +static void unregister_intr_pda(struct sn_irq_info *sn_irq_info); + +int sn_force_interrupt_flag = 1; +extern int sn_ioif_inited; +struct list_head **sn_irq_lh; +static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */ + +u64 sn_intr_alloc(nasid_t local_nasid, int local_widget, + struct sn_irq_info *sn_irq_info, + int req_irq, nasid_t req_nasid, + int req_slice) +{ + struct ia64_sal_retval ret_stuff; + ret_stuff.status = 0; + ret_stuff.v0 = 0; -struct sn_intr_list_t { - struct sn_intr_list_t *next; - pcibr_intr_t intr; -}; + SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT, + (u64) SAL_INTR_ALLOC, (u64) local_nasid, + (u64) local_widget, __pa(sn_irq_info), (u64) req_irq, + (u64) req_nasid, (u64) req_slice); -static struct sn_intr_list_t *sn_intr_list[NR_IRQS]; + return ret_stuff.status; +} +void sn_intr_free(nasid_t local_nasid, int local_widget, + struct sn_irq_info *sn_irq_info) +{ + struct ia64_sal_retval ret_stuff; + ret_stuff.status = 0; + ret_stuff.v0 = 0; + + SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT, + (u64) SAL_INTR_FREE, (u64) local_nasid, + (u64) local_widget, (u64) sn_irq_info->irq_irq, + (u64) sn_irq_info->irq_cookie, 0, 0); +} -static unsigned int -sn_startup_irq(unsigned int irq) +static unsigned int sn_startup_irq(unsigned int irq) { - return(0); + return 0; } -static void -sn_shutdown_irq(unsigned int irq) +static void sn_shutdown_irq(unsigned int irq) { } -static void -sn_disable_irq(unsigned int irq) +static void sn_disable_irq(unsigned int irq) { } -static void -sn_enable_irq(unsigned int irq) +static void sn_enable_irq(unsigned int irq) { } -static void -sn_ack_irq(unsigned int irq) +static void sn_ack_irq(unsigned int irq) { - unsigned long event_occurred, mask = 0; - int nasid; + u64 event_occurred, mask; irq = irq & 0xff; - nasid = smp_physical_node_id(); - event_occurred = HUB_L( (unsigned long *)GLOBAL_MMR_ADDR(nasid,SH_EVENT_OCCURRED) ); - if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) { - mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT); - } - if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) { - mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT); - } - if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) { - mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT); 
- } - if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) { - mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT); - } - HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS), mask ); + event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)); + mask = event_occurred & SH_ALL_INT_MASK; + HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask); __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs); + + move_native_irq(irq); } -static void -sn_end_irq(unsigned int irq) +static void sn_end_irq(unsigned int irq) { - int nasid; int ivec; - unsigned long event_occurred; - irq_desc_t *desc = sn_irq_desc(irq); - unsigned int status = desc->status; + u64 event_occurred; ivec = irq & 0xff; if (ivec == SGI_UART_VECTOR) { - nasid = smp_physical_node_id(); - event_occurred = HUB_L( (unsigned long *)GLOBAL_MMR_ADDR(nasid,SH_EVENT_OCCURRED) ); - // If the UART bit is set here, we may have received an interrupt from the - // UART that the driver missed. To make sure, we IPI ourselves to force us - // to look again. + event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED)); + /* If the UART bit is set here, we may have received an + * interrupt from the UART that the driver missed. To + * make sure, we IPI ourselves to force us to look again. + */ if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) { - platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR, IA64_IPI_DM_INT, 0); + platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR, + IA64_IPI_DM_INT, 0); } } __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs); if (sn_force_interrupt_flag) - if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) - force_interrupt(irq); + force_interrupt(irq); } -static void -sn_set_affinity_irq(unsigned int irq, cpumask_t mask) +static void sn_irq_info_free(struct rcu_head *head); + +struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info, + nasid_t nasid, int slice) { + int vector; + int cpuid; #ifdef CONFIG_SMP - int redir = 0; - int cpu; - struct sn_intr_list_t *p = sn_intr_list[irq]; - pcibr_intr_t intr; - extern void sn_shub_redirect_intr(pcibr_intr_t intr, unsigned long cpu); - extern void sn_tio_redirect_intr(pcibr_intr_t intr, unsigned long cpu); - - if (p == NULL) - return; - - intr = p->intr; - - if (intr == NULL) - return; - - cpu = first_cpu(mask); - sn_shub_redirect_intr(intr, cpu); - irq = irq & 0xff; /* strip off redirect bit, if someone stuck it on. 
*/ - (void) set_irq_affinity_info(irq, cpu_physical_id(intr->bi_cpu), redir); -#endif /* CONFIG_SMP */ -} + int cpuphys; +#endif + int64_t bridge; + int local_widget, status; + nasid_t local_nasid; + struct sn_irq_info *new_irq_info; + struct sn_pcibus_provider *pci_provider; + + new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC); + if (new_irq_info == NULL) + return NULL; + + memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info)); + + bridge = (u64) new_irq_info->irq_bridge; + if (!bridge) { + kfree(new_irq_info); + return NULL; /* irq is not a device interrupt */ + } + local_nasid = NASID_GET(bridge); -struct hw_interrupt_type irq_type_sn = { - "SN hub", - sn_startup_irq, - sn_shutdown_irq, - sn_enable_irq, - sn_disable_irq, - sn_ack_irq, - sn_end_irq, - sn_set_affinity_irq -}; + if (local_nasid & 1) + local_widget = TIO_SWIN_WIDGETNUM(bridge); + else + local_widget = SWIN_WIDGETNUM(bridge); + vector = sn_irq_info->irq_irq; + /* Free the old PROM new_irq_info structure */ + sn_intr_free(local_nasid, local_widget, new_irq_info); + unregister_intr_pda(new_irq_info); + + /* allocate a new PROM new_irq_info struct */ + status = sn_intr_alloc(local_nasid, local_widget, + new_irq_info, vector, + nasid, slice); + + /* SAL call failed */ + if (status) { + kfree(new_irq_info); + return NULL; + } -struct irq_desc * -sn_irq_desc(unsigned int irq) + /* Update kernels new_irq_info with new target info */ + cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid, + new_irq_info->irq_slice); + new_irq_info->irq_cpuid = cpuid; + register_intr_pda(new_irq_info); + + pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type]; + + /* + * If this represents a line interrupt, target it. If it's + * an msi (irq_int_bit < 0), it's already targeted. + */ + if (new_irq_info->irq_int_bit >= 0 && + pci_provider && pci_provider->target_interrupt) + (pci_provider->target_interrupt)(new_irq_info); + + spin_lock(&sn_irq_info_lock); + list_replace_rcu(&sn_irq_info->list, &new_irq_info->list); + spin_unlock(&sn_irq_info_lock); + call_rcu(&sn_irq_info->rcu, sn_irq_info_free); + +#ifdef CONFIG_SMP + cpuphys = cpu_physical_id(cpuid); + set_irq_affinity_info((vector & 0xff), cpuphys, 0); +#endif + + return new_irq_info; +} + +static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) { + struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; + nasid_t nasid; + int slice; + + nasid = cpuid_to_nasid(first_cpu(mask)); + slice = cpuid_to_slice(first_cpu(mask)); - irq = SN_IVEC_FROM_IRQ(irq); + list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, + sn_irq_lh[irq], list) + (void)sn_retarget_vector(sn_irq_info, nasid, slice); +} - return(_irq_desc + irq); +static void +sn_mask_irq(unsigned int irq) +{ } -u8 -sn_irq_to_vector(u8 irq) +static void +sn_unmask_irq(unsigned int irq) { - return(irq); } -unsigned int -sn_local_vector_to_irq(u8 vector) +struct irq_chip irq_type_sn = { + .name = "SN hub", + .startup = sn_startup_irq, + .shutdown = sn_shutdown_irq, + .enable = sn_enable_irq, + .disable = sn_disable_irq, + .ack = sn_ack_irq, + .end = sn_end_irq, + .mask = sn_mask_irq, + .unmask = sn_unmask_irq, + .set_affinity = sn_set_affinity_irq +}; + +unsigned int sn_local_vector_to_irq(u8 vector) { return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector)); } -void -sn_irq_init (void) +void sn_irq_init(void) { int i; - irq_desc_t *base_desc = _irq_desc; + irq_desc_t *base_desc = irq_desc; - for (i=0; ibi_cpu; + int irq = sn_irq_info->irq_irq; + int cpu = sn_irq_info->irq_cpuid; if (pdacpu(cpu)->sn_last_irq < irq) { 
pdacpu(cpu)->sn_last_irq = irq; } - if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) pdacpu(cpu)->sn_first_irq = irq; - if (!p) panic("Could not allocate memory for sn_intr_list_t\n"); - if ((list = sn_intr_list[irq])) { - while (list->next) list = list->next; - list->next = p; - p->next = NULL; - p->intr = intr; - } else { - sn_intr_list[irq] = p; - p->next = NULL; - p->intr = intr; + + if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) + pdacpu(cpu)->sn_first_irq = irq; +} + +static void unregister_intr_pda(struct sn_irq_info *sn_irq_info) +{ + int irq = sn_irq_info->irq_irq; + int cpu = sn_irq_info->irq_cpuid; + struct sn_irq_info *tmp_irq_info; + int i, foundmatch; + + rcu_read_lock(); + if (pdacpu(cpu)->sn_last_irq == irq) { + foundmatch = 0; + for (i = pdacpu(cpu)->sn_last_irq - 1; + i && !foundmatch; i--) { + list_for_each_entry_rcu(tmp_irq_info, + sn_irq_lh[i], + list) { + if (tmp_irq_info->irq_cpuid == cpu) { + foundmatch = 1; + break; + } + } + } + pdacpu(cpu)->sn_last_irq = i; + } + + if (pdacpu(cpu)->sn_first_irq == irq) { + foundmatch = 0; + for (i = pdacpu(cpu)->sn_first_irq + 1; + i < NR_IRQS && !foundmatch; i++) { + list_for_each_entry_rcu(tmp_irq_info, + sn_irq_lh[i], + list) { + if (tmp_irq_info->irq_cpuid == cpu) { + foundmatch = 1; + break; + } + } + } + pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i); } + rcu_read_unlock(); } -void -unregister_pcibr_intr(int irq, pcibr_intr_t intr) +static void sn_irq_info_free(struct rcu_head *head) { + struct sn_irq_info *sn_irq_info; - struct sn_intr_list_t **prev, *curr; - int cpu = intr->bi_cpu; - int i; + sn_irq_info = container_of(head, struct sn_irq_info, rcu); + kfree(sn_irq_info); +} + +void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info) +{ + nasid_t nasid = sn_irq_info->irq_nasid; + int slice = sn_irq_info->irq_slice; + int cpu = nasid_slice_to_cpuid(nasid, slice); +#ifdef CONFIG_SMP + int cpuphys; +#endif + + pci_dev_get(pci_dev); + sn_irq_info->irq_cpuid = cpu; + sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev); + + /* link it into the sn_irq[irq] list */ + spin_lock(&sn_irq_info_lock); + list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]); + reserve_irq_vector(sn_irq_info->irq_irq); + spin_unlock(&sn_irq_info_lock); + + register_intr_pda(sn_irq_info); +#ifdef CONFIG_SMP + cpuphys = cpu_physical_id(cpu); + set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0); +#endif +} - if (sn_intr_list[irq] == NULL) +void sn_irq_unfixup(struct pci_dev *pci_dev) +{ + struct sn_irq_info *sn_irq_info; + + /* Only cleanup IRQ stuff if this device has a host bus context */ + if (!SN_PCIDEV_BUSSOFT(pci_dev)) return; - prev = &sn_intr_list[irq]; - curr = sn_intr_list[irq]; - while (curr) { - if (curr->intr == intr) { - *prev = curr->next; - break; - } - prev = &curr->next; - curr = curr->next; + sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info; + if (!sn_irq_info) + return; + if (!sn_irq_info->irq_irq) { + kfree(sn_irq_info); + return; } - if (curr) - kfree(curr); + unregister_intr_pda(sn_irq_info); + spin_lock(&sn_irq_info_lock); + list_del_rcu(&sn_irq_info->list); + spin_unlock(&sn_irq_info_lock); + if (list_empty(sn_irq_lh[sn_irq_info->irq_irq])) + free_irq_vector(sn_irq_info->irq_irq); + call_rcu(&sn_irq_info->rcu, sn_irq_info_free); + pci_dev_put(pci_dev); - if (!sn_intr_list[irq]) { - if (pdacpu(cpu)->sn_last_irq == irq) { - for (i = pdacpu(cpu)->sn_last_irq - 1; i; i--) - if (sn_intr_list[i]) - break; - pdacpu(cpu)->sn_last_irq = i; - } +} 
- if (pdacpu(cpu)->sn_first_irq == irq) { - pdacpu(cpu)->sn_first_irq = 0; - for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS; i++) - if (sn_intr_list[i]) - pdacpu(cpu)->sn_first_irq = i; - } - } +static inline void +sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info) +{ + struct sn_pcibus_provider *pci_provider; + pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type]; + if (pci_provider && pci_provider->force_interrupt) + (*pci_provider->force_interrupt)(sn_irq_info); } -void -force_polled_int(void) +static void force_interrupt(int irq) { - int i; - struct sn_intr_list_t *p; + struct sn_irq_info *sn_irq_info; - for (i=0; iintr){ - pcibr_force_interrupt(p->intr); - } - p = p->next; - } - } + if (!sn_ioif_inited) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list) + sn_call_force_intr_provider(sn_irq_info); + + rcu_read_unlock(); } -static void -force_interrupt(int irq) +/* + * Check for lost interrupts. If the PIC int_status reg. says that + * an interrupt has been sent, but not handled, and the interrupt + * is not pending in either the cpu irr regs or in the soft irr regs, + * and the interrupt is not in service, then the interrupt may have + * been lost. Force an interrupt on that pin. It is possible that + * the interrupt is in flight, so we may generate a spurious interrupt, + * but we should never miss a real lost interrupt. + */ +static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info) { - struct sn_intr_list_t *p = sn_intr_list[irq]; + u64 regval; + struct pcidev_info *pcidev_info; + struct pcibus_info *pcibus_info; + + /* + * Bridge types attached to TIO (anything but PIC) do not need this WAR + * since they do not target Shub II interrupt registers. If that + * ever changes, this check needs to accomodate. + */ + if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC) + return; + + pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; + if (!pcidev_info) + return; - while (p) { - if (p->intr) { - pcibr_force_interrupt(p->intr); + pcibus_info = + (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info-> + pdi_pcibus_info; + regval = pcireg_intr_status_get(pcibus_info); + + if (!ia64_get_irr(irq_to_vector(irq))) { + if (!test_bit(irq, pda->sn_in_service_ivecs)) { + regval &= 0xff; + if (sn_irq_info->irq_int_bit & regval & + sn_irq_info->irq_last_intr) { + regval &= ~(sn_irq_info->irq_int_bit & regval); + sn_call_force_intr_provider(sn_irq_info); + } } - p = p->next; } + sn_irq_info->irq_last_intr = regval; } -/* -Check for lost interrupts. If the PIC int_status reg. says that -an interrupt has been sent, but not handled, and the interrupt -is not pending in either the cpu irr regs or in the soft irr regs, -and the interrupt is not in service, then the interrupt may have -been lost. Force an interrupt on that pin. It is possible that -the interrupt is in flight, so we may generate a spurious interrupt, -but we should never miss a real lost interrupt. 
-*/ - -static void -sn_check_intr(int irq, pcibr_intr_t intr) +void sn_lb_int_war_check(void) { - unsigned long regval; - int irr_reg_num; - int irr_bit; - unsigned long irr_reg; - - - regval = pcireg_intr_status_get(intr->bi_soft); - irr_reg_num = irq_to_vector(irq) / 64; - irr_bit = irq_to_vector(irq) % 64; - switch (irr_reg_num) { - case 0: - irr_reg = ia64_getreg(_IA64_REG_CR_IRR0); - break; - case 1: - irr_reg = ia64_getreg(_IA64_REG_CR_IRR1); - break; - case 2: - irr_reg = ia64_getreg(_IA64_REG_CR_IRR2); - break; - case 3: - irr_reg = ia64_getreg(_IA64_REG_CR_IRR3); - break; - } - if (!test_bit(irr_bit, &irr_reg) ) { - if (!test_bit(irq, pda->sn_soft_irr) ) { - if (!test_bit(irq, pda->sn_in_service_ivecs) ) { - regval &= 0xff; - if (intr->bi_ibits & regval & intr->bi_last_intr) { - regval &= ~(intr->bi_ibits & regval); - pcibr_force_interrupt(intr); - } - } + struct sn_irq_info *sn_irq_info; + int i; + + if (!sn_ioif_inited || pda->sn_first_irq == 0) + return; + + rcu_read_lock(); + for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) { + list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) { + sn_check_intr(i, sn_irq_info); } } - intr->bi_last_intr = regval; + rcu_read_unlock(); } -void -sn_lb_int_war_check(void) +void __init sn_irq_lh_init(void) { int i; - if (pda->sn_first_irq == 0) return; - for (i=pda->sn_first_irq; - i <= pda->sn_last_irq; i++) { - struct sn_intr_list_t *p = sn_intr_list[i]; - if (p == NULL) { - continue; - } - while (p) { - sn_check_intr(i, p->intr); - p = p->next; - } + sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL); + if (!sn_irq_lh) + panic("SN PCI INIT: Failed to allocate memory for PCI init\n"); + + for (i = 0; i < NR_IRQS; i++) { + sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL); + if (!sn_irq_lh[i]) + panic("SN PCI INIT: Failed IRQ memory allocation\n"); + + INIT_LIST_HEAD(sn_irq_lh[i]); } }
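The structural heart of this patch is the replacement of the old per-vector sn_intr_list[] chains of sn_intr_list_t nodes with RCU-protected list_head lists (sn_irq_lh[]): writers such as sn_irq_fixup(), sn_irq_unfixup() and sn_retarget_vector() serialize on sn_irq_info_lock and use list_add_rcu()/list_del_rcu()/list_replace_rcu(), readers such as force_interrupt() and sn_lb_int_war_check() walk the lists under rcu_read_lock() only, and freeing is deferred through call_rcu() to sn_irq_info_free(). The sketch below is not part of the patch; it is a minimal, self-contained illustration of that reader/writer pattern using the same 2.6-era kernel list/RCU primitives the patch relies on. All demo_* names are hypothetical stand-ins (demo_node for struct sn_irq_info, demo_list for one sn_irq_lh[] entry, demo_lock for sn_irq_info_lock).

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for struct sn_irq_info. */
struct demo_node {
	struct list_head list;
	struct rcu_head rcu;
	int data;
};

static LIST_HEAD(demo_list);		/* plays the role of sn_irq_lh[irq] */
static DEFINE_SPINLOCK(demo_lock);	/* plays the role of sn_irq_info_lock */

/* Writer: add under the spinlock, as sn_irq_fixup() does. */
static void demo_add(struct demo_node *node)
{
	spin_lock(&demo_lock);
	list_add_rcu(&node->list, &demo_list);
	spin_unlock(&demo_lock);
}

/* RCU callback: free the node only after all pre-existing readers are done. */
static void demo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_node, rcu));
}

/* Writer: unlink under the spinlock, defer the free, as sn_irq_unfixup() does. */
static void demo_del(struct demo_node *node)
{
	spin_lock(&demo_lock);
	list_del_rcu(&node->list);
	spin_unlock(&demo_lock);
	call_rcu(&node->rcu, demo_free_rcu);
}

/* Reader: lockless traversal, as force_interrupt() and sn_lb_int_war_check() do. */
static int demo_sum(void)
{
	struct demo_node *node;
	int sum = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(node, &demo_list, list)
		sum += node->data;
	rcu_read_unlock();

	return sum;
}

RCU fits this conversion because the readers run in interrupt paths that must not block, while list updates happen only on the comparatively rare device fixup/unfixup and vector-retarget operations.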