* Copyright (C) 1992 Linus Torvalds
* Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
*
+ * Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
+ * Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
+ * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
+#include <linux/proc_fs.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/mach/irq.h>
+#include <asm/mach/time.h>
/*
* Maximum IRQ count. Currently, this is arbitrary. However, it should
*/
#define MAX_IRQ_CNT 100000
+static int noirqdebug;
static volatile unsigned long irq_err_count;
-static spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(irq_controller_lock);
static LIST_HEAD(irq_pending);
struct irqdesc irq_desc[NR_IRQS];
void (*init_arch_irq)(void) __initdata = NULL;
+/*
+ * No architecture-specific irq_finish function defined in asm/arch/irqs.h.
+ */
+#ifndef irq_finish
+#define irq_finish(irq) do { } while (0)
+#endif
+
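The no-op fallback above keeps the build working for machines that provide no end-of-interrupt hook; a machine class that needs one defines irq_finish in its machine-specific <asm/arch/irqs.h> so it is visible here. A purely illustrative override, with a hypothetical helper name, could look like:

/* Hypothetical machine-header snippet, for illustration only. */
#define irq_finish(irq)	do { example_machine_eoi(irq); } while (0)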
/*
* Dummy mask/unmask handler
*/
.disable_depth = 1,
};
+#ifdef CONFIG_SMP
+void synchronize_irq(unsigned int irq)
+{
+ struct irqdesc *desc = irq_desc + irq;
+
+ while (desc->running)
+ barrier();
+}
+EXPORT_SYMBOL(synchronize_irq);
+
+#define smp_set_running(desc) do { desc->running = 1; } while (0)
+#define smp_clear_running(desc) do { desc->running = 0; } while (0)
+#else
+#define smp_set_running(desc) do { } while (0)
+#define smp_clear_running(desc) do { } while (0)
+#endif
+
/**
- * disable_irq - disable an irq and wait for completion
+ * disable_irq_nosync - disable an irq without waiting
* @irq: Interrupt to disable
*
* Disable the selected interrupt line. Enables and disables
*
* This function may be called from IRQ context.
*/
-void disable_irq(unsigned int irq)
+void disable_irq_nosync(unsigned int irq)
{
struct irqdesc *desc = irq_desc + irq;
unsigned long flags;
list_del_init(&desc->pend);
spin_unlock_irqrestore(&irq_controller_lock, flags);
}
+EXPORT_SYMBOL(disable_irq_nosync);
+
+/**
+ * disable_irq - disable an irq and wait for completion
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line. Enables and disables
+ * are nested. This function waits for any pending IRQ
+ * handlers for this interrupt to complete before returning.
+ * If you use this function while holding a resource the IRQ
+ * handler may need, you will deadlock.
+ *
+ * This function may be called - with care - from IRQ context.
+ */
+void disable_irq(unsigned int irq)
+{
+ struct irqdesc *desc = irq_desc + irq;
+
+ disable_irq_nosync(irq);
+ if (desc->action)
+ synchronize_irq(irq);
+}
EXPORT_SYMBOL(disable_irq);
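For context, a sketch of typical driver-side use; the device structure and helper below are hypothetical. The synchronous variant is what a driver wants before touching state its handler also uses, and the deadlock warning above is exactly about calling it while holding a lock that handler takes.

#include <linux/interrupt.h>
#include <asm/irq.h>

/* Illustration only: keep the handler quiesced while shared state changes. */
static void example_reconfigure(struct example_dev *dev)
{
	disable_irq(dev->irq);		/* returns only after any running handler finishes */
	example_rewrite_ring(dev);	/* handler cannot observe the half-updated ring */
	enable_irq(dev->irq);		/* disables and enables nest */
}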
/**
unsigned long flags;
spin_lock_irqsave(&irq_controller_lock, flags);
- if (desc->chip->wake)
- desc->chip->wake(irq, 1);
+ if (desc->chip->set_wake)
+ desc->chip->set_wake(irq, 1);
spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq_wake);
unsigned long flags;
spin_lock_irqsave(&irq_controller_lock, flags);
- if (desc->chip->wake)
- desc->chip->wake(irq, 0);
+ if (desc->chip->set_wake)
+ desc->chip->set_wake(irq, 0);
spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_wake);
int show_interrupts(struct seq_file *p, void *v)
{
- int i = *(loff_t *) v;
+ int i = *(loff_t *) v, cpu;
struct irqaction * action;
unsigned long flags;
+ if (i == 0) {
+ char cpuname[12];
+
+ seq_printf(p, " ");
+ for_each_present_cpu(cpu) {
+ sprintf(cpuname, "CPU%d", cpu);
+ seq_printf(p, " %10s", cpuname);
+ }
+ seq_putc(p, '\n');
+ }
+
if (i < NR_IRQS) {
spin_lock_irqsave(&irq_controller_lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
- seq_printf(p, "%3d: %10u ", i, kstat_irqs(i));
+ seq_printf(p, "%3d: ", i);
+ for_each_present_cpu(cpu)
+ seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
seq_printf(p, ", %s", action->name);
} else if (i == NR_IRQS) {
#ifdef CONFIG_ARCH_ACORN
show_fiq_list(p, v);
+#endif
+#ifdef CONFIG_SMP
+ show_ipi_list(p);
+ show_local_irqs(p);
#endif
seq_printf(p, "Err: %10lu\n", irq_err_count);
}
static int count = 100;
struct irqaction *action;
- if (!count)
+ if (noirqdebug)
return;
- count--;
-
if (ret != IRQ_HANDLED && ret != IRQ_NONE) {
+ if (!count)
+ return;
+ count--;
printk("irq%u: bogus retval mask %x\n", irq, ret);
} else {
+ desc->irqs_unhandled++;
+ if (desc->irqs_unhandled <= 99900)
+ return;
+ desc->irqs_unhandled = 0;
printk("irq%u: nobody cared\n", irq);
}
show_regs(regs);
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
unsigned int status;
- int retval = 0;
+ int ret, retval = 0;
spin_unlock(&irq_controller_lock);
+#ifdef CONFIG_NO_IDLE_HZ
+ if (!(action->flags & SA_TIMER) && system_timer->dyn_tick != NULL) {
+ write_seqlock(&xtime_lock);
+ if (system_timer->dyn_tick->state & DYN_TICK_ENABLED)
+ system_timer->dyn_tick->handler(irq, 0, regs);
+ write_sequnlock(&xtime_lock);
+ }
+#endif
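A sketch of the other side of this hook, hedged: the field names follow the struct dyn_tick_timer that <asm/mach/time.h> is assumed to provide in this tree, and the platform names are made up. The handler called above under xtime_lock is what a platform timer registers so a non-timer interrupt can restore normal tick behaviour after a wakeup.

/* Illustration only; check the dyn_tick fields against <asm/mach/time.h>. */
static int example_dyn_tick_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	/* Reprogram the hardware timer back towards a periodic tick. */
	return 0;
}

static struct dyn_tick_timer example_dyn_tick = {
	.handler	= example_dyn_tick_handler,
};

/* A platform would hang this off its struct sys_timer, e.g.
 * example_sys_timer.dyn_tick = &example_dyn_tick;
 */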
+
if (!(action->flags & SA_INTERRUPT))
local_irq_enable();
status = 0;
do {
- status |= action->flags;
- retval |= action->handler(irq, action->dev_id, regs);
+ ret = action->handler(irq, action->dev_id, regs);
+ if (ret == IRQ_HANDLED)
+ status |= action->flags;
+ retval |= ret;
action = action->next;
} while (action);
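The loop above only credits action->flags (for instance SA_SAMPLE_RANDOM entropy) when a handler actually claimed the interrupt, and the OR-ed retval feeds the unhandled-IRQ accounting in report_bad_irq(). A well-behaved shared handler therefore looks roughly like the sketch below; the device structure, register names and helper are hypothetical.

#include <linux/interrupt.h>
#include <asm/io.h>

/* Illustration only: claim the interrupt only when this device raised it. */
static irqreturn_t example_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct example_dev *dev = dev_id;

	if (!(readl(dev->base + EXAMPLE_STAT) & EXAMPLE_PENDING))
		return IRQ_NONE;	/* not ours: keeps "nobody cared" detection honest */

	example_handle_event(dev);
	return IRQ_HANDLED;		/* this action's flags are credited above */
}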
do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
struct irqaction *action;
- const int cpu = smp_processor_id();
+ const unsigned int cpu = smp_processor_id();
desc->triggered = 1;
kstat_cpu(cpu).irqs[irq]++;
+ smp_set_running(desc);
+
action = desc->action;
if (action) {
int ret = __do_irq(irq, action, regs);
if (ret != IRQ_HANDLED)
report_bad_irq(irq, regs, desc, ret);
}
+
+ smp_clear_running(desc);
}
/*
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
- const int cpu = smp_processor_id();
+ const unsigned int cpu = smp_processor_id();
desc->triggered = 1;
do {
struct irqaction *action;
- int ret;
action = desc->action;
if (!action)
desc->chip->unmask(irq);
}
- ret = __do_irq(irq, action, regs);
- if (ret != IRQ_HANDLED)
- report_bad_irq(irq, regs, desc, ret);
+ __do_irq(irq, action, regs);
} while (desc->pending && !desc->disable_depth);
desc->running = 0;
do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
struct irqaction *action;
- const int cpu = smp_processor_id();
+ const unsigned int cpu = smp_processor_id();
desc->triggered = 1;
if (likely(!desc->disable_depth)) {
kstat_cpu(cpu).irqs[irq]++;
+ smp_set_running(desc);
+
/*
* Return with this interrupt masked if no action
*/
!check_irq_lock(desc, irq, regs)))
desc->chip->unmask(irq);
}
+
+ smp_clear_running(desc);
}
}
list_for_each_safe(l, n, &head) {
desc = list_entry(l, struct irqdesc, pend);
list_del_init(&desc->pend);
- desc->handle(desc - irq_desc, desc, regs);
+ desc_handle_irq(desc - irq_desc, desc, regs);
}
/*
* come via this function. Instead, they should provide their
* own 'handler'
*/
-asmlinkage void asm_do_IRQ(int irq, struct pt_regs *regs)
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
struct irqdesc *desc = irq_desc + irq;
irq_enter();
spin_lock(&irq_controller_lock);
- desc->handle(irq, desc, regs);
+ desc_handle_irq(irq, desc, regs);
/*
* Now re-run any pending interrupts.
if (!list_empty(&irq_pending))
do_pending_irqs(regs);
+ irq_finish(irq);
+
spin_unlock(&irq_controller_lock);
irq_exit();
}
}
desc = irq_desc + irq;
- if (desc->chip->type) {
+ if (desc->chip->set_type) {
spin_lock_irqsave(&irq_controller_lock, flags);
- ret = desc->chip->type(irq, type);
+ ret = desc->chip->set_type(irq, type);
spin_unlock_irqrestore(&irq_controller_lock, flags);
}
spin_lock_irqsave(&irq_controller_lock, flags);
p = &desc->action;
if ((old = *p) != NULL) {
- /* Can't share interrupts unless both agree to */
- if (!(old->flags & new->flags & SA_SHIRQ)) {
+ /*
+ * Can't share interrupts unless both agree to and are
+ * the same type.
+ */
+ if (!(old->flags & new->flags & SA_SHIRQ) ||
+ (~old->flags & new->flags) & SA_TRIGGER_MASK) {
spin_unlock_irqrestore(&irq_controller_lock, flags);
return -EBUSY;
}
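The extra SA_TRIGGER_MASK test means sharing now also requires agreement on the trigger type, not just SA_SHIRQ. A hedged sketch of two co-operating users follows (all names hypothetical); a second request asking for a different SA_TRIGGER_* setting would now get -EBUSY.

#include <linux/interrupt.h>

/* Illustration only: both users pass SA_SHIRQ and the same trigger bits. */
static int example_claim_shared(unsigned int irq,
				struct example_a *a, struct example_b *b)
{
	int ret;

	ret = request_irq(irq, example_a_isr,
			  SA_SHIRQ | SA_TRIGGER_FALLING, "example-a", a);
	if (ret)
		return ret;

	/* SA_TRIGGER_RISING here instead would be rejected with -EBUSY. */
	return request_irq(irq, example_b_isr,
			   SA_SHIRQ | SA_TRIGGER_FALLING, "example-b", b);
}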
desc->running = 0;
desc->pending = 0;
desc->disable_depth = 1;
+
+ if (new->flags & SA_TRIGGER_MASK &&
+ desc->chip->set_type) {
+ unsigned int type = new->flags & SA_TRIGGER_MASK;
+ desc->chip->set_type(irq, type);
+ }
+
if (!desc->noautoenable) {
desc->disable_depth = 0;
desc->chip->unmask(irq);
action->handler = handler;
action->flags = irq_flags;
- action->mask = 0;
+ cpus_clear(action->mask);
action->name = devname;
action->next = NULL;
action->dev_id = dev_id;
irq_desc[i].probing = 1;
irq_desc[i].triggered = 0;
- if (irq_desc[i].chip->type)
- irq_desc[i].chip->type(i, IRQT_PROBE);
+ if (irq_desc[i].chip->set_type)
+ irq_desc[i].chip->set_type(i, IRQT_PROBE);
irq_desc[i].chip->unmask(i);
irqs += 1;
}
EXPORT_SYMBOL(probe_irq_off);
+#ifdef CONFIG_SMP
+static void route_irq(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
+{
+ pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
+
+ spin_lock_irq(&irq_controller_lock);
+ desc->cpu = cpu;
+ desc->chip->set_cpu(desc, irq, cpu);
+ spin_unlock_irq(&irq_controller_lock);
+}
+
+#ifdef CONFIG_PROC_FS
+static int
+irq_affinity_read_proc(char *page, char **start, off_t off, int count,
+ int *eof, void *data)
+{
+ struct irqdesc *desc = irq_desc + ((int)data);
+ int len = cpumask_scnprintf(page, count, desc->affinity);
+
+ if (count - len < 2)
+ return -EINVAL;
+ page[len++] = '\n';
+ page[len] = '\0';
+
+ return len;
+}
+
+static int
+irq_affinity_write_proc(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ unsigned int irq = (unsigned int)data;
+ struct irqdesc *desc = irq_desc + irq;
+ cpumask_t affinity, tmp;
+ int ret = -EIO;
+
+ if (!desc->chip->set_cpu)
+ goto out;
+
+ ret = cpumask_parse(buffer, count, affinity);
+ if (ret)
+ goto out;
+
+ cpus_and(tmp, affinity, cpu_online_map);
+ if (cpus_empty(tmp)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ desc->affinity = affinity;
+ route_irq(desc, irq, first_cpu(tmp));
+ ret = count;
+
+ out:
+ return ret;
+}
+#endif
+#endif
+
void __init init_irq_proc(void)
{
+#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
+ struct proc_dir_entry *dir;
+ int irq;
+
+ dir = proc_mkdir("irq", NULL);
+ if (!dir)
+ return;
+
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ struct proc_dir_entry *entry;
+ struct irqdesc *desc;
+ char name[16];
+
+ desc = irq_desc + irq;
+ memset(name, 0, sizeof(name));
+ snprintf(name, sizeof(name) - 1, "%u", irq);
+
+ desc->procdir = proc_mkdir(name, dir);
+ if (!desc->procdir)
+ continue;
+
+ entry = create_proc_entry("smp_affinity", 0600, desc->procdir);
+ if (entry) {
+ entry->nlink = 1;
+ entry->data = (void *)irq;
+ entry->read_proc = irq_affinity_read_proc;
+ entry->write_proc = irq_affinity_write_proc;
+ }
+ }
+#endif
}
void __init init_IRQ(void)
{
struct irqdesc *desc;
- extern void init_dma(void);
int irq;
+#ifdef CONFIG_SMP
+ bad_irq_desc.affinity = CPU_MASK_ALL;
+ bad_irq_desc.cpu = smp_processor_id();
+#endif
+
for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
*desc = bad_irq_desc;
INIT_LIST_HEAD(&desc->pend);
}
init_arch_irq();
- init_dma();
}
+
+static int __init noirqdebug_setup(char *str)
+{
+ noirqdebug = 1;
+ return 1;
+}
+
+__setup("noirqdebug", noirqdebug_setup);
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The CPU has been marked offline. Migrate IRQs off this CPU. If
+ * the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ */
+void migrate_irqs(void)
+{
+ unsigned int i, cpu = smp_processor_id();
+
+ for (i = 0; i < NR_IRQS; i++) {
+ struct irqdesc *desc = irq_desc + i;
+
+ if (desc->cpu == cpu) {
+ unsigned int newcpu = any_online_cpu(desc->affinity);
+
+ if (newcpu == NR_CPUS) {
+ if (printk_ratelimit())
+ printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
+ i, cpu);
+
+ cpus_setall(desc->affinity);
+ newcpu = any_online_cpu(desc->affinity);
+ }
+
+ route_irq(desc, i, newcpu);
+ }
+ }
+}
+#endif /* CONFIG_HOTPLUG_CPU */
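For context, a hedged sketch of where migrate_irqs() is expected to be invoked: the CPU-hotplug path running on the dying CPU, roughly as below. The surrounding __cpu_disable() logic lives in the SMP/platform code and is heavily simplified here.

/* Illustration only: simplified shape of the hotplug caller. */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Take this CPU offline, then push its interrupts over to a CPU
	 * that is still online before this one stops handling them.
	 */
	cpu_clear(cpu, cpu_online_map);
	migrate_irqs();

	return 0;
}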