2 * linux/arch/arm/kernel/irq.c
4 * Copyright (C) 1992 Linus Torvalds
5 * Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * This file contains the code used by various IRQ handling routines:
12 * asking for different IRQ's should be done through these routines
13 * instead of just grabbing them. Thus setups with different IRQ numbers
14 * shouldn't result in any weird surprises, and installing new handlers
17 * IRQ's are in fact implemented a bit like signal handlers for the kernel.
18 * Naturally it's not a 1:1 relation, but there are similarities.
20 #include <linux/config.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/module.h>
23 #include <linux/signal.h>
24 #include <linux/ioport.h>
25 #include <linux/interrupt.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/random.h>
29 #include <linux/smp.h>
30 #include <linux/init.h>
31 #include <linux/seq_file.h>
32 #include <linux/errno.h>
33 #include <linux/list.h>
34 #include <linux/kallsyms.h>
37 #include <asm/system.h>
38 #include <asm/mach/irq.h>
41 * Maximum IRQ count. Currently, this is arbitrary. However, it should
42 * not be set too low to prevent false triggering. Conversely, if it
43 * is set too high, then you could miss a stuck IRQ.
45 * Maybe we ought to set a timer and re-enable the IRQ at a later time?
/* Threshold used by check_irq_lock() below: this many IRQs at the same PC
 * within one jiffy is treated as a stuck interrupt line. */
47 #define MAX_IRQ_CNT 100000
/* Count of spurious/unhandled interrupts; reported by show_interrupts(). */
49 static volatile unsigned long irq_err_count;
/* Protects irq_desc[] state and the irq_pending list below. */
50 static spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED;
/* Descriptors whose handling was deferred; re-run via do_pending_irqs(). */
51 static LIST_HEAD(irq_pending);
53 struct irqdesc irq_desc[NR_IRQS];
/* NOTE(review): presumably assigned by machine-specific setup code before the
 * first interrupt arrives — confirm against the platform init path. */
54 void (*init_arch_irq)(void) __initdata = NULL;
57 * Dummy mask/unmask handler
/* Placeholder chip operation for unclaimed IRQ lines (see bad_chip below).
 * Body not visible in this extract; presumably a no-op — confirm. */
59 void dummy_mask_unmask_irq(unsigned int irq)
/* Default do-nothing ISR, usable where an action must exist but nothing is
 * required.  Body not visible in this extract; presumably returns IRQ_NONE
 * — confirm. */
63 irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
/* Flow handler installed for IRQ lines that should never fire: just log the
 * spurious interrupt.  (Remaining body lines, e.g. the irq_err_count update,
 * are not visible in this extract.) */
68 void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
71 printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
/* Chip used for unclaimed IRQ lines: every hardware operation is a no-op. */
74 static struct irqchip bad_chip = {
75 .ack = dummy_mask_unmask_irq,
76 .mask = dummy_mask_unmask_irq,
77 .unmask = dummy_mask_unmask_irq,
/* Template descriptor for unclaimed IRQs; init_IRQ() copies this into every
 * slot of irq_desc[]. */
80 static struct irqdesc bad_irq_desc = {
83 .pend = LIST_HEAD_INIT(bad_irq_desc.pend),
88 * disable_irq - disable an irq and wait for completion
89 * @irq: Interrupt to disable
91 * Disable the selected interrupt line. Enables and disables
92 * are nested. We do this lazily.
94 * This function may be called from IRQ context.
96 void disable_irq(unsigned int irq)
98 struct irqdesc *desc = irq_desc + irq;
101 spin_lock_irqsave(&irq_controller_lock, flags);
/* Lazy disable: just deepen the nesting count; the hardware mask is applied
 * by the flow handlers when the IRQ next fires. */
102 desc->disable_depth++;
/* Also drop any deferred re-run queued on irq_pending for this line. */
103 list_del_init(&desc->pend);
104 spin_unlock_irqrestore(&irq_controller_lock, flags);
106 EXPORT_SYMBOL(disable_irq);
109 * enable_irq - enable interrupt handling on an irq
110 * @irq: Interrupt to enable
112 * Re-enables the processing of interrupts on this IRQ line.
113 * Note that this may call the interrupt handler, so you may
114 * get unexpected results if you hold IRQs disabled.
116 * This function may be called from IRQ context.
118 void enable_irq(unsigned int irq)
120 struct irqdesc *desc = irq_desc + irq;
123 spin_lock_irqsave(&irq_controller_lock, flags);
124 if (unlikely(!desc->disable_depth)) {
/* More enables than disables: report the caller that unbalanced us. */
125 printk("enable_irq(%u) unbalanced from %p\n", irq,
126 __builtin_return_address(0));
127 } else if (!--desc->disable_depth) {
/* Nesting count hit zero: unmask at the hardware. */
129 desc->chip->unmask(irq);
132 * If the interrupt is waiting to be processed,
133 * try to re-run it. We can't directly run it
134 * from here since the caller might be in an
135 * interrupt-protected region.
/* Only queue it if it isn't already on a pending list. */
137 if (desc->pending && list_empty(&desc->pend)) {
/* Prefer a hardware retrigger when the chip supports it; fall back to
 * the software irq_pending list otherwise. */
139 if (!desc->chip->retrigger ||
140 desc->chip->retrigger(irq))
141 list_add(&desc->pend, &irq_pending);
144 spin_unlock_irqrestore(&irq_controller_lock, flags);
146 EXPORT_SYMBOL(enable_irq);
149 * Enable wake on selected irq
/* Ask the chip (if it supports wake control) to arm this IRQ as a
 * wakeup source. */
151 void enable_irq_wake(unsigned int irq)
153 struct irqdesc *desc = irq_desc + irq;
156 spin_lock_irqsave(&irq_controller_lock, flags);
157 if (desc->chip->wake)
158 desc->chip->wake(irq, 1);
159 spin_unlock_irqrestore(&irq_controller_lock, flags);
161 EXPORT_SYMBOL(enable_irq_wake);
/* Counterpart to enable_irq_wake(): disarm this IRQ as a wakeup source,
 * if the chip supports wake control. */
163 void disable_irq_wake(unsigned int irq)
165 struct irqdesc *desc = irq_desc + irq;
168 spin_lock_irqsave(&irq_controller_lock, flags);
169 if (desc->chip->wake)
170 desc->chip->wake(irq, 0);
171 spin_unlock_irqrestore(&irq_controller_lock, flags);
173 EXPORT_SYMBOL(disable_irq_wake);
/* seq_file callback backing /proc/interrupts: one row per claimed IRQ
 * (count plus the names of all chained actions), followed by the error
 * count.  Several original lines are missing from this extract. */
175 int show_interrupts(struct seq_file *p, void *v)
177 int i = *(loff_t *) v;
178 struct irqaction * action;
182 spin_lock_irqsave(&irq_controller_lock, flags);
183 action = irq_desc[i].action;
187 seq_printf(p, "%3d: %10u ", i, kstat_irqs(i));
188 seq_printf(p, " %s", action->name);
/* Walk the shared-action chain, appending each handler's name. */
189 for (action = action->next; action; action = action->next)
190 seq_printf(p, ", %s", action->name);
194 spin_unlock_irqrestore(&irq_controller_lock, flags);
/* After the per-IRQ rows, emit the summary line(s). */
195 } else if (i == NR_IRQS) {
196 #ifdef CONFIG_ARCH_ACORN
199 seq_printf(p, "Err: %10lu\n", irq_err_count);
205 * IRQ lock detection.
207 * Hopefully, this should get us out of a few locked situations.
208 * However, it may take a while for this to happen, since we need
209 * a large number of IRQs to appear in the same jiffie with the
210 * same instruction pointer (or within 2 instructions).
/* Returns nonzero when the IRQ looks stuck (presumably; the return
 * statements are not visible in this extract — confirm). */
212 static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
214 unsigned long instr_ptr = instruction_pointer(regs);
/* Same jiffy and PC within an 8-byte (2-instruction) window of the last
 * occurrence: treat as a repeat and count it. */
216 if (desc->lck_jif == jiffies &&
217 desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
/* Too many repeats (> MAX_IRQ_CNT): the line is locking up the system. */
220 if (desc->lck_cnt > MAX_IRQ_CNT) {
221 printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
/* Different PC or jiffy: restart tracking from this occurrence. */
226 desc->lck_pc = instruction_pointer(regs);
227 desc->lck_jif = jiffies;
/* Diagnose a handler chain that returned a bad value (or nobody handled the
 * IRQ): print the reason plus each registered handler's address and symbol.
 * The static count limits how often this is reported. */
233 report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret)
235 static int count = 100;
236 struct irqaction *action;
/* Anything other than IRQ_HANDLED/IRQ_NONE is a bogus return mask. */
243 if (ret != IRQ_HANDLED && ret != IRQ_NONE) {
244 printk("irq%u: bogus retval mask %x\n", irq, ret);
246 printk("irq%u: nobody cared\n", irq);
250 printk(KERN_ERR "handlers:");
251 action = desc->action;
253 printk("\n" KERN_ERR "[<%p>]", action->handler);
254 print_symbol(" (%s)", (unsigned long)action->handler);
255 action = action->next;
/* Run every action on this IRQ's chain.  The controller lock is dropped
 * while handlers execute and re-taken (with IRQs disabled) before return. */
261 __do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
266 spin_unlock(&irq_controller_lock);
/* Fast handlers (SA_INTERRUPT) run with IRQs off; otherwise re-enable. */
268 if (!(action->flags & SA_INTERRUPT))
/* Accumulate flags and return values across the whole shared chain. */
273 status |= action->flags;
274 retval |= action->handler(irq, action->dev_id, regs);
275 action = action->next;
/* If any action asked for it, feed this interrupt into the entropy pool. */
278 if (status & SA_SAMPLE_RANDOM)
279 add_interrupt_randomness(irq);
281 spin_lock_irq(&irq_controller_lock);
287 * This is for software-decoded IRQs. The caller is expected to
288 * handle the ack, clear, mask and unmask issues.
291 do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
293 struct irqaction *action;
294 const int cpu = smp_processor_id();
/* Per-CPU interrupt accounting (feeds /proc/interrupts). */
298 kstat_cpu(cpu).irqs[irq]++;
300 action = desc->action;
302 int ret = __do_irq(irq, action, regs);
/* Anything but a clean IRQ_HANDLED gets diagnosed. */
303 if (ret != IRQ_HANDLED)
304 report_bad_irq(irq, regs, desc, ret);
309 * Most edge-triggered IRQ implementations seem to take a broken
310 * approach to this. Hence the complexity.
/* Edge-triggered flow handler: ack-without-mask so further edges are not
 * lost, loop while new edges arrive, and defer (mask+ack) when the IRQ is
 * already running or disabled.  Some original lines (e.g. the running-flag
 * and pending-flag updates) are missing from this extract. */
313 do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
315 const int cpu = smp_processor_id();
320 * If we're currently running this IRQ, or its disabled,
321 * we shouldn't process the IRQ. Instead, turn on the
324 if (unlikely(desc->running || desc->disable_depth))
328 * Acknowledge and clear the IRQ, but don't mask it.
330 desc->chip->ack(irq);
333 * Mark the IRQ currently in progress.
337 kstat_cpu(cpu).irqs[irq]++;
340 struct irqaction *action;
343 action = desc->action;
/* A new edge arrived while we were handling: unmask before re-running so
 * the hardware can deliver subsequent edges. */
347 if (desc->pending && !desc->disable_depth) {
349 desc->chip->unmask(irq);
352 ret = __do_irq(irq, action, regs);
353 if (ret != IRQ_HANDLED)
354 report_bad_irq(irq, regs, desc, ret);
/* Keep looping while edges keep arriving and the line stays enabled. */
355 } while (desc->pending && !desc->disable_depth);
360 * If we were disabled or freed, shut down the handler.
362 if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
367 * We got another IRQ while this one was masked or
368 * currently running. Delay it.
/* Deferred path: mask+ack, leaving desc->pending for a later re-run. */
371 desc->chip->mask(irq);
372 desc->chip->ack(irq);
376 * Level-based IRQ handler. Nice and simple.
/* Level-triggered flow: ack (which also masks/disables the source), run the
 * chain, then unmask again only if still enabled and not detected stuck. */
379 do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
381 struct irqaction *action;
382 const int cpu = smp_processor_id();
387 * Acknowledge, clear _AND_ disable the interrupt.
389 desc->chip->ack(irq);
391 if (likely(!desc->disable_depth)) {
392 kstat_cpu(cpu).irqs[irq]++;
395 * Return with this interrupt masked if no action
397 action = desc->action;
399 int ret = __do_irq(irq, desc->action, regs);
401 if (ret != IRQ_HANDLED)
402 report_bad_irq(irq, regs, desc, ret);
/* Re-enable the source only if nobody disabled it meanwhile and the
 * lock detector did not trip. */
404 if (likely(!desc->disable_depth &&
405 !check_irq_lock(desc, irq, regs)))
406 desc->chip->unmask(irq);
/* Drain the irq_pending list: splice it onto a private list head, run each
 * descriptor's flow handler, and repeat until no new entries appear.
 * Called with irq_controller_lock held (from asm_do_IRQ). */
411 static void do_pending_irqs(struct pt_regs *regs)
413 struct list_head head, *l, *n;
416 struct irqdesc *desc;
419 * First, take the pending interrupts off the list.
420 * The act of calling the handlers may add some IRQs
421 * back onto the list.
424 INIT_LIST_HEAD(&irq_pending);
/* Re-point the spliced entries' end links at our local head. */
425 head.next->prev = &head;
426 head.prev->next = &head;
429 * Now run each entry. We must delete it from our
430 * list before calling the handler.
432 list_for_each_safe(l, n, &head) {
433 desc = list_entry(l, struct irqdesc, pend);
434 list_del_init(&desc->pend);
/* desc - irq_desc recovers the IRQ number from the array index. */
435 desc->handle(desc - irq_desc, desc, regs);
439 * The list must be empty.
441 BUG_ON(!list_empty(&head));
/* New deferrals may have been queued while we ran handlers; go again. */
442 } while (!list_empty(&irq_pending));
446 * do_IRQ handles all hardware IRQ's. Decoded IRQs should not
447 * come via this function. Instead, they should provide their
/* Entry point from the low-level assembly IRQ vector: dispatch to the
 * descriptor's flow handler, then drain anything that was deferred. */
450 asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
452 struct irqdesc *desc = irq_desc + irq;
455 * Some hardware gives randomly wrong interrupts. Rather
456 * than crashing, do something sensible.
/* Out-of-range IRQ numbers fall back to the spurious-IRQ descriptor. */
459 desc = &bad_irq_desc;
462 spin_lock(&irq_controller_lock);
463 desc->handle(irq, desc, regs);
466 * Now re-run any pending interrupts.
468 if (!list_empty(&irq_pending))
469 do_pending_irqs(regs);
471 spin_unlock(&irq_controller_lock);
/* Install the flow handler for an IRQ.  Installing do_bad_IRQ masks and
 * disables the line; installing a chained handler (is_chained) enables it
 * immediately, since chained IRQs are driven by their parent. */
475 void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
477 struct irqdesc *desc;
480 if (irq >= NR_IRQS) {
481 printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
488 desc = irq_desc + irq;
/* A chained handler on a line still using bad_chip means set_irq_chip()
 * was never called — warn. */
490 if (is_chained && desc->chip == &bad_chip)
491 printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);
493 spin_lock_irqsave(&irq_controller_lock, flags);
494 if (handle == do_bad_IRQ) {
495 desc->chip->mask(irq);
496 desc->chip->ack(irq);
497 desc->disable_depth = 1;
499 desc->handle = handle;
500 if (handle != do_bad_IRQ && is_chained) {
503 desc->disable_depth = 0;
504 desc->chip->unmask(irq);
506 spin_unlock_irqrestore(&irq_controller_lock, flags);
/* Associate an irqchip with an IRQ line, validating the IRQ number first.
 * (The assignment line itself is not visible in this extract.) */
509 void set_irq_chip(unsigned int irq, struct irqchip *chip)
511 struct irqdesc *desc;
514 if (irq >= NR_IRQS) {
515 printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
522 desc = irq_desc + irq;
523 spin_lock_irqsave(&irq_controller_lock, flags);
525 spin_unlock_irqrestore(&irq_controller_lock, flags);
/* Configure the trigger type (edge/level polarity) of an IRQ line via the
 * chip's type method, when the chip provides one.  Returns the chip's
 * result; the failure-path returns are not visible in this extract. */
528 int set_irq_type(unsigned int irq, unsigned int type)
530 struct irqdesc *desc;
534 if (irq >= NR_IRQS) {
535 printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
539 desc = irq_desc + irq;
540 if (desc->chip->type) {
541 spin_lock_irqsave(&irq_controller_lock, flags);
542 ret = desc->chip->type(irq, type);
543 spin_unlock_irqrestore(&irq_controller_lock, flags);
548 EXPORT_SYMBOL(set_irq_type);
/* Set the descriptor's valid/probe-able/no-autoenable flags from an
 * IRQF_* bitmask. */
550 void set_irq_flags(unsigned int irq, unsigned int iflags)
552 struct irqdesc *desc;
555 if (irq >= NR_IRQS) {
556 printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
560 desc = irq_desc + irq;
561 spin_lock_irqsave(&irq_controller_lock, flags);
562 desc->valid = (iflags & IRQF_VALID) != 0;
563 desc->probe_ok = (iflags & IRQF_PROBE) != 0;
564 desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
565 spin_unlock_irqrestore(&irq_controller_lock, flags);
/* Attach an irqaction to an IRQ line: handles SA_SAMPLE_RANDOM entropy
 * setup (outside the lock, since it may sleep), shared-IRQ compatibility
 * checking, queue insertion, and auto-enabling the line unless
 * noautoenable is set.  Several original lines are missing from this
 * extract. */
568 int setup_irq(unsigned int irq, struct irqaction *new)
571 struct irqaction *old, **p;
573 struct irqdesc *desc;
576 * Some drivers like serial.c use request_irq() heavily,
577 * so we have to be careful not to interfere with a
580 if (new->flags & SA_SAMPLE_RANDOM) {
582 * This function might sleep, we want to call it first,
583 * outside of the atomic block.
584 * Yes, this might clear the entropy pool if the wrong
585 * driver is attempted to be loaded, without actually
586 * installing a new handler, but is this really a problem,
587 * only the sysadmin is able to do this.
589 rand_initialize_irq(irq);
593 * The following block of code has to be executed atomically
595 desc = irq_desc + irq;
596 spin_lock_irqsave(&irq_controller_lock, flags);
598 if ((old = *p) != NULL) {
599 /* Can't share interrupts unless both agree to */
600 if (!(old->flags & new->flags & SA_SHIRQ)) {
601 spin_unlock_irqrestore(&irq_controller_lock, flags);
605 /* add new interrupt at end of irq queue */
/* First handler on the line: start disabled, then enable unless the
 * descriptor was marked IRQF_NOAUTOEN. */
619 desc->disable_depth = 1;
620 if (!desc->noautoenable) {
621 desc->disable_depth = 0;
622 desc->chip->unmask(irq);
626 spin_unlock_irqrestore(&irq_controller_lock, flags);
631 * request_irq - allocate an interrupt line
632 * @irq: Interrupt line to allocate
633 * @handler: Function to be called when the IRQ occurs
634 * @irqflags: Interrupt type flags
635 * @devname: An ascii name for the claiming device
636 * @dev_id: A cookie passed back to the handler function
638 * This call allocates interrupt resources and enables the
639 * interrupt line and IRQ handling. From the point this
640 * call is made your handler function may be invoked. Since
641 * your handler function must clear any interrupt the board
642 * raises, you must take care both to initialise your hardware
643 * and to set up the interrupt handler in the right order.
645 * Dev_id must be globally unique. Normally the address of the
646 * device data structure is used as the cookie. Since the handler
647 * receives this value it makes sense to use it.
649 * If your interrupt is shared you must pass a non NULL dev_id
650 * as this is required when freeing the interrupt.
654 * SA_SHIRQ Interrupt is shared
656 * SA_INTERRUPT Disable local interrupts while processing
658 * SA_SAMPLE_RANDOM The interrupt can be used for entropy
661 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
662 unsigned long irq_flags, const char * devname, void *dev_id)
664 unsigned long retval;
665 struct irqaction *action;
/* Reject invalid IRQ numbers, unclaimable lines, missing handlers, and
 * shared requests without a dev_id cookie. */
667 if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
668 (irq_flags & SA_SHIRQ && !dev_id))
/* NOTE(review): the cast on kmalloc's return is unnecessary in C. */
671 action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
675 action->handler = handler;
676 action->flags = irq_flags;
677 cpus_clear(action->mask);
678 action->name = devname;
680 action->dev_id = dev_id;
/* setup_irq() does the actual queue insertion and line enabling. */
682 retval = setup_irq(irq, action);
689 EXPORT_SYMBOL(request_irq);
692 * free_irq - free an interrupt
693 * @irq: Interrupt line to free
694 * @dev_id: Device identity to free
696 * Remove an interrupt handler. The handler is removed and if the
697 * interrupt line is no longer in use by any driver it is disabled.
698 * On a shared IRQ the caller must ensure the interrupt is disabled
699 * on the card it drives before calling this function.
701 * This function must not be called from interrupt context.
703 void free_irq(unsigned int irq, void *dev_id)
705 struct irqaction * action, **p;
708 if (irq >= NR_IRQS || !irq_desc[irq].valid) {
709 printk(KERN_ERR "Trying to free IRQ%d\n",irq);
714 spin_lock_irqsave(&irq_controller_lock, flags);
/* Find the action whose dev_id cookie matches, unlinking via **p. */
715 for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
716 if (action->dev_id != dev_id)
719 /* Found it - now free it */
723 spin_unlock_irqrestore(&irq_controller_lock, flags);
726 printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
/* Wait for any in-flight handler on another CPU to finish before
 * the caller may tear down its state. */
729 synchronize_irq(irq);
734 EXPORT_SYMBOL(free_irq);
/* Serializes the probe_irq_on()/probe_irq_off() sequence. */
736 static DECLARE_MUTEX(probe_sem);
738 /* Start the interrupt probing. Unlike other architectures,
739 * we don't return a mask of interrupts from probe_irq_on,
740 * but return the number of interrupts enabled for the probe.
741 * The interrupts which have been enabled for probing is
742 * instead recorded in the irq_desc structure.
744 unsigned long probe_irq_on(void)
746 unsigned int i, irqs = 0;
752 * first snaffle up any unassigned but
753 * probe-able interrupts
755 spin_lock_irq(&irq_controller_lock);
756 for (i = 0; i < NR_IRQS; i++) {
/* Skip lines that are not probe-able or already claimed. */
757 if (!irq_desc[i].probe_ok || irq_desc[i].action)
760 irq_desc[i].probing = 1;
761 irq_desc[i].triggered = 0;
/* Put the line into probe trigger mode if the chip supports it. */
762 if (irq_desc[i].chip->type)
763 irq_desc[i].chip->type(i, IRQT_PROBE);
764 irq_desc[i].chip->unmask(i);
767 spin_unlock_irq(&irq_controller_lock);
770 * wait for spurious interrupts to mask themselves out again
772 for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
773 /* min 100ms delay */;
776 * now filter out any obviously spurious interrupts
778 spin_lock_irq(&irq_controller_lock);
779 for (i = 0; i < NR_IRQS; i++) {
/* A line that fired before the device was poked is spurious. */
780 if (irq_desc[i].probing && irq_desc[i].triggered) {
781 irq_desc[i].probing = 0;
785 spin_unlock_irq(&irq_controller_lock);
790 EXPORT_SYMBOL(probe_irq_on);
/* Return a bitmask (low 16 IRQs only) of probing lines that triggered.
 * The mask-setting line is not visible in this extract; the irqs argument
 * appears unused in what is visible — confirm against the full source. */
792 unsigned int probe_irq_mask(unsigned long irqs)
794 unsigned int mask = 0, i;
796 spin_lock_irq(&irq_controller_lock);
797 for (i = 0; i < 16 && i < NR_IRQS; i++)
798 if (irq_desc[i].probing && irq_desc[i].triggered)
800 spin_unlock_irq(&irq_controller_lock);
806 EXPORT_SYMBOL(probe_irq_mask);
809 * Possible return values:
810 * >= 0 - interrupt number
811 * -1 - no interrupt/many interrupts
/* End a probe: succeed only if exactly one probing line triggered. */
813 int probe_irq_off(unsigned long irqs)
816 int irq_found = NO_IRQ;
819 * look at the interrupts, and find exactly one
820 * that we were probing has been triggered
822 spin_lock_irq(&irq_controller_lock);
823 for (i = 0; i < NR_IRQS; i++) {
824 if (irq_desc[i].probing &&
825 irq_desc[i].triggered) {
/* A second triggered line makes the result ambiguous. */
826 if (irq_found != NO_IRQ) {
837 spin_unlock_irq(&irq_controller_lock);
844 EXPORT_SYMBOL(probe_irq_off);
/* Stub for the generic /proc IRQ setup hook; body not visible in this
 * extract — presumably empty on ARM. */
846 void __init init_irq_proc(void)
850 void __init init_IRQ(void)
852 struct irqdesc *desc;
853 extern void init_dma(void);
856 for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
857 *desc = bad_irq_desc;
858 INIT_LIST_HEAD(&desc->pend);