/*
 *  linux/arch/arm/kernel/irq.c
 *
 *  Copyright (C) 1992 Linus Torvalds
 *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This file contains the code used by various IRQ handling routines:
 *  asking for different IRQs should be done through these routines
 *  instead of just grabbing them.  Thus, setups with different IRQ numbers
 *  shouldn't result in any weird surprises, and installing new handlers
 *  should be easier.
 *
 *  IRQs are in fact implemented a bit like signal handlers for the kernel.
 *  Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>

#include <asm/irq.h>
#include <asm/system.h>
#include <asm/mach/irq.h>
/*
 * Maximum IRQ count.  Currently, this is arbitrary.  However, it should
 * not be set too low, or we will falsely declare a merely busy IRQ stuck;
 * conversely, if it is set too high, we could miss a genuinely stuck IRQ.
 * (At HZ=100, for example, this means 100000 interrupts within one 10ms
 * jiffy before the IRQ is declared stuck.)
 *
 * Maybe we ought to set a timer and re-enable the IRQ at a later time?
 */
#define MAX_IRQ_CNT	100000
static volatile unsigned long irq_err_count;
static spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(irq_pending);

struct irqdesc irq_desc[NR_IRQS];
void (*init_arch_irq)(void) __initdata = NULL;
/*
 * Dummy mask/unmask handler
 */
void dummy_mask_unmask_irq(unsigned int irq)
{
}

irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_NONE;
}

void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	irq_err_count += 1;
	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}

static struct irqchip bad_chip = {
	.ack	= dummy_mask_unmask_irq,
	.mask	= dummy_mask_unmask_irq,
	.unmask	= dummy_mask_unmask_irq,
};

static struct irqdesc bad_irq_desc = {
	.chip	= &bad_chip,
	.handle	= do_bad_IRQ,
	.pend	= LIST_HEAD_INIT(bad_irq_desc.pend),
};
/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and disables
 *	are nested.  We do this lazily: the line is not masked here,
 *	but no new handlers will be run for it.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->disable_depth++;
	list_del_init(&desc->pend);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
/**
 *	enable_irq - enable interrupt handling on an irq
 *	@irq: Interrupt to enable
 *
 *	Re-enables the processing of interrupts on this IRQ line.
 *	Note that this may call the interrupt handler, so you may
 *	get unexpected results if you hold IRQs disabled.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (unlikely(!desc->disable_depth)) {
		printk("enable_irq(%u) unbalanced from %p\n", irq,
		       __builtin_return_address(0));
	} else if (!--desc->disable_depth) {
		desc->probing = 0;
		desc->chip->unmask(irq);

		/*
		 * If the interrupt is waiting to be processed,
		 * try to re-run it.  We can't directly run it
		 * from here since the caller might be in an
		 * interrupt-protected region.
		 */
		if (desc->pending && list_empty(&desc->pend)) {
			desc->pending = 0;
			if (!desc->chip->retrigger ||
			    desc->chip->retrigger(irq))
				list_add(&desc->pend, &irq_pending);
		}
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
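
/*
 * Example (illustrative only, not compiled into this file): because
 * enables and disables nest, every disable_irq() must be balanced by
 * exactly one enable_irq().  FOO_IRQ is a hypothetical IRQ number.
 */
#if 0
static void foo_nesting_example(void)
{
	disable_irq(FOO_IRQ);	/* depth 0 -> 1: IRQ logically off */
	disable_irq(FOO_IRQ);	/* depth 1 -> 2: nested disable */
	enable_irq(FOO_IRQ);	/* depth 2 -> 1: still disabled */
	enable_irq(FOO_IRQ);	/* depth 1 -> 0: unmasked, and any
				 * pending occurrence is re-queued */
}
#endif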
/*
 * Enable wake on selected irq
 */
void enable_irq_wake(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (desc->chip->wake)
		desc->chip->wake(irq, 1);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

void disable_irq_wake(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (desc->chip->wake)
		desc->chip->wake(irq, 0);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
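
/*
 * Example (illustrative sketch, not compiled into this file): a
 * hypothetical driver marking its interrupt as a wakeup source
 * around suspend.  FOO_IRQ is an invented IRQ number; whether it
 * works depends on the chip providing a ->wake method.
 */
#if 0
static int foo_suspend(void)
{
	/* Ask the chip (if it can) to let this IRQ wake the system. */
	enable_irq_wake(FOO_IRQ);
	return 0;
}

static int foo_resume(void)
{
	disable_irq_wake(FOO_IRQ);
	return 0;
}
#endif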
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v;
	struct irqaction * action;
	unsigned long flags;

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_controller_lock, flags);
		action = irq_desc[i].action;
		if (action) {
			seq_printf(p, "%3d: %10u ", i, kstat_irqs(i));
			seq_printf(p, "  %s", action->name);
			for (action = action->next; action; action = action->next)
				seq_printf(p, ", %s", action->name);
			seq_putc(p, '\n');
		}
		spin_unlock_irqrestore(&irq_controller_lock, flags);
	} else if (i == NR_IRQS) {
#ifdef CONFIG_ARCH_ACORN
		show_fiq_list(p, v);
#endif
		seq_printf(p, "Err: %10lu\n", irq_err_count);
	}
	return 0;
}
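
/*
 * Sample /proc/interrupts output in the format produced above
 * (the counts and device names are illustrative only):
 *
 *	 18:      54321   foo_eth
 *	 21:        123   foo_uart, foo_modem
 *	Err:          0
 */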
/*
 * IRQ lock detection.
 *
 * Hopefully, this should get us out of a few locked situations.
 * However, it may take a while for this to happen, since we need
 * a large number of IRQs to appear in the same jiffy with the
 * same instruction pointer (or within 2 instructions).
 */
static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
{
	unsigned long instr_ptr = instruction_pointer(regs);

	if (desc->lck_jif == jiffies &&
	    desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
		desc->lck_cnt += 1;

		if (desc->lck_cnt > MAX_IRQ_CNT) {
			printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
			return 1;
		}
	} else {
		desc->lck_cnt = 0;
		desc->lck_pc  = instruction_pointer(regs);
		desc->lck_jif = jiffies;
	}
	return 0;
}
static void
report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret)
{
	static int count = 100;
	struct irqaction *action;

	/*
	 * Rate-limit: report at most 100 of these, then stay silent.
	 */
	if (!count)
		return;
	count--;

	if (ret != IRQ_HANDLED && ret != IRQ_NONE) {
		printk("irq%u: bogus retval mask %x\n", irq, ret);
	} else {
		printk("irq%u: nobody cared\n", irq);
	}

	printk(KERN_ERR "handlers:");
	action = desc->action;
	do {
		printk("\n" KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)", (unsigned long)action->handler);
		action = action->next;
	} while (action);
	printk("\n");
}
static int
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
	unsigned int status;
	int retval = 0;

	spin_unlock(&irq_controller_lock);

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	status = 0;
	do {
		status |= action->flags;
		retval |= action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);

	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	/* Re-disable local IRQs before retaking the controller lock. */
	spin_lock_irq(&irq_controller_lock);

	return retval;
}
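
/*
 * Example (illustrative sketch, not compiled into this file): the
 * handlers called from the loop above follow the irqreturn_t
 * convention - IRQ_HANDLED if your device raised the interrupt,
 * IRQ_NONE if it did not.  The "foo" device and helpers are
 * hypothetical.
 */
#if 0
static irqreturn_t foo_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct foo *dev = dev_id;

	if (!foo_irq_pending(dev))
		return IRQ_NONE;	/* not ours: lets sharing work */

	foo_clear_irq(dev);		/* quiesce the device */
	return IRQ_HANDLED;
}
#endif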
/*
 * This is for software-decoded IRQs.  The caller is expected to
 * handle the ack, clear, mask and unmask issues.
 */
void
do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const int cpu = smp_processor_id();

	desc->triggered = 1;

	kstat_cpu(cpu).irqs[irq]++;

	action = desc->action;
	if (action) {
		int ret = __do_irq(irq, action, regs);
		if (ret != IRQ_HANDLED)
			report_bad_irq(irq, regs, desc, ret);
	}
}
/*
 * Most edge-triggered IRQ implementations seem to take a broken
 * approach to this.  Hence the complexity: we ack (but do not mask)
 * the IRQ up front, and any occurrence that arrives while the
 * handler is running or the source is disabled is recorded as
 * pending and replayed later.
 */
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	const int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ.  Instead, turn on the
	 * hardware masks.
	 */
	if (unlikely(desc->running || desc->disable_depth))
		goto running;

	/*
	 * Acknowledge and clear the IRQ, but don't mask it.
	 */
	desc->chip->ack(irq);

	/*
	 * Mark the IRQ currently in progress.
	 */
	desc->running = 1;

	kstat_cpu(cpu).irqs[irq]++;

	do {
		struct irqaction *action;
		int ret;

		action = desc->action;
		if (!action)
			break;

		if (desc->pending && !desc->disable_depth) {
			desc->pending = 0;
			desc->chip->unmask(irq);
		}

		ret = __do_irq(irq, action, regs);
		if (ret != IRQ_HANDLED)
			report_bad_irq(irq, regs, desc, ret);
	} while (desc->pending && !desc->disable_depth);

	desc->running = 0;

	/*
	 * If we were disabled or freed, shut down the handler.
	 */
	if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
		return;

running:
	/*
	 * We got another IRQ while this one was masked or
	 * currently running.  Delay it.
	 */
	desc->pending = 1;
	desc->chip->mask(irq);
	desc->chip->ack(irq);
}
/*
 * Level-based IRQ handler.  Nice and simple.
 */
void
do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * Acknowledge, clear _AND_ disable the interrupt.
	 */
	desc->chip->ack(irq);

	if (likely(!desc->disable_depth)) {
		kstat_cpu(cpu).irqs[irq]++;

		/*
		 * Return with this interrupt masked if there is no action.
		 */
		action = desc->action;
		if (action) {
			int ret = __do_irq(irq, desc->action, regs);

			if (ret != IRQ_HANDLED)
				report_bad_irq(irq, regs, desc, ret);

			if (likely(!desc->disable_depth &&
				   !check_irq_lock(desc, irq, regs)))
				desc->chip->unmask(irq);
		}
	}
}
static void do_pending_irqs(struct pt_regs *regs)
{
	struct list_head head, *l, *n;

	do {
		struct irqdesc *desc;

		/*
		 * First, take the pending interrupts off the list.
		 * The act of calling the handlers may add some IRQs
		 * back onto the list.
		 */
		head = irq_pending;
		INIT_LIST_HEAD(&irq_pending);
		head.next->prev = &head;
		head.prev->next = &head;

		/*
		 * Now run each entry.  We must delete it from our
		 * list before calling the handler.
		 */
		list_for_each_safe(l, n, &head) {
			desc = list_entry(l, struct irqdesc, pend);
			list_del_init(&desc->pend);
			desc->handle(desc - irq_desc, desc, regs);
		}

		/*
		 * The list must be empty.
		 */
		BUG_ON(!list_empty(&head));
	} while (!list_empty(&irq_pending));
}
/*
 * asm_do_IRQ handles all hardware IRQs.  Decoded IRQs should not
 * come via this function.  Instead, they should provide their
 * own 'handle' function.
 */
asmlinkage void asm_do_IRQ(int irq, struct pt_regs *regs)
{
	struct irqdesc *desc = irq_desc + irq;

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (irq >= NR_IRQS)
		desc = &bad_irq_desc;

	irq_enter();
	spin_lock(&irq_controller_lock);
	desc->handle(irq, desc, regs);

	/*
	 * Now re-run any pending interrupts.
	 */
	if (!list_empty(&irq_pending))
		do_pending_irqs(regs);

	spin_unlock(&irq_controller_lock);
	irq_exit();
}
void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
		return;
	}

	if (handle == NULL)
		handle = do_bad_IRQ;

	desc = irq_desc + irq;

	if (is_chained && desc->chip == &bad_chip)
		printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (handle == do_bad_IRQ) {
		desc->chip->mask(irq);
		desc->chip->ack(irq);
		desc->disable_depth = 1;
	}
	desc->handle = handle;
	if (handle != do_bad_IRQ && is_chained) {
		desc->valid = 0;
		desc->probe_ok = 0;
		desc->disable_depth = 0;
		desc->chip->unmask(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
void set_irq_chip(unsigned int irq, struct irqchip *chip)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
		return;
	}

	if (chip == NULL)
		chip = &bad_chip;

	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->chip = chip;
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
int set_irq_type(unsigned int irq, unsigned int type)
{
	struct irqdesc *desc;
	unsigned long flags;
	int ret = -ENXIO;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
		return -ENODEV;
	}

	desc = irq_desc + irq;
	if (desc->chip->type) {
		spin_lock_irqsave(&irq_controller_lock, flags);
		ret = desc->chip->type(irq, type);
		spin_unlock_irqrestore(&irq_controller_lock, flags);
	}

	return ret;
}
void set_irq_flags(unsigned int irq, unsigned int iflags)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
		return;
	}

	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->valid = (iflags & IRQF_VALID) != 0;
	desc->probe_ok = (iflags & IRQF_PROBE) != 0;
	desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
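
/*
 * Example (illustrative sketch, not compiled into this file): how
 * machine support code typically wires up one level-triggered source
 * using the helpers above.  The "foo" chip functions and FOO_IRQ are
 * hypothetical.
 */
#if 0
static struct irqchip foo_chip = {
	.ack	= foo_mask_irq,		/* for a level IRQ, ack masks too */
	.mask	= foo_mask_irq,
	.unmask	= foo_unmask_irq,
};

static void __init foo_init_irq(void)
{
	set_irq_chip(FOO_IRQ, &foo_chip);
	__set_irq_handler(FOO_IRQ, do_level_IRQ, 0);
	set_irq_flags(FOO_IRQ, IRQF_VALID | IRQF_PROBE);
}
#endif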
int setup_irq(unsigned int irq, struct irqaction *new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	struct irqdesc *desc;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it first,
		 * outside of the atomic block.  Yes, this might clear
		 * the entropy pool if the wrong driver is attempted to
		 * be loaded without actually installing a new handler,
		 * but only the sysadmin can do that, so it is hardly
		 * a problem.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->probing = 0;
		desc->running = 0;
		desc->pending = 0;
		desc->disable_depth = 1;
		if (!desc->noautoenable) {
			desc->disable_depth = 0;
			desc->chip->unmask(irq);
		}
	}

	spin_unlock_irqrestore(&irq_controller_lock, flags);
	return 0;
}
/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irq_flags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling.  From the point this
 *	call is made your handler function may be invoked.  Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique.  Normally the address of the
 *	device data structure is used as the cookie.  Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non-NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *	SA_INTERRUPT		Disable local interrupts while processing
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		 unsigned long irq_flags, const char * devname, void *dev_id)
{
	struct irqaction *action;
	int retval;

	if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
	    (irq_flags & SA_SHIRQ && !dev_id))
		return -EINVAL;

	action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irq_flags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);

	if (retval)
		kfree(action);
	return retval;
}

EXPORT_SYMBOL(request_irq);
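
/*
 * Example (illustrative sketch, not compiled into this file): a
 * hypothetical "foo" driver claiming a shared interrupt with the
 * handler convention shown earlier, then releasing it again.
 */
#if 0
static int foo_attach(struct foo *dev)
{
	int ret;

	/*
	 * dev doubles as the dev_id cookie; it must be non-NULL
	 * because we ask for a shared (SA_SHIRQ) line.
	 */
	ret = request_irq(dev->irq, foo_interrupt, SA_SHIRQ, "foo", dev);
	if (ret)
		printk(KERN_ERR "foo: can't claim IRQ%d\n", dev->irq);
	return ret;
}

static void foo_detach(struct foo *dev)
{
	free_irq(dev->irq, dev);	/* same cookie as request_irq() */
}
#endif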
/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler.  The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS || !irq_desc[irq].valid) {
		printk(KERN_ERR "Trying to free IRQ%d\n", irq);
		dump_stack();
		return;
	}

	spin_lock_irqsave(&irq_controller_lock, flags);
	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now unlink it from the chain */
		*p = action->next;
		break;
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);

	if (!action) {
		printk(KERN_ERR "Trying to free already-free IRQ%d\n", irq);
		dump_stack();
	} else {
		/* Wait for any running handlers to finish before freeing. */
		synchronize_irq(irq);
		kfree(action);
	}
}

EXPORT_SYMBOL(free_irq);
static DECLARE_MUTEX(probe_sem);

/* Start the interrupt probing.  Unlike other architectures,
 * we don't return a mask of interrupts from probe_irq_on,
 * but return the number of interrupts enabled for the probe.
 * The interrupts which have been enabled for probing are
 * instead recorded in the irq_desc structure.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i, irqs = 0;
	unsigned long delay;

	down(&probe_sem);

	/*
	 * first snaffle up any unassigned but
	 * probe-able interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (!irq_desc[i].probe_ok || irq_desc[i].action)
			continue;

		irq_desc[i].probing = 1;
		irq_desc[i].triggered = 0;
		if (irq_desc[i].chip->type)
			irq_desc[i].chip->type(i, IRQT_PROBE);
		irq_desc[i].chip->unmask(i);
		irqs += 1;
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * wait for spurious interrupts to mask themselves out again
	 */
	for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
		/* min 100ms delay */;

	/*
	 * now filter out any obviously spurious interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing && irq_desc[i].triggered) {
			irq_desc[i].probing = 0;
			irqs -= 1;
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return irqs;
}

EXPORT_SYMBOL(probe_irq_on);
unsigned int probe_irq_mask(unsigned long irqs)
{
	unsigned int mask = 0, i;

	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < 16 && i < NR_IRQS; i++)
		if (irq_desc[i].probing && irq_desc[i].triggered)
			mask |= 1 << i;
	spin_unlock_irq(&irq_controller_lock);

	up(&probe_sem);

	return mask;
}
/*
 * Possible return values:
 *  >= 0 - interrupt number
 *    -1 - no interrupt/many interrupts
 */
int probe_irq_off(unsigned long irqs)
{
	unsigned int i;
	int irq_found = NO_IRQ;

	/*
	 * look at the interrupts, and find exactly one
	 * of the interrupts we were probing that has triggered
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing &&
		    irq_desc[i].triggered) {
			if (irq_found != NO_IRQ) {
				irq_found = NO_IRQ;
				goto out;
			}
			irq_found = i;
		}
	}

	if (irq_found == NO_IRQ)
		irq_found = -1;
out:
	spin_unlock_irq(&irq_controller_lock);

	up(&probe_sem);

	return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);
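
/*
 * Example (illustrative sketch, not compiled into this file): the
 * classic autoprobe sequence using the calls above.  Note that on ARM
 * probe_irq_on() returns the number of IRQs enabled for probing, not
 * a mask.  foo_trigger_interrupt() is a hypothetical way of making
 * the device raise exactly one interrupt.
 */
#if 0
static int foo_find_irq(struct foo *dev)
{
	unsigned long probed;
	int irq;

	probed = probe_irq_on();
	foo_trigger_interrupt(dev);		/* cause one IRQ */
	mdelay(10);				/* let it arrive */
	irq = probe_irq_off(probed);		/* -1: none or many */

	return irq;
}
#endif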
void __init init_irq_proc(void)
{
}

void __init init_IRQ(void)
{
	struct irqdesc *desc;
	extern void init_dma(void);
	int irq;

	for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
		*desc = bad_irq_desc;
		INIT_LIST_HEAD(&desc->pend);
	}

	init_arch_irq();
	init_dma();
}