/*
 *  linux/arch/arm/kernel/irq.c
 *
 *  Copyright (C) 1992 Linus Torvalds
 *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains the code used by various IRQ handling routines:
 *  asking for different IRQs should be done through these routines
 *  instead of just grabbing them.  Thus setups with different IRQ numbers
 *  shouldn't result in any weird surprises, and installing new handlers
 *  should be easier.
 *
 *  IRQs are in fact implemented a bit like signal handlers for the kernel.
 *  Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>

#include <asm/irq.h>
#include <asm/system.h>
#include <asm/mach/irq.h>
/*
 * Maximum IRQ count.  Currently, this is arbitrary.  However, it should
 * not be set too low to prevent false triggering.  Conversely, if it
 * is set too high, then you could miss a stuck IRQ.
 *
 * Maybe we ought to set a timer and re-enable the IRQ at a later time?
 */
#define MAX_IRQ_CNT	100000
static int noirqdebug;
static volatile unsigned long irq_err_count;
static DEFINE_SPINLOCK(irq_controller_lock);
static LIST_HEAD(irq_pending);

struct irqdesc irq_desc[NR_IRQS];
void (*init_arch_irq)(void) __initdata = NULL;
/*
 * Dummy mask/unmask handler
 */
void dummy_mask_unmask_irq(unsigned int irq)
{
}

irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_NONE;
}

void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	irq_err_count += 1;
	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}
static struct irqchip bad_chip = {
	.ack	= dummy_mask_unmask_irq,
	.mask	= dummy_mask_unmask_irq,
	.unmask	= dummy_mask_unmask_irq,
};

static struct irqdesc bad_irq_desc = {
	.chip	= &bad_chip,
	.handle	= do_bad_IRQ,
	.pend	= LIST_HEAD_INIT(bad_irq_desc.pend),
};
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;

	while (desc->running)
		barrier();
}
EXPORT_SYMBOL(synchronize_irq);

#define smp_set_running(desc)	do { desc->running = 1; } while (0)
#define smp_clear_running(desc)	do { desc->running = 0; } while (0)
#else
#define smp_set_running(desc)	do { } while (0)
#define smp_clear_running(desc)	do { } while (0)
#endif
/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and disables
 *	are nested.  We do this lazily.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->disable_depth++;
	list_del_init(&desc->pend);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);
/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and disables
 *	are nested.  This function waits for any pending IRQ
 *	handlers for this interrupt to complete before returning.
 *	If you use this function while holding a resource the IRQ
 *	handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
/**
 *	enable_irq - enable interrupt handling on an irq
 *	@irq: Interrupt to enable
 *
 *	Re-enables the processing of interrupts on this IRQ line.
 *	Note that this may call the interrupt handler, so you may
 *	get unexpected results if you hold IRQs disabled.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (unlikely(!desc->disable_depth)) {
		printk("enable_irq(%u) unbalanced from %p\n", irq,
			__builtin_return_address(0));
	} else if (!--desc->disable_depth) {
		desc->chip->unmask(irq);

		/*
		 * If the interrupt is waiting to be processed,
		 * try to re-run it.  We can't directly run it
		 * from here since the caller might be in an
		 * interrupt-protected region.
		 */
		if (desc->pending && list_empty(&desc->pend)) {
			desc->pending = 0;
			if (!desc->chip->retrigger ||
			    desc->chip->retrigger(irq))
				list_add(&desc->pend, &irq_pending);
		}
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq);
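
/*
 * Usage sketch (illustrative only, not part of this file): how a driver
 * typically pairs disable_irq()/enable_irq() around reconfiguration.
 * The "example_" name below is hypothetical.
 */
static inline void example_quiesce_and_reprogram(unsigned int irq)
{
	/*
	 * disable_irq() waits for a running handler to finish, so it must
	 * not be called while holding a lock the handler also takes.
	 */
	disable_irq(irq);
	/* ... reprogram the device while its interrupt is quiesced ... */
	enable_irq(irq);	/* only the matching enable re-unmasks the line */
}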
/*
 * Enable wake on the selected irq
 */
void enable_irq_wake(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (desc->chip->wake)
		desc->chip->wake(irq, 1);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq_wake);

void disable_irq_wake(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (desc->chip->wake)
		desc->chip->wake(irq, 0);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_wake);
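
/*
 * Usage sketch (illustrative only, not part of this file): a driver's
 * suspend/resume hooks using the wake helpers above.  Whether the request
 * has any effect depends on the irqchip providing a ->wake method; the
 * "example_" names are hypothetical.
 */
static inline void example_suspend(unsigned int irq, int may_wakeup)
{
	if (may_wakeup)
		enable_irq_wake(irq);	/* keep this IRQ armed as a wakeup source */
	else
		disable_irq(irq);
}

static inline void example_resume(unsigned int irq, int may_wakeup)
{
	if (may_wakeup)
		disable_irq_wake(irq);
	else
		enable_irq(irq);
}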
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, cpu;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		char cpuname[12];

		for_each_present_cpu(cpu) {
			sprintf(cpuname, "CPU%d", cpu);
			seq_printf(p, " %10s", cpuname);
		}
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_controller_lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;

		seq_printf(p, "%3d: ", i);
		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
		seq_printf(p, "  %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_controller_lock, flags);
	} else if (i == NR_IRQS) {
#ifdef CONFIG_ARCH_ACORN
		show_fiq_list(p, v);
#endif
		seq_printf(p, "Err: %10lu\n", irq_err_count);
	}
	return 0;
}
/*
 * IRQ lock detection.
 *
 * Hopefully, this should get us out of a few locked situations.
 * However, it may take a while for this to happen, since we need
 * a large number of IRQs to appear in the same jiffy with the
 * same instruction pointer (or within 2 instructions).
 */
static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
{
	unsigned long instr_ptr = instruction_pointer(regs);

	if (desc->lck_jif == jiffies &&
	    desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
		desc->lck_cnt += 1;
		if (desc->lck_cnt > MAX_IRQ_CNT) {
			printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
			return 1;
		}
	} else {
		desc->lck_cnt = 0;
		desc->lck_pc  = instruction_pointer(regs);
		desc->lck_jif = jiffies;
	}
	return 0;
}
static void
report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret)
{
	static int count = 100;
	struct irqaction *action;

	if (!count || noirqdebug)
		return;
	count--;

	if (ret != IRQ_HANDLED && ret != IRQ_NONE)
		printk("irq%u: bogus retval mask %x\n", irq, ret);
	else
		printk("irq%u: nobody cared\n", irq);

	printk(KERN_ERR "handlers:");
	action = desc->action;
	do {
		printk("\n" KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)", (unsigned long)action->handler);
		action = action->next;
	} while (action);
	printk("\n");
}
static int
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
	unsigned int status;
	int ret, retval = 0;

	spin_unlock(&irq_controller_lock);

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	status = 0;
	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	spin_lock_irq(&irq_controller_lock);
	return retval;
}
/*
 * This is for software-decoded IRQs.  The caller is expected to
 * handle the ack, clear, mask and unmask issues.
 */
void
do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const unsigned int cpu = smp_processor_id();

	desc->triggered = 1;
	kstat_cpu(cpu).irqs[irq]++;

	smp_set_running(desc);

	action = desc->action;
	if (action) {
		int ret = __do_irq(irq, action, regs);
		if (ret != IRQ_HANDLED)
			report_bad_irq(irq, regs, desc, ret);
	}

	smp_clear_running(desc);
}
/*
 * Most edge-triggered IRQ implementations seem to take a broken
 * approach to this.  Hence the complexity.
 */
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	const unsigned int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ.  Instead, turn on the
	 * hardware masks.
	 */
	if (unlikely(desc->running || desc->disable_depth))
		goto running;

	/* Acknowledge and clear the IRQ, but don't mask it. */
	desc->chip->ack(irq);

	/* Mark the IRQ currently in progress. */
	desc->running = 1;
	kstat_cpu(cpu).irqs[irq]++;

	do {
		struct irqaction *action = desc->action;

		if (!action)
			break;

		if (desc->pending && !desc->disable_depth) {
			desc->pending = 0;
			desc->chip->unmask(irq);
		}
		__do_irq(irq, action, regs);
	} while (desc->pending && !desc->disable_depth);

	desc->running = 0;

	/* If we were disabled or freed, shut down the handler. */
	if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
		return;

running:
	/*
	 * We got another IRQ while this one was masked or
	 * currently running.  Delay it.
	 */
	desc->pending = 1;
	desc->chip->mask(irq);
	desc->chip->ack(irq);
}
/*
 * Level-based IRQ handler.  Nice and simple.
 */
void
do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const unsigned int cpu = smp_processor_id();

	desc->triggered = 1;

	/* Acknowledge, clear _AND_ disable the interrupt. */
	desc->chip->ack(irq);

	if (likely(!desc->disable_depth)) {
		kstat_cpu(cpu).irqs[irq]++;

		smp_set_running(desc);

		/*
		 * Return with this interrupt masked if no action
		 * is found.
		 */
		action = desc->action;
		if (action) {
			int ret = __do_irq(irq, desc->action, regs);

			if (ret != IRQ_HANDLED)
				report_bad_irq(irq, regs, desc, ret);

			if (likely(!desc->disable_depth &&
				   !check_irq_lock(desc, irq, regs)))
				desc->chip->unmask(irq);
		}
		smp_clear_running(desc);
	}
}
static void do_pending_irqs(struct pt_regs *regs)
{
	struct list_head head, *l, *n;

	do {
		struct irqdesc *desc;

		/*
		 * First, take the pending interrupts off the list.
		 * The act of calling the handlers may add some IRQs
		 * back onto the list.
		 */
		head = irq_pending;
		INIT_LIST_HEAD(&irq_pending);
		head.next->prev = &head;
		head.prev->next = &head;

		/*
		 * Now run each entry.  We must delete it from our
		 * list before calling the handler.
		 */
		list_for_each_safe(l, n, &head) {
			desc = list_entry(l, struct irqdesc, pend);
			list_del_init(&desc->pend);
			desc->handle(desc - irq_desc, desc, regs);
		}

		/*
		 * The list must now be empty.
		 */
		BUG_ON(!list_empty(&head));
	} while (!list_empty(&irq_pending));
}
/*
 * do_IRQ handles all hardware IRQs.  Decoded IRQs should not
 * come via this function.  Instead, they should provide their
 * own 'handler'.
 */
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct irqdesc *desc = irq_desc + irq;

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (irq >= NR_IRQS)
		desc = &bad_irq_desc;

	irq_enter();
	spin_lock(&irq_controller_lock);
	desc->handle(irq, desc, regs);

	/* Now re-run any pending interrupts. */
	if (!list_empty(&irq_pending))
		do_pending_irqs(regs);

	spin_unlock(&irq_controller_lock);
	irq_exit();
}
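
/*
 * Illustrative sketch (not part of this file) of the "own handler" case
 * mentioned above: a chained handler installed with
 * __set_irq_handler(parent_irq, handler, 1) decodes its controller's
 * pending bits and dispatches each child through the installed flow
 * handler, bypassing asm_do_IRQ().  The pending word and IRQ base are
 * passed in here to keep the example self-contained.
 */
static inline void example_demux(unsigned int pending, unsigned int base,
				 struct pt_regs *regs)
{
	unsigned int bit;

	for (bit = 0; pending; bit++, pending >>= 1) {
		if (pending & 1) {
			struct irqdesc *d = irq_desc + base + bit;

			d->handle(base + bit, d, regs);	/* e.g. do_level_IRQ */
		}
	}
}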
void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
		return;
	}

	desc = irq_desc + irq;

	if (is_chained && desc->chip == &bad_chip)
		printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (handle == do_bad_IRQ) {
		desc->chip->mask(irq);
		desc->chip->ack(irq);
		desc->disable_depth = 1;
	}
	desc->handle = handle;
	if (handle != do_bad_IRQ && is_chained) {
		desc->disable_depth = 0;
		desc->chip->unmask(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
void set_irq_chip(unsigned int irq, struct irqchip *chip)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
		return;
	}

	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->chip = chip;
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
int set_irq_type(unsigned int irq, unsigned int type)
{
	struct irqdesc *desc;
	unsigned long flags;
	int ret = -ENXIO;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
		return -ENODEV;
	}

	desc = irq_desc + irq;
	if (desc->chip->type) {
		spin_lock_irqsave(&irq_controller_lock, flags);
		ret = desc->chip->type(irq, type);
		spin_unlock_irqrestore(&irq_controller_lock, flags);
	}
	return ret;
}
EXPORT_SYMBOL(set_irq_type);
void set_irq_flags(unsigned int irq, unsigned int iflags)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
		return;
	}

	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->valid = (iflags & IRQF_VALID) != 0;
	desc->probe_ok = (iflags & IRQF_PROBE) != 0;
	desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
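
/*
 * Illustrative sketch (not part of this file): how machine/platform code
 * normally wires interrupt sources up with the three helpers above before
 * any driver calls request_irq().  "example_chip", its callbacks and the
 * IRQ range are hypothetical, so this block is not compiled.
 */
#if 0
static struct irqchip example_chip = {
	.ack	= example_ack,		/* hypothetical callbacks */
	.mask	= example_mask,
	.unmask	= example_unmask,
};

static void __init example_init_irq(void)
{
	unsigned int irq;

	for (irq = EXAMPLE_IRQ_FIRST; irq <= EXAMPLE_IRQ_LAST; irq++) {
		set_irq_chip(irq, &example_chip);
		set_irq_handler(irq, do_level_IRQ);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
}
#endif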
int setup_irq(unsigned int irq, struct irqaction *new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	struct irqdesc *desc;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically.
	 */
	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->disable_depth = 1;
		if (!desc->noautoenable) {
			desc->disable_depth = 0;
			desc->chip->unmask(irq);
		}
	}

	spin_unlock_irqrestore(&irq_controller_lock, flags);
	return 0;
}
/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irq_flags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling.  From the point this
 *	call is made your handler function may be invoked.  Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique.  Normally the address of the
 *	device data structure is used as the cookie.  Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *	SA_INTERRUPT		Disable local interrupts while processing
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		 unsigned long irq_flags, const char * devname, void *dev_id)
{
	int retval;
	struct irqaction *action;

	if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
	    (irq_flags & SA_SHIRQ && !dev_id))
		return -EINVAL;

	action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irq_flags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);

	return retval;
}
EXPORT_SYMBOL(request_irq);
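
/*
 * Usage sketch (illustrative only, not part of this file): the usual
 * probe-time pattern for request_irq().  The handler, the "example" name
 * and the cookie are hypothetical; dev is the same cookie later given to
 * free_irq().
 */
static irqreturn_t example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	/* read and clear the device's interrupt status here */
	return IRQ_HANDLED;	/* or IRQ_NONE if it wasn't ours (shared lines) */
}

static inline int example_claim_irq(unsigned int irq, void *dev)
{
	return request_irq(irq, example_interrupt, SA_SHIRQ, "example", dev);
}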
/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler.  The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS || !irq_desc[irq].valid) {
		printk(KERN_ERR "Trying to free IRQ%d\n", irq);
		return;
	}

	spin_lock_irqsave(&irq_controller_lock, flags);
	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		break;
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);

	if (!action) {
		printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
	} else {
		synchronize_irq(irq);
		kfree(action);
	}
}
EXPORT_SYMBOL(free_irq);
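
/*
 * Usage sketch (illustrative only, not part of this file): tearing down
 * the handler claimed in the example above.  dev must be the cookie that
 * was passed to request_irq(); free_irq() synchronizes with a running
 * handler itself, so it must not be called from interrupt context.
 */
static inline void example_release_irq(unsigned int irq, void *dev)
{
	free_irq(irq, dev);	/* line is masked once the last handler is gone */
}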
static DECLARE_MUTEX(probe_sem);

/* Start the interrupt probing.  Unlike other architectures,
 * we don't return a mask of interrupts from probe_irq_on,
 * but return the number of interrupts enabled for the probe.
 * The interrupts which have been enabled for probing are
 * instead recorded in the irq_desc structure.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i, irqs = 0;
	unsigned long delay;

	down(&probe_sem);

	/*
	 * first snaffle up any unassigned but
	 * probe-able interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (!irq_desc[i].probe_ok || irq_desc[i].action)
			continue;

		irq_desc[i].probing = 1;
		irq_desc[i].triggered = 0;
		if (irq_desc[i].chip->type)
			irq_desc[i].chip->type(i, IRQT_PROBE);
		irq_desc[i].chip->unmask(i);
		irqs += 1;
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * wait for spurious interrupts to mask themselves out again
	 */
	for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
		/* min 100ms delay */;

	/*
	 * now filter out any obviously spurious interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing && irq_desc[i].triggered) {
			irq_desc[i].probing = 0;
			irqs -= 1;
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return irqs;
}
EXPORT_SYMBOL(probe_irq_on);
unsigned int probe_irq_mask(unsigned long irqs)
{
	unsigned int mask = 0, i;

	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < 16 && i < NR_IRQS; i++)
		if (irq_desc[i].probing && irq_desc[i].triggered)
			mask |= 1 << i;
	spin_unlock_irq(&irq_controller_lock);

	up(&probe_sem);
	return mask & irqs;
}
EXPORT_SYMBOL(probe_irq_mask);
/*
 * Possible return values:
 *  >= 0 - interrupt number
 *    -1 - no interrupt/many interrupts
 */
int probe_irq_off(unsigned long irqs)
{
	unsigned int i;
	int irq_found = NO_IRQ;

	/*
	 * look at the interrupts, and find exactly one
	 * of those we were probing that has been triggered
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing &&
		    irq_desc[i].triggered) {
			if (irq_found != NO_IRQ) {
				irq_found = NO_IRQ;
				goto out;
			}
			irq_found = i;
		}
	}

	if (irq_found == NO_IRQ)
		irq_found = -1;
out:
	spin_unlock_irq(&irq_controller_lock);

	up(&probe_sem);
	return irq_found;
}
EXPORT_SYMBOL(probe_irq_off);
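
/*
 * Usage sketch (illustrative only, not part of this file): the classic
 * autoprobe sequence built from probe_irq_on()/probe_irq_off().
 * example_trigger_interrupt() is a hypothetical callback that makes the
 * device raise its interrupt, so this block is not compiled.
 */
#if 0
static int example_autoprobe(void)
{
	unsigned long probed = probe_irq_on();

	example_trigger_interrupt();	/* hypothetical: provoke one IRQ */
	/* ... give the interrupt time to arrive ... */

	return probe_irq_off(probed);	/* >= 0: the IRQ, -1: none or many */
}
#endif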
#ifdef CONFIG_SMP
static void route_irq(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
{
	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);

	spin_lock_irq(&irq_controller_lock);
	desc->cpu = cpu;
	desc->chip->set_cpu(desc, irq, cpu);
	spin_unlock_irq(&irq_controller_lock);
}
#ifdef CONFIG_PROC_FS
static int
irq_affinity_read_proc(char *page, char **start, off_t off, int count,
		       int *eof, void *data)
{
	struct irqdesc *desc = irq_desc + ((int)data);
	int len = cpumask_scnprintf(page, count, desc->affinity);

	if (count - len < 2)
		return -EINVAL;
	page[len++] = '\n';
	return len;
}
static int
irq_affinity_write_proc(struct file *file, const char __user *buffer,
			unsigned long count, void *data)
{
	unsigned int irq = (unsigned int)data;
	struct irqdesc *desc = irq_desc + irq;
	cpumask_t affinity, tmp;
	int ret;

	if (!desc->chip->set_cpu)
		return -EIO;

	ret = cpumask_parse(buffer, count, affinity);
	if (ret)
		return ret;

	cpus_and(tmp, affinity, cpu_online_map);
	if (cpus_empty(tmp))
		return -EINVAL;

	desc->affinity = affinity;
	route_irq(desc, irq, first_cpu(tmp));
	return count;
}
#endif	/* CONFIG_PROC_FS */
#endif	/* CONFIG_SMP */
void __init init_irq_proc(void)
{
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
	struct proc_dir_entry *dir;
	int irq;

	dir = proc_mkdir("irq", 0);
	if (!dir)
		return;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct proc_dir_entry *entry;
		struct irqdesc *desc;
		char name[16];

		desc = irq_desc + irq;
		memset(name, 0, sizeof(name));
		snprintf(name, sizeof(name) - 1, "%u", irq);

		desc->procdir = proc_mkdir(name, dir);
		if (!desc->procdir)
			continue;

		entry = create_proc_entry("smp_affinity", 0600, desc->procdir);
		if (entry) {
			entry->data = (void *)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
#endif
}
void __init init_IRQ(void)
{
	struct irqdesc *desc;
	extern void init_dma(void);
	int irq;

#ifdef CONFIG_SMP
	bad_irq_desc.affinity = CPU_MASK_ALL;
	bad_irq_desc.cpu = smp_processor_id();
#endif

	for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
		*desc = bad_irq_desc;
		INIT_LIST_HEAD(&desc->pend);
	}

	init_arch_irq();
	init_dma();
}
static int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	return 1;
}

__setup("noirqdebug", noirqdebug_setup);