/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/uaccess.h>

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.handler = &no_irq_type,
		.lock = SPIN_LOCK_UNLOCKED
	}
};

static void register_irq_proc (unsigned int irq);

/*
 * Special irq handlers.
 */
irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{
	return IRQ_NONE;
}

/*
 * Generic no controller code
 */
static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }

static void ack_none(unsigned int irq)
{
	/*
	 * 'What should we do if we get a hw irq event on an illegal
	 * vector?' Each architecture has to answer this for itself;
	 * it doesn't deserve a generic callback.
	 */
	printk("unexpected interrupt %d\n", irq);
}

/* startup is the same as "enable", shutdown is the same as "disable" */
#define shutdown_none	disable_none
#define end_none	enable_none

struct hw_interrupt_type no_irq_type = {
	.typename	= "none",
	.startup	= startup_none,
	.shutdown	= shutdown_none,
	.enable		= enable_none,
	.disable	= disable_none,
	.ack		= ack_none,
	.end		= end_none,
};
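
/*
 * For comparison, a real interrupt controller fills the same slots with
 * PIC-specific operations. A minimal sketch, where the my_pic_* names
 * are hypothetical and not part of this file:
 *
 *	static struct hw_interrupt_type my_pic_type = {
 *		.typename	= "MY-PIC",
 *		.startup	= my_pic_startup,
 *		.shutdown	= my_pic_shutdown,
 *		.enable		= my_pic_enable,
 *		.disable	= my_pic_disable,
 *		.ack		= my_pic_ack,
 *		.end		= my_pic_end,
 *	};
 */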

atomic_t irq_err_count;

/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for (j=0; j<NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].handler->typename);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS)
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	return 0;
}

inline void synchronize_irq(unsigned int irq)
{
	while (irq_desc[irq].status & IRQ_INPROGRESS)
		cpu_relax();
}

/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
	int status = 1;	/* Force the "do bottom halves" bit */
	int retval = 0;

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	do {
		status |= action->flags;
		retval |= action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}
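
/*
 * Each action->handler called above has the usual driver-side shape: it
 * returns IRQ_HANDLED if its device raised the interrupt and IRQ_NONE
 * otherwise, which matters on shared lines. A minimal sketch, where
 * my_dev_t, MY_IRQ_PENDING and my_handler are all hypothetical:
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		my_dev_t *dev = dev_id;
 *
 *		if (!(readl(dev->status_reg) & MY_IRQ_PENDING))
 *			return IRQ_NONE;
 *		... acknowledge and service the device ...
 *		return IRQ_HANDLED;
 *	}
 */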

static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	struct irqaction *action;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared!\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");
	action = desc->action;
	do {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			(unsigned long)action->handler);
		printk("\n");
		action = action->next;
	} while (action);
}

static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}

static int noirqdebug;

static int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk("IRQ lockup detection disabled\n");

	return 1;
}

__setup("noirqdebug", noirqdebug_setup);

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled then
 * assume that the IRQ is stuck in some manner. Drop a diagnostic and try to
 * turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly-functioning
 * device sharing an IRQ with the failing one)
 *
 * Called under desc->lock
 */
static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	if (action_ret != IRQ_HANDLED) {
		desc->irqs_unhandled++;
		if (action_ret != IRQ_NONE)
			report_bad_irq(irq, desc, action_ret);
	}

	desc->irq_count++;
	if (desc->irq_count < 100000)
		return;

	desc->irq_count = 0;
	if (desc->irqs_unhandled > 99900) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	desc->irqs_unhandled = 0;
}

/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables and enables
 *	nest. Unlike disable_irq(), this function does not ensure
 *	existing instances of the IRQ handler have completed before
 *	returning.
 *
 *	This function may be called from IRQ context.
 */
inline void disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables and enables
 *	nest: for two disables you need two enables. This
 *	function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}

/**
 *	enable_irq - enable interrupt handling on an irq
 *	@irq: Interrupt to enable
 *
 *	Re-enables the processing of interrupts on this IRQ line
 *	provided no disable_irq() calls are still in effect.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~(IRQ_DISABLED | IRQ_INPROGRESS);
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler, irq);
		}
		desc->handler->enable(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq(%u) unbalanced from %p\n", irq,
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
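
/*
 * Illustrative nesting, with a hypothetical dev_irq: disables stack, so
 * each disable_irq() must be paired with an enable_irq() before the
 * line is live again.
 *
 *	disable_irq(dev_irq);		depth 0 -> 1, line masked
 *	disable_irq(dev_irq);		depth 1 -> 2
 *	enable_irq(dev_irq);		depth 2 -> 1, still masked
 *	enable_irq(dev_irq);		depth 1 -> 0, line unmasked
 */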

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction * action;
	unsigned int status;

	kstat_this_cpu.irqs[irq]++;
	spin_lock(&desc->lock);
	desc->handler->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, regs, action);
		spin_lock(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->handler->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}

/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *	SA_INTERRUPT		Disable local interrupts while processing
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
		void *dev_id)
{
	int retval;
	struct irqaction * action;

	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if (irqflags & SA_SHIRQ) {
		if (!dev_id)
			printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]);
	}

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);

	return retval;
}

EXPORT_SYMBOL(request_irq);
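
/*
 * Typical driver-side usage, as a sketch; my_dev and my_handler are
 * hypothetical (see the handler example above):
 *
 *	if (request_irq(my_dev->irq, my_handler, SA_SHIRQ,
 *			"mydriver", my_dev))
 *		return -EBUSY;
 *
 * The same my_dev cookie must later be passed to free_irq() so that the
 * right handler is removed from a shared line.
 */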

/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;

		if (action) {
			struct irqaction **pp = p;

			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);

			/* Wait to make sure it's not being used on another CPU */
			synchronize_irq(irq);
			kfree(action);
			return;
		}
		printk("Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		return;
	}
}

EXPORT_SYMBOL(free_irq);
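
/*
 * Sketch of the matching teardown (hypothetical my_dev and register
 * name, following the request_irq() example above): quiesce the card
 * first, then free the line.
 *
 *	writel(0, my_dev->irq_enable_reg);	(stop the card's interrupts)
 *	free_irq(my_dev->irq, my_dev);
 */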

/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
static DECLARE_MUTEX(probe_sem);

/**
 *	probe_irq_on	- begin an interrupt autodetect
 *
 *	Commence probing for an interrupt. The interrupts are scanned
 *	and a mask of potential interrupt lines is returned.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i;
	irq_desc_t *desc;
	unsigned long val;
	unsigned long delay;

	down(&probe_sem);
	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!irq_desc[i].action)
			irq_desc[i].handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ barrier();

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;	/* was triggered */
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}

EXPORT_SYMBOL(probe_irq_on);

/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/**
 *	probe_irq_mask - scan a bitmap of interrupt lines
 *	@val:	mask of interrupts to consider
 *
 *	Scan the ISA bus interrupt lines and return a bitmap of
 *	active interrupts. The interrupt probe logic state is then
 *	returned to its previous value.
 *
 *	Note: we need to scan all the irq's even though we will
 *	only return ISA irq numbers - just so that we reset them
 *	all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
	int i;
	unsigned int mask;

	mask = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (i < 16 && !(status & IRQ_WAITING))
				mask |= 1 << i;

			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	return mask & val;
}

/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 *	probe_irq_off	- end an interrupt autodetect
 *	@val: mask of potential interrupts (unused)
 *
 *	Scans the unused interrupt lines and returns the line which
 *	appears to have triggered the interrupt. If no interrupt was
 *	found then zero is returned. If more than one interrupt is
 *	found then minus the first candidate is returned to indicate
 *	there is doubt.
 *
 *	The interrupt probe logic state is returned to its previous
 *	value.
 *
 *	BUGS: When used in a module (which arguably shouldn't happen)
 *	nothing prevents two IRQ probe callers from overlapping. The
 *	results of this are non-optimal.
 */
int probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	if (nr_irqs > 1)
		irq_found = -irq_found;

	return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);
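
/*
 * Classic autoprobe sequence as used by ISA-era drivers. A sketch;
 * my_card_trigger_irq() is a hypothetical helper that makes the card
 * assert its interrupt line:
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	mask = probe_irq_on();
 *	my_card_trigger_irq();
 *	udelay(100);
 *	irq = probe_irq_off(mask);	(>0: found; 0: none; <0: ambiguous)
 */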

/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = irq_desc + irq;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);

	register_irq_proc(irq);
	return 0;
}
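
/*
 * setup_irq() is also used directly by platform code that cannot
 * kmalloc() yet, with a static struct irqaction. A sketch, with
 * hypothetical names:
 *
 *	static struct irqaction timer_irqaction = {
 *		.handler	= timer_interrupt,
 *		.flags		= SA_INTERRUPT,
 *		.name		= "timer",
 *	};
 *
 *	setup_irq(TIMER_IRQ, &timer_irqaction);
 */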

void __init init_generic_irq(void)
{
	int i;

	for (i = 0; i < NR_IRQS; i++) {
		irq_desc[i].status  = IRQ_DISABLED;
		irq_desc[i].action  = NULL;
		irq_desc[i].depth   = 1;
		irq_desc[i].handler = &no_irq_type;
	}
}

EXPORT_SYMBOL(disable_irq_nosync);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(probe_irq_mask);

static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];

#ifdef CONFIG_SMP

static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];

static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);

	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static int irq_affinity_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	cpumask_t new_value, tmp;

	if (!irq_desc[irq].handler->set_affinity)
		return -EIO;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp))
		return -EINVAL;

	irq_affinity[irq] = new_value;
	irq_desc[irq].handler->set_affinity(irq, new_value);

	return full_count;
}

#endif /* CONFIG_SMP */
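
/*
 * Illustrative use from userspace (values hypothetical): writing a hex
 * CPU mask to /proc/irq/N/smp_affinity steers IRQ N, e.g.
 *
 *	echo 3 > /proc/irq/16/smp_affinity
 *
 * restricts IRQ 16 to CPUs 0 and 1; a mask containing no online CPU is
 * rejected with -EINVAL by the check above.
 */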

static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);

	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	cpumask_t *mask = (cpumask_t *)data, new_value;
	unsigned long full_count = count, err;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
			irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	{
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}

		smp_affinity_entry[irq] = entry;
	}
#endif
}

unsigned long prof_cpu_mask = -1;

void init_irq_proc (void)
{
	struct proc_dir_entry *entry;
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);

	if (!entry)
		return;

	entry->nlink = 1;
	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++)
		register_irq_proc(i);
}