/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/uaccess.h>
/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.handler = &no_irq_type,
		.lock = SPIN_LOCK_UNLOCKED
	}
};
static void register_irq_proc (unsigned int irq);

/*
 * Special irq handlers.
 */

irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{ return IRQ_NONE; }

/*
 * Generic no controller code
 */
static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
	/*
	 * 'what should we do if we get a hw irq event on an illegal vector'.
	 * each architecture has to answer this themselves, it doesn't deserve
	 * a generic callback i think.
	 */
	printk("unexpected interrupt %d\n", irq);
}
/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none	disable_none
#define end_none	enable_none

struct hw_interrupt_type no_irq_type = {
	"none",
	startup_none,
	shutdown_none,
	enable_none,
	disable_none,
	ack_none,
	end_none
};
atomic_t irq_err_count;

/*
 * Generic, controller-independent functions:
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].handler->typename);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	}
	return 0;
}
inline void synchronize_irq(unsigned int irq)
{
	while (irq_desc[irq].status & IRQ_INPROGRESS)
		cpu_relax();
}
/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
	int status = 1;	/* Force the "do bottom halves" bit */
	int ret;

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		action = action->next;
	} while (action);

	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return status;
}
static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	struct irqaction *action;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared!\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");
	action = desc->action;
	do {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			(unsigned long)action->handler);
		printk("\n");
		action = action->next;
	} while (action);
}
static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}
static int noirqdebug;

static int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk("IRQ lockup detection disabled\n");
	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
/*
 * If 99,900 of the previous 100,000 interrupts have not been handled then
 * assume that the IRQ is stuck in some manner.  Drop a diagnostic and try to
 * turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly-functioning
 *  device sharing an IRQ with the failing one)
 *
 * Called under desc->lock
 */
static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	if (action_ret != IRQ_HANDLED) {
		desc->irqs_unhandled++;
		if (action_ret != IRQ_NONE)
			report_bad_irq(irq, desc, action_ret);
	}

	desc->irq_count++;
	if (desc->irq_count < 100000)
		return;

	desc->irq_count = 0;
	if (desc->irqs_unhandled > 99900) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	desc->irqs_unhandled = 0;
}
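/*
 * Worked example of the thresholds above (illustrative numbers): with
 * a stuck device asserting a shared line, a 100,000-interrupt window
 * might end with irqs_unhandled == 99,950.  Since 99,950 > 99,900 the
 * line is reported and disabled; the ~50 handled interrupts from a
 * healthy device sharing the line stay within the 100-in-100,000
 * tolerance.
 */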
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */
/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables nest, so the line
 *	stays masked until a matching number of enables.  Unlike
 *	disable_irq(), this function does not ensure existing instances
 *	of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void inline disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables nest; that is,
 *	for two disables you need two enables.  This function waits
 *	for any pending IRQ handlers for this interrupt to complete
 *	before returning.  If you use this function while holding a
 *	resource the IRQ handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
/**
 *	enable_irq - enable interrupt handling on an irq
 *	@irq: Interrupt to enable
 *
 *	Re-enables the processing of interrupts on this IRQ line
 *	providing no disable_irq calls are now in effect.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~(IRQ_DISABLED | IRQ_INPROGRESS);

		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler, irq);
		}
		desc->handler->enable(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq(%u) unbalanced from %p\n", irq,
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
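/*
 * Example of the depth (nesting) semantics implemented above, from a
 * hypothetical caller; each disable must be paired with exactly one
 * enable before the line is unmasked again:
 *
 *	disable_irq(irq);		depth 0 -> 1, line masked
 *	disable_irq_nosync(irq);	depth 1 -> 2, still masked
 *	enable_irq(irq);		depth 2 -> 1, still masked
 *	enable_irq(irq);		depth 1 -> 0, line unmasked
 *
 * One more enable_irq() here would land in "case 0" above and log the
 * "unbalanced" warning.
 */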
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction * action;
	unsigned int status;

	irq_enter();
	kstat_this_cpu.irqs[irq]++;
	spin_lock(&desc->lock);
	desc->handler->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set IRQ_PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, regs, action);
		spin_lock(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->handler->end(irq);
	spin_unlock(&desc->lock);

	irq_exit();

	return 1;
}
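/*
 * Illustrative two-CPU timeline for the IRQ_PENDING dance above, for a
 * line that fires again while its handler is running (hypothetical
 * scenario, not tied to any particular board):
 *
 *	CPU0: do_IRQ() sets IRQ_INPROGRESS, drops desc->lock, runs the
 *	      handler via handle_IRQ_event()
 *	CPU1: do_IRQ() for the same irq sees IRQ_INPROGRESS, leaves
 *	      IRQ_PENDING set and exits through "out:"
 *	CPU0: retakes desc->lock, sees IRQ_PENDING, clears it and loops
 *	      to run the handler once more - the second event is not lost
 */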
/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *
 *	SA_INTERRUPT		Disable local interrupts while processing
 *
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
		void *dev_id)
{
	int retval;
	struct irqaction * action;

	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if (irqflags & SA_SHIRQ) {
		if (!dev_id)
			printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]);
	}

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);

	return retval;
}

EXPORT_SYMBOL(request_irq);
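/*
 * Typical call sequence from a hypothetical driver (my_dev, MY_IRQ and
 * my_interrupt are illustrative names, not part of this file):
 *
 *	static irqreturn_t my_interrupt(int irq, void *dev_id,
 *					struct pt_regs *regs)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		if (!my_dev_irq_raised(dev))	(hypothetical helper)
 *			return IRQ_NONE;	(not ours: shared line)
 *		... acknowledge the interrupt on the board ...
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(MY_IRQ, my_interrupt, SA_SHIRQ, "my_dev", dev);
 *
 * A non-NULL dev_id is mandatory with SA_SHIRQ, as the sanity check
 * above warns about.
 */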
/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock, flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;

		if (action) {
			struct irqaction **pp = p;

			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock, flags);

			/* Wait to make sure it's not being used on another CPU */
			synchronize_irq(irq);
			kfree(action);
			return;
		}
		printk("Trying to free free IRQ%d\n", irq);
		spin_unlock_irqrestore(&desc->lock, flags);
		return;
	}
}

EXPORT_SYMBOL(free_irq);
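/*
 * The matching teardown for the request_irq() example above would be
 * (hypothetical driver code):
 *
 *	free_irq(MY_IRQ, dev);		(same dev_id that was requested)
 *
 * If no registered action matches dev_id, the loop above falls through
 * and logs "Trying to free free IRQ".
 */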
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
static DECLARE_MUTEX(probe_sem);
/**
 *	probe_irq_on	- begin an interrupt autodetect
 *
 *	Commence probing for an interrupt. The interrupts are scanned
 *	and a mask of potential interrupt lines is returned.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i;
	irq_desc_t *desc;
	unsigned long val;
	unsigned long delay;

	down(&probe_sem);
	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!irq_desc[i].action)
			irq_desc[i].handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ barrier();

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}

EXPORT_SYMBOL(probe_irq_on);
/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/**
 *	probe_irq_mask - scan a bitmap of interrupt lines
 *	@val:	mask of interrupts to consider
 *
 *	Scan the ISA bus interrupt lines and return a bitmap of
 *	active interrupts. The interrupt probe logic state is then
 *	returned to its previous value.
 *
 *	Note: we need to scan all the irq's even though we will
 *	only return ISA irq numbers - just so that we reset them
 *	all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
	int i;
	unsigned int mask;

	mask = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (i < 16 && !(status & IRQ_WAITING))
				mask |= 1 << i;

			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	return mask & val;
}
/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 *	probe_irq_off	- end an interrupt autodetect
 *	@val: mask of potential interrupts (unused)
 *
 *	Scans the unused interrupt lines and returns the line which
 *	appears to have triggered the interrupt. If no interrupt was
 *	found then zero is returned. If more than one interrupt is
 *	found then minus the first candidate is returned to indicate
 *	there is doubt.
 *
 *	The interrupt probe logic state is returned to its previous
 *	value.
 *
 *	BUGS: When used in a module (which arguably shouldn't happen)
 *	nothing prevents two IRQ probe callers from overlapping. The
 *	results of this are non-optimal.
 */
int probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);
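/*
 * The canonical autoprobe sequence, from a hypothetical ISA driver
 * (my_dev_trigger_irq() is an illustrative helper, not a real API):
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	mask = probe_irq_on();
 *	my_dev_trigger_irq(dev);	(make the card raise its interrupt)
 *	mdelay(10);			(give it time to arrive)
 *	irq = probe_irq_off(mask);	(>0 found, 0 none, <0 ambiguous)
 */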
/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = irq_desc + irq;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded without actually
		 * installing a new handler, but that is hardly a
		 * problem: only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);

	register_irq_proc(irq);
	return 0;
}
void __init init_generic_irq(void)
{
	int i;

	for (i = 0; i < NR_IRQS; i++) {
		irq_desc[i].status  = IRQ_DISABLED;
		irq_desc[i].action  = NULL;
		irq_desc[i].depth   = 1;
		irq_desc[i].handler = &no_irq_type;
	}
}
EXPORT_SYMBOL(disable_irq_nosync);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(probe_irq_mask);
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];

#ifdef CONFIG_SMP

static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];

static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);

	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
static int irq_affinity_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	cpumask_t new_value, tmp;

	if (!irq_desc[irq].handler->set_affinity)
		return -EIO;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp))
		return -EINVAL;

	irq_affinity[irq] = new_value;
	irq_desc[irq].handler->set_affinity(irq, new_value);

	return full_count;
}

#endif	/* CONFIG_SMP */
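/*
 * From userspace the smp_affinity file reads and accepts a hex CPU
 * mask.  Illustrative shell session (the IRQ number is made up):
 *
 *	# cat /proc/irq/17/smp_affinity
 *	ffffffff
 *	# echo 1 > /proc/irq/17/smp_affinity	(route IRQ 17 to CPU0 only)
 *
 * A mask that targets no online CPU is rejected with -EINVAL by
 * irq_affinity_write_proc() above.
 */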
#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
			irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	{
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}

		smp_affinity_entry[irq] = entry;
	}
#endif
}
void init_irq_proc (void)
{
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);

	/* create /proc/irq/prof_cpu_mask */
	create_prof_cpu_mask(root_irq_dir);

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++)
		register_irq_proc(i);
}