/*
 * linux/arch/x86_64/kernel/irq.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *	(mostly architecture independent, will move to kernel/irq.c in 2.5.)
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs assumed to be messaging to Pentium local-APICs)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */
/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.handler = &no_irq_type,
		.lock = SPIN_LOCK_UNLOCKED
	}
};

static void register_irq_proc (unsigned int irq);
/*
 * Special irq handlers.
 */

irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{ return IRQ_NONE; }

/*
 * Generic no controller code
 */

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * Each architecture has to answer this itself; it doesn't deserve
 * a generic callback, I think.
 */
	printk("unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 */
	ack_APIC_irq();
#endif
}
/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none	disable_none
#define end_none	enable_none

struct hw_interrupt_type no_irq_type = {
	"none", startup_none, shutdown_none,
	enable_none, disable_none, ack_none, end_none
};
atomic_t irq_err_count;
#ifdef CONFIG_X86_IO_APIC
#ifdef APIC_MISMATCH_DEBUG
atomic_t irq_mis_count;
#endif
#endif
/*
 * Generic, controller-independent functions:
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0)
		for (j = 0; j < NR_CPUS; j++)
			seq_printf(p, "CPU%d       ", j);

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		seq_printf(p, "%3d: ", i);
		seq_printf(p, "%10u ", kstat_irqs(i));
		for (j = 0; j < NR_CPUS; j++)
			seq_printf(p, "%10u ",
				kstat_cpu(j).irqs[i]);
		seq_printf(p, " %14s", irq_desc[i].handler->typename);

		seq_printf(p, " %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for (j = 0; j < NR_CPUS; j++)
			seq_printf(p, "%10u ", cpu_pda[j].__nmi_count);
#ifdef CONFIG_X86_LOCAL_APIC
		seq_printf(p, "LOC: ");
		for (j = 0; j < NR_CPUS; j++)
			seq_printf(p, "%10u ", cpu_pda[j].apic_timer_irqs);
#endif
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#ifdef CONFIG_X86_IO_APIC
#ifdef APIC_MISMATCH_DEBUG
		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
#endif
	}
	return 0;
}
inline void synchronize_irq(unsigned int irq)
{
	while (irq_desc[irq].status & IRQ_INPROGRESS)
		cpu_relax();
}
/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
	int status = 1;	/* Force the "do bottom halves" bit */

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();
	do {
		status |= action->flags;
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();
	return status;
}
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */
/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function must not be called from IRQ context.
 */
inline void disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	synchronize_irq(irq);
}
/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler,irq);
		}
		desc->handler->enable(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq(%u) unbalanced from %p\n", irq,
			__builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
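
/*
 * Usage sketch (illustrative only, not part of this file; the driver,
 * its "card" structure and the reprogramming helper are hypothetical):
 * a driver that must quiesce its interrupt line while it reconfigures
 * hardware pairs the calls like this:
 *
 *	disable_irq(card->irq);		(waits for running handlers)
 *	my_card_reprogram(card);	(the handler cannot run concurrently)
 *	enable_irq(card->irq);		(must balance the disable above)
 *
 * disable_irq()/disable_irq_nosync() nest, so each one needs exactly one
 * matching enable_irq() before the line is serviced again.
 */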
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	unsigned irq = regs->orig_rax & 0xff;	/* high bits used in ret_from_ code */
	int cpu = smp_processor_id();
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction * action;
	unsigned int status;

	if (irq > 256) BUG();

	kstat_cpu(cpu).irqs[irq]++;
	spin_lock(&desc->lock);
	desc->handler->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */
	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING;		/* we commit to handling */
		status |= IRQ_INPROGRESS;	/* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		spin_unlock(&desc->lock);
		handle_IRQ_event(irq, regs, action);
		spin_lock(&desc->lock);

		if (unlikely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;
out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	if (irq > 256) BUG();
	desc->handler->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	struct irqaction *action;

	action = irq_desc[irq].action;
	if (action)
		if (irqflags & action->flags & SA_SHIRQ)
			action = NULL;
	return !action;
}
/**
 * request_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 * SA_SHIRQ		Interrupt is shared
 * SA_INTERRUPT		Disable local interrupts while processing
 * SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
		void *dev_id)
{
	int retval;
	struct irqaction * action;

	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if (irqflags & SA_SHIRQ) {
		if (!dev_id)
			printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]);
	}

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);
	return retval;
}
EXPORT_SYMBOL(request_irq);
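
/*
 * Usage sketch (illustrative only; the handler, the my_card structure,
 * its helpers and MY_CARD_IRQ are hypothetical, not defined here).  A
 * typical shared-interrupt registration in this API looks like:
 *
 *	static irqreturn_t my_card_interrupt(int irq, void *dev_id,
 *					     struct pt_regs *regs)
 *	{
 *		struct my_card *card = dev_id;
 *
 *		if (!my_card_irq_pending(card))
 *			return IRQ_NONE;	(not ours - the line is shared)
 *		my_card_ack_irq(card);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(MY_CARD_IRQ, my_card_interrupt,
 *			  SA_SHIRQ | SA_SAMPLE_RANDOM, "my_card", card);
 *
 * The dev_id cookie is handed back to the handler and is also what
 * free_irq() later uses to pick the right action off a shared line.
 */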
/**
 * free_irq - free an interrupt
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 *
 * Bugs: Attempting to free an irq in a handler for the same irq hangs
 * the machine.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;

			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);

			/* Wait to make sure it's not being used on another CPU */
			synchronize_irq(irq);
			kfree(action);
			return;
		}
		printk("Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		return;
	}
}
EXPORT_SYMBOL(free_irq);
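
/*
 * Usage sketch (illustrative only; names as in the request_irq() sketch
 * above): teardown passes the same dev_id cookie that was registered,
 * typically from the driver's remove/close path:
 *
 *	free_irq(MY_CARD_IRQ, card);
 *
 * On a shared IRQ the device must be quiesced first, and because this
 * waits for running handlers it cannot be done from the handler itself.
 */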
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
static DECLARE_MUTEX(probe_sem);
/**
 * probe_irq_on - begin an interrupt autodetect
 *
 * Commence probing for an interrupt. The interrupts are scanned
 * and a mask of potential interrupt lines is returned.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i;
	irq_desc_t *desc;
	unsigned long val;
	unsigned long delay;

	down(&probe_sem);
	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!irq_desc[i].action)
			irq_desc[i].handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ barrier();
	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();
	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else if (i < 32)
				val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}

EXPORT_SYMBOL(probe_irq_on);
/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/**
 * probe_irq_mask - scan a bitmap of interrupt lines
 * @val: mask of interrupts to consider
 *
 * Scan the ISA bus interrupt lines and return a bitmap of
 * active interrupts. The interrupt probe logic state is then
 * returned to its previous value.
 *
 * Note: we need to scan all the irq's even though we will
 * only return ISA irq numbers - just so that we reset them
 * all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
	int i;
	unsigned int mask = 0;

	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (i < 16 && !(status & IRQ_WAITING))
				mask |= 1 << i;

			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	return mask & val;
}
/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 * probe_irq_off - end an interrupt autodetect
 * @val: mask of potential interrupts (unused)
 *
 * Scans the unused interrupt lines and returns the line which
 * appears to have triggered the interrupt. If no interrupt was
 * found then zero is returned. If more than one interrupt is
 * found then minus the first candidate is returned to indicate
 * there is doubt.
 *
 * The interrupt probe logic state is returned to its previous
 * value.
 *
 * BUGS: When used in a module (which arguably shouldn't happen)
 * nothing prevents two IRQ probe callers from overlapping. The
 * results of this are non-optimal.
 */
int probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);
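
/*
 * Usage sketch of the autoprobe pair (illustrative only; the helper that
 * makes the hardware raise its interrupt is hypothetical).  A legacy ISA
 * driver that does not know its IRQ line can do:
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	mask = probe_irq_on();			(arm the autodetect logic)
 *	my_card_trigger_test_irq(card);		(make the device raise its IRQ)
 *	mdelay(10);				(give the interrupt time to arrive)
 *	irq = probe_irq_off(mask);		(0: none, <0: ambiguous, >0: the line)
 *
 * Only one prober may run at a time; probe_sem above serializes callers
 * between probe_irq_on() and probe_irq_off()/probe_irq_mask().
 */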
/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = irq_desc + irq;

	if (desc->handler == &no_irq_type)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}
	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);

	register_irq_proc(irq);
	return 0;
}
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];

static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];

static cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);

	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
static int irq_affinity_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	cpumask_t tmp, new_value;

	if (!irq_desc[irq].handler->set_affinity)
		return -EIO;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp))
		return -EINVAL;

	irq_affinity[irq] = new_value;
	irq_desc[irq].handler->set_affinity(irq, new_value);

	return full_count;
}
static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);

	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	unsigned long full_count = count, err;
	cpumask_t new_value, *mask = (cpumask_t *)data;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}
#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
			irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);
	{
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
		if (entry) {
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}

		smp_affinity_entry[irq] = entry;
	}
}
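
/*
 * From userspace the per-IRQ file created above is a plain hex bitmask
 * of CPUs allowed to service the interrupt, e.g. (illustrative shell
 * session, IRQ number and mask chosen arbitrarily):
 *
 *	# cat /proc/irq/19/smp_affinity
 *	ffffffff
 *	# echo 3 > /proc/irq/19/smp_affinity	(restrict IRQ 19 to CPUs 0-1)
 *
 * irq_affinity_write_proc() parses the mask with cpumask_parse() and
 * rejects a mask that contains no online CPU.
 */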
unsigned long prof_cpu_mask = -1;
void init_irq_proc (void)
{
	struct proc_dir_entry *entry;
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
	if (!entry)
		return;

	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++)
		register_irq_proc(i);
}