/*
 * linux/arch/i386/kernel/irq.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs assumed to be messaging to Pentium local-APICs)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.handler = &no_irq_type,
		.lock = SPIN_LOCK_UNLOCKED
	}
};
static void register_irq_proc (unsigned int irq);
/*
 * per-CPU IRQ handling stacks
 */
#ifdef CONFIG_IRQSTACKS
union irq_ctx *hardirq_ctx[NR_CPUS];
union irq_ctx *softirq_ctx[NR_CPUS];
#endif
/*
 * Special irq handlers.
 */

irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{
	return IRQ_NONE;
}
/*
 * Generic no controller code
 */

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
	/*
	 * 'what should we do if we get a hw irq event on an illegal vector'.
	 * each architecture has to answer this themselves, it doesn't deserve
	 * a generic callback i think.
	 */
	printk("unexpected IRQ trap at vector %02x\n", irq);

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 */
	ack_APIC_irq();
#endif
}

/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none	disable_none
#define end_none	enable_none

struct hw_interrupt_type no_irq_type = {
	"none",
	startup_none,
	shutdown_none,
	enable_none,
	disable_none,
	ack_none,
	end_none
};
atomic_t irq_err_count;
#if defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
atomic_t irq_mis_count;
#endif
/*
 * Generic, controller-independent functions:
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for (j=0; j<NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].handler->typename);
		seq_printf(p, "  %s", action->name);
		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", nmi_count(j));
		seq_putc(p, '\n');
#ifdef CONFIG_X86_LOCAL_APIC
		seq_printf(p, "LOC: ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", irq_stat[j].apic_timer_irqs);
		seq_putc(p, '\n');
#endif
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
	}
	return 0;
}
inline void synchronize_irq(unsigned int irq)
{
	while (irq_desc[irq].status & IRQ_INPROGRESS)
		cpu_relax();
}
/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
asmlinkage int handle_IRQ_event(unsigned int irq,
		struct pt_regs *regs, struct irqaction *action)
{
	int status = 1;	/* Force the "do bottom halves" bit */
	int ret, retval = 0;

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();
	return retval;
}
static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	struct irqaction *action;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared! (screaming interrupt?)\n", irq);
		printk(KERN_ERR "irq %d: Please try booting with acpi=off and report a bug\n", irq);
	}
	printk(KERN_ERR "handlers:\n");
	action = desc->action;
	do {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			(unsigned long)action->handler);
		printk("\n");
		action = action->next;
	} while (action);
}

static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count > 0) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}
static int noirqdebug;
static int irqfixup;

int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk(KERN_INFO "IRQ lockup detection disabled\n");
	return 1;
}

__setup("noirqdebug", noirqdebug_setup);

static int __init irqfixup_setup(char *str)
{
	irqfixup = 1;
	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
	printk(KERN_WARNING "This may impact system performance.\n");
	return 1;
}

__setup("irqfixup", irqfixup_setup);

static int __init irqpoll_setup(char *str)
{
	irqfixup = 2;
	printk(KERN_WARNING "Misrouted IRQ fixup and polling support enabled.\n");
	printk(KERN_WARNING "This may significantly impact system performance.\n");
	return 1;
}

__setup("irqpoll", irqpoll_setup);
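/*
 * Usage note (illustrative, not from the original source): the three
 * __setup() hooks above consume kernel boot parameters, e.g. appending
 * "irqpoll" to the boot loader's kernel command line. "noirqdebug"
 * turns off the unhandled-interrupt detector, "irqfixup" enables the
 * misrouted-IRQ recovery scan below, and "irqpoll" (irqfixup == 2)
 * additionally runs that scan on every timer interrupt (IRQ 0), not
 * just on unhandled ones.
 */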
/*
 * Recovery handler for misrouted interrupts.
 */
static asmlinkage int misrouted_irq(int irq, struct pt_regs *regs)
{
	int i;
	irq_desc_t *desc;
	int ok = 0;
	int work = 0;	/* Did we do work for a real IRQ */

	for(i = 1; i < NR_IRQS; i++) {
		struct irqaction *action;

		if(i == irq)	/* Already tried */
			continue;
		desc = &irq_desc[i];
		spin_lock(&desc->lock);
		action = desc->action;
		/* Already running on another processor */
		if(desc->status & IRQ_INPROGRESS) {
			/* Already running: If it is shared get the other
			   CPU to go looking for our mystery interrupt too */
			if(desc->action && (desc->action->flags & SA_SHIRQ))
				desc->status |= IRQ_PENDING;
			spin_unlock(&desc->lock);
			continue;
		}
		/* Honour the normal IRQ locking */
		desc->status |= IRQ_INPROGRESS;
		spin_unlock(&desc->lock);
		while(action) {
			/* Only shared IRQ handlers are safe to call */
			if(action->flags & SA_SHIRQ) {
				if(action->handler(i, action->dev_id, regs) == IRQ_HANDLED)
					ok = 1;
			}
			action = action->next;
		}
		/* Now clean up the flags */
		spin_lock(&desc->lock);
		action = desc->action;

		/* While we were looking for a fixup someone queued a real
		   IRQ clashing with our walk */
		while((desc->status & IRQ_PENDING) && action) {
			/* Perform real IRQ processing for the IRQ we deferred */
			work = 1;
			spin_unlock(&desc->lock);
			handle_IRQ_event(i, regs, action);
			spin_lock(&desc->lock);
			desc->status &= ~IRQ_PENDING;
		}
		desc->status &= ~IRQ_INPROGRESS;
		/* If we did actual work for the real IRQ line we must
		   let the IRQ controller clean up too */
		if(work)
			desc->handler->end(i);
		spin_unlock(&desc->lock);
	}
	/* So the caller can adjust the irq error counts */
	return ok;
}
/*
 * If 99,900 of the previous 100,000 interrupts have not been handled then
 * assume that the IRQ is stuck in some manner. Drop a diagnostic and try to
 * turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly-functioning
 *  device sharing an IRQ with the failing one)
 *
 * Called under desc->lock
 */
static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret, struct pt_regs *regs)
{
	if (action_ret != IRQ_HANDLED) {
		desc->irqs_unhandled++;
		if (action_ret != IRQ_NONE)
			report_bad_irq(irq, desc, action_ret);
	}

	if (unlikely(irqfixup)) {	/* Don't punish working computers */
		if ((irqfixup == 2 && irq == 0) || action_ret == IRQ_NONE) {
			int ok;
			u32 *isp;
			union irq_ctx * curctx;
			union irq_ctx * irqctx;

			curctx = (union irq_ctx *) current_thread_info();
			irqctx = hardirq_ctx[smp_processor_id()];

			spin_unlock(&desc->lock);
			/*
			 * this is where we switch to the IRQ stack. However, if we are already using
			 * the IRQ stack (because we interrupted a hardirq handler) we can't do that
			 * and just have to keep using the current stack (which is the irq stack already
			 * after all)
			 */
			if (curctx == irqctx)
				ok = misrouted_irq(irq, regs);
			else {
				/* build the stack frame on the IRQ stack */
				isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
				irqctx->tinfo.task = curctx->tinfo.task;
				irqctx->tinfo.previous_esp = current_stack_pointer();

				*--isp = (u32) regs;
				*--isp = (u32) irq;

				asm volatile(
					"	xchgl	%%ebx,%%esp	\n"
					"	call	misrouted_irq	\n"
					"	xchgl	%%ebx,%%esp	\n"
					: "=a" (ok)
					: "b" (isp)
					: "memory", "cc", "edx", "ecx"
				);
			}
			spin_lock(&desc->lock);
			if (curctx != irqctx)
				irqctx->tinfo.task = NULL;
			if (action_ret == IRQ_NONE)
				desc->irqs_unhandled -= ok;
		}
	}

	desc->irq_count++;
	if (desc->irq_count < 100000)
		return;

	desc->irq_count = 0;
	if (desc->irqs_unhandled > 99900) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	desc->irqs_unhandled = 0;
}
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
inline void disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq(). If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler,irq);
		}
		desc->handler->enable(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq(%u) unbalanced from %p\n", irq,
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
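/*
 * Illustrative sketch (not part of the original source): a driver
 * masking its IRQ around reconfiguration. EXAMPLE_IRQ and
 * example_reprogram_device() are hypothetical. The points:
 * disable_irq()/enable_irq() calls must be balanced, and disable_irq()
 * waits for running handlers, so it must not be called while holding a
 * resource the handler may take.
 */
#if 0
static void example_reconfigure(void)
{
	disable_irq(EXAMPLE_IRQ);	/* masks the line, waits for handlers */
	example_reprogram_device();	/* no handler instance can run here */
	enable_irq(EXAMPLE_IRQ);	/* must pair with the disable above */
}
#endif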
/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(struct pt_regs regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	int irq = regs.orig_eax & 0xff; /* high bits used in ret_from_ code */
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction * action;
	unsigned int status;

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 1KB free? */
	{
		long esp;

		__asm__ __volatile__("andl %%esp,%0" :
					"=r" (esp) : "0" (THREAD_SIZE - 1));
		if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
			printk("do_IRQ: stack overflow: %ld\n",
				esp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif
	kstat_this_cpu.irqs[irq]++;
	spin_lock(&desc->lock);
	desc->handler->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;
	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;
		u32 *isp;
		union irq_ctx * curctx;
		union irq_ctx * irqctx;

#ifdef CONFIG_IRQSTACKS
		curctx = (union irq_ctx *) current_thread_info();
		irqctx = hardirq_ctx[smp_processor_id()];
#else
		curctx = irqctx = (union irq_ctx *)0;
#endif
		spin_unlock(&desc->lock);

		/*
		 * this is where we switch to the IRQ stack. However, if we are already using
		 * the IRQ stack (because we interrupted a hardirq handler) we can't do that
		 * and just have to keep using the current stack (which is the irq stack already
		 * after all)
		 */
		if (curctx == irqctx)
			action_ret = handle_IRQ_event(irq, &regs, action);
		else {
			/* build the stack frame on the IRQ stack */
			isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
			irqctx->tinfo.task = curctx->tinfo.task;
			irqctx->tinfo.previous_esp = current_stack_pointer();

			*--isp = (u32) action;
			*--isp = (u32) &regs;
			*--isp = (u32) irq;

			asm volatile(
				"	xchgl	%%ebx,%%esp	\n"
				"	call	handle_IRQ_event \n"
				"	xchgl	%%ebx,%%esp	\n"
				: "=a" (action_ret)
				: "b" (isp)
				: "memory", "cc", "edx", "ecx"
			);
		}
		spin_lock(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret, &regs);
		if (curctx != irqctx)
			irqctx->tinfo.task = NULL;
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->handler->end(irq);
	spin_unlock(&desc->lock);

	irq_exit();
	return 1;
}
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	struct irqaction *action;

	if (irq >= NR_IRQS)
		return 0;
	action = irq_desc[irq].action;
	if (action) {
		if (irqflags & action->flags & SA_SHIRQ)
			action = NULL;
	}
	return !action;
}
/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *	SA_INTERRUPT		Disable local interrupts while processing
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
		void *dev_id)
{
	int retval;
	struct irqaction * action;

#if 1
	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if (irqflags & SA_SHIRQ) {
		if (!dev_id)
			printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]);
	}
#endif

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);
	return retval;
}

EXPORT_SYMBOL(request_irq);
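/*
 * Illustrative sketch (not part of the original source): minimal
 * request_irq() usage with a shared handler. struct example_dev, its
 * fields and the example_dev_*() helpers are hypothetical. Note the
 * non-NULL dev_id (required with SA_SHIRQ) and the IRQ_NONE return
 * when the board did not raise the interrupt.
 */
#if 0
static irqreturn_t example_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	struct example_dev *dev = dev_id;

	if (!example_dev_raised_irq(dev))
		return IRQ_NONE;	/* shared line: not our device */
	example_dev_ack_irq(dev);	/* clear the interrupt on the board */
	return IRQ_HANDLED;
}

static int example_attach(struct example_dev *dev)
{
	/* dev is the dev_id cookie passed back to example_handler() */
	return request_irq(dev->irq, example_handler, SA_SHIRQ,
			"example", dev);
}
#endif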
/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);

			/* Wait to make sure it's not being used on another CPU */
			synchronize_irq(irq);
			kfree(action);
			return;
		}
		printk("Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		return;
	}
}

EXPORT_SYMBOL(free_irq);
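/*
 * Illustrative sketch (not part of the original source): the teardown
 * matching the request_irq() example above. The hypothetical card is
 * quiesced first, as the kerneldoc requires for shared IRQs; free_irq()
 * then waits for running handlers, so it must not be called from
 * interrupt context.
 */
#if 0
static void example_detach(struct example_dev *dev)
{
	example_dev_mask_irq(dev);	/* stop the card interrupting */
	free_irq(dev->irq, dev);	/* dev_id picks our action off the list */
}
#endif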
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */

static DECLARE_MUTEX(probe_sem);
/**
 *	probe_irq_on	- begin an interrupt autodetect
 *
 *	Commence probing for an interrupt. The interrupts are scanned
 *	and a mask of potential interrupt lines is returned.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i;
	irq_desc_t *desc;
	unsigned long val;
	unsigned long delay;

	down(&probe_sem);
	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!irq_desc[i].action)
			irq_desc[i].handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ barrier();

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}

EXPORT_SYMBOL(probe_irq_on);
/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/**
 *	probe_irq_mask - scan a bitmap of interrupt lines
 *	@val:	mask of interrupts to consider
 *
 *	Scan the ISA bus interrupt lines and return a bitmap of
 *	active interrupts. The interrupt probe logic state is then
 *	returned to its previous value.
 *
 *	Note: we need to scan all the irqs even though we will
 *	only return ISA irq numbers - just so that we reset them
 *	all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
	int i;
	unsigned int mask;

	mask = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (i < 16 && !(status & IRQ_WAITING))
				mask |= 1 << i;

			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	return mask & val;
}
/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 *	probe_irq_off	- end an interrupt autodetect
 *	@val: mask of potential interrupts (unused)
 *
 *	Scans the unused interrupt lines and returns the line which
 *	appears to have triggered the interrupt. If no interrupt was
 *	found then zero is returned. If more than one interrupt is
 *	found then minus the first candidate is returned to indicate
 *	there is doubt.
 *
 *	The interrupt probe logic state is returned to its previous
 *	value.
 *
 *	BUGS: When used in a module (which arguably shouldn't happen)
 *	nothing prevents two IRQ probe callers from overlapping. The
 *	results of this are non-optimal.
 */
int probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);
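/*
 * Illustrative sketch (not part of the original source): the classic
 * autoprobe sequence built from probe_irq_on()/probe_irq_off(). The
 * hypothetical example_dev_trigger_irq() would poke the card so it
 * raises its interrupt between the two calls.
 */
#if 0
static int example_probe_irq(struct example_dev *dev)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* arm all unassigned lines */
	example_dev_trigger_irq(dev);	/* make the card interrupt */
	irq = probe_irq_off(mask);	/* 0: nothing, <0: ambiguous */

	return irq > 0 ? irq : -ENODEV;
}
#endif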
/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = irq_desc + irq;

	if (desc->handler == &no_irq_type)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);

	register_irq_proc(irq);
	return 0;
}
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];

#ifdef CONFIG_SMP

static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];

cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
static int irq_affinity_read_proc(char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);

	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
int no_irq_affinity;

static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
					unsigned long count, void *data)
{
	int irq = (long)data, full_count = count, err;
	cpumask_t new_value, tmp;

	if (!irq_desc[irq].handler->set_affinity || no_irq_affinity)
		return -EIO;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp))
		return -EINVAL;

	irq_affinity[irq] = new_value;
	irq_desc[irq].handler->set_affinity(irq,
					cpumask_of_cpu(first_cpu(new_value)));

	return full_count;
}

#endif
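/*
 * Illustrative sketch (not part of the original source): a userspace
 * program driving the /proc interface implemented by the two handlers
 * above. The IRQ number and mask are examples; the mask has one bit
 * per CPU.
 */
#if 0
#include <stdio.h>

int main(void)
{
	/* route (hypothetical) IRQ 19 to CPU 1 only: mask 0x2 */
	FILE *f = fopen("/proc/irq/19/smp_affinity", "w");

	if (!f)
		return 1;
	fprintf(f, "2\n");
	return fclose(f) ? 1 : 0;
}
#endif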
#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
			irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	{
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}

		smp_affinity_entry[irq] = entry;
	}
#endif
}
void init_irq_proc (void)
{
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	create_prof_cpu_mask(root_irq_dir);

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++)
		register_irq_proc(i);
}
#ifdef CONFIG_IRQSTACKS
/*
 * These should really be __section__(".bss.page_aligned") as well, but
 * gcc's 3.0 and earlier don't handle that correctly.
 */
static char softirq_stack[NR_CPUS * THREAD_SIZE] __attribute__((__aligned__(THREAD_SIZE)));
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __attribute__((__aligned__(THREAD_SIZE)));
/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

	irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= SOFTIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk("CPU %u irqstacks, hard=%p soft=%p\n",
		cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
}
extern asmlinkage void __do_softirq(void);

asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = softirq_ctx[smp_processor_id()];
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer();

		/* build the stack frame on the softirq stack */
		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));

		asm volatile(
			"	xchgl	%%ebx,%%esp	\n"
			"	call	__do_softirq	\n"
			"	movl	%%ebx,%%esp	\n"
			: : "b"(isp)
			: "memory", "cc", "edx", "ecx", "eax"
		);
	}

	local_irq_restore(flags);
}

EXPORT_SYMBOL(do_softirq);
#endif