/*
 * arch/ppc/kernel/irq.c
 *
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu)
 * Copyright (C) 1996 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them.  Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/kallsyms.h>
#include <linux/profile.h>

#include <asm/uaccess.h>
#include <asm/bitops.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/iSeries/LparData.h>
#include <asm/machdep.h>
extern void iSeries_smp_message_recv(struct pt_regs *);

static void register_irq_proc(unsigned int irq);
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.lock = SPIN_LOCK_UNLOCKED
	}
};
int __irq_offset_value;
int ppc_spurious_interrupts;
unsigned long lpevent_count;
int
setup_irq(unsigned int irq, struct irqaction *new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = get_irq_desc(irq);

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically.
	 */
	spin_lock_irqsave(&desc->lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT |
				  IRQ_WAITING | IRQ_INPROGRESS);
		if (desc->handler && desc->handler->startup)
			desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);

	register_irq_proc(irq);
	return 0;
}
#ifdef CONFIG_SMP
inline void synchronize_irq(unsigned int irq)
{
	/* Spin until any in-flight handler on another CPU has finished. */
	while (get_irq_desc(irq)->status & IRQ_INPROGRESS)
		cpu_relax();
}

EXPORT_SYMBOL(synchronize_irq);

#endif /* CONFIG_SMP */
int request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *devname, void *dev_id)
{
	struct irqaction *action;
	int retval;

	if (irq >= NR_IRQS || !handler)
		return -EINVAL;

	action = (struct irqaction *)
		kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action) {
		printk(KERN_ERR "kmalloc() failed for irq %d !\n", irq);
		return -ENOMEM;
	}

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->dev_id = dev_id;
	action->next = NULL;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);

	return retval;
}

EXPORT_SYMBOL(request_irq);
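
/*
 * Illustrative sketch, not part of this file: a driver typically pairs
 * request_irq() with free_irq(), passing its device structure as dev_id
 * so a shared line can be torn down selectively.  The foo_* names below
 * are hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id,
 *					 struct pt_regs *regs)
 *	{
 *		struct foo_dev *dev = dev_id;
 *
 *		if (!foo_irq_pending(dev))
 *			return IRQ_NONE;	// not ours; line may be shared
 *		foo_ack_irq(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(dev->irq, foo_interrupt, SA_SHIRQ, "foo", dev);
 *	...
 *	free_irq(dev->irq, dev);
 */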
void free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc = get_irq_desc(irq);
	struct irqaction **p;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	p = &desc->action;
	for (;;) {
		struct irqaction *action = *p;

		if (action) {
			struct irqaction **pp = p;

			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				if (desc->handler->shutdown)
					desc->handler->shutdown(irq);
				else
					desc->handler->disable(irq);
			}
			spin_unlock_irqrestore(&desc->lock, flags);

			/* Wait to make sure it's not being used on another CPU */
			synchronize_irq(irq);
			kfree(action);
			return;
		}
		printk("Trying to free already-free IRQ%d\n", irq);
		spin_unlock_irqrestore(&desc->lock, flags);
		break;
	}
}

EXPORT_SYMBOL(free_irq);
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables nest: a line
 *	disabled twice needs two enable_irq() calls to become live again.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
inline void disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = get_irq_desc(irq);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		if (!(desc->status & IRQ_PER_CPU))
			desc->status |= IRQ_DISABLED;
		mask_irq(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

EXPORT_SYMBOL(disable_irq_nosync);
/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables nest: for two
 *	disables you need two enables.  This function waits for any
 *	pending IRQ handlers for this interrupt to complete before
 *	returning.  If you use this function while holding a resource
 *	the IRQ handler may need, you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	irq_desc_t *desc = get_irq_desc(irq);

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}

EXPORT_SYMBOL(disable_irq);
/**
 *	enable_irq - enable interrupt handling on an irq
 *	@irq: Interrupt to enable
 *
 *	Re-enables the processing of interrupts on this IRQ line
 *	providing no disable_irq calls are now in effect.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = get_irq_desc(irq);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;

		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler, irq);
		}
		unmask_irq(irq);
		/* fall through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq(%u) unbalanced from %p\n", irq,
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

EXPORT_SYMBOL(enable_irq);
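
/*
 * Illustrative sketch, not part of this file: because disables nest
 * (desc->depth counts them), a doubly-disabled line only becomes live
 * again after the second enable.  dev->irq is hypothetical.
 *
 *	disable_irq(dev->irq);		// depth 0 -> 1, line masked
 *	disable_irq(dev->irq);		// depth 1 -> 2
 *	enable_irq(dev->irq);		// depth 2 -> 1, still masked
 *	enable_irq(dev->irq);		// depth 1 -> 0, line unmasked
 */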
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *)v, j;
	struct irqaction *action;
	irq_desc_t *desc;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for (j = 0; j < NR_CPUS; j++) {
			if (cpu_online(j))
				seq_printf(p, "CPU%d       ", j);
		}
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		desc = get_irq_desc(i);
		spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action || !action->handler)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for (j = 0; j < NR_CPUS; j++) {
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
		}
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if (desc->handler)
			seq_printf(p, " %s ", desc->handler->typename);
		else
			seq_printf(p, " None ");
		seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
		seq_printf(p, " %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&desc->lock, flags);
	} else if (i == NR_IRQS)
		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
	return 0;
}
int handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
{
	int status = 0;
	int ret, retval = 0;

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();
	return retval;
}
static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	struct irqaction *action;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
		       irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared!\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");
	action = desc->action;
	while (action) {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			     (unsigned long)action->handler);
		printk("\n");
		action = action->next;
	}
}
static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count > 0) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}
static int noirqdebug;

static int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk("IRQ lockup detection disabled\n");
	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
/*
 * If 99,900 of the previous 100,000 interrupts have not been handled then
 * assume that the IRQ is stuck in some manner.  Drop a diagnostic and try
 * to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly-functioning
 *  device sharing an IRQ with the failing one.)
 *
 * Called under desc->lock.
 */
static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	if (action_ret != IRQ_HANDLED) {
		desc->irqs_unhandled++;
		if (action_ret != IRQ_NONE)
			report_bad_irq(irq, desc, action_ret);
	}

	desc->irq_count++;
	if (desc->irq_count < 100000)
		return;

	desc->irq_count = 0;
	if (desc->irqs_unhandled > 99900) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	desc->irqs_unhandled = 0;
}
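
/*
 * Worked example (numbers assumed for illustration): a device stuck
 * asserting a level-triggered line fires continuously, while a healthy
 * device sharing the line claims, say, 50 of every 100,000 arrivals.
 * irqs_unhandled then reaches 99,950 (> 99,900) within one
 * 100,000-count window and the line is disabled; had the healthy
 * device handled 100 or more of them, the threshold would not trip.
 */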
/*
 * Eventually, this should take an array of interrupts and an array size
 * so it can dispatch multiple interrupts.
 */
void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
{
	int status;
	struct irqaction *action;
	int cpu = smp_processor_id();
	irq_desc_t *desc = get_irq_desc(irq);
	irqreturn_t action_ret;
#ifdef CONFIG_IRQSTACKS
	struct thread_info *curtp, *irqtp;
#endif

	kstat_cpu(cpu).irqs[irq]++;

	if (desc->status & IRQ_PER_CPU) {
		/* no locking required for CPU-local interrupts: */
		ack_irq(irq);
		action_ret = handle_irq_event(irq, regs, desc->action);
		desc->handler->end(irq);
		return;
	}

	spin_lock(&desc->lock);
	ack_irq(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier.
	 * WAITING is used by probe to mark irqs that are being tested.
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		if (!action || !action->handler) {
			ppc_spurious_interrupts++;
			printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
			/* We can't call disable_irq here, it would deadlock */
			if (!desc->depth)
				desc->depth = 1;
			desc->status |= IRQ_DISABLED;
			/*
			 * This is not a real spurious interrupt, we
			 * have to eoi it, so we jump to out.
			 */
			mask_irq(irq);
			goto out;
		}
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler.  But the code here only handles the _second_
	 * instance of the irq, not the third or fourth.  So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		spin_unlock(&desc->lock);

#ifdef CONFIG_IRQSTACKS
		/* Switch to the irq stack to handle this */
		curtp = current_thread_info();
		irqtp = hardirq_ctx[smp_processor_id()];
		if (curtp != irqtp) {
			irqtp->task = curtp->task;
			irqtp->flags = 0;
			action_ret = call_handle_irq_event(irq, regs, action, irqtp);
			irqtp->task = NULL;
			if (irqtp->flags)
				set_bits(irqtp->flags, &curtp->flags);
		} else
#endif
			action_ret = handle_irq_event(irq, regs, action);

		spin_lock(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
out:
	desc->status &= ~IRQ_INPROGRESS;
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	if (desc->handler) {
		if (desc->handler->end)
			desc->handler->end(irq);
		else if (desc->handler->enable)
			desc->handler->enable(irq);
	}
	spin_unlock(&desc->lock);
}
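
/*
 * Illustrative scenario (assumed for clarity): CPU0 is in the handler
 * (IRQ_INPROGRESS set) when the same edge-triggered irq fires again and
 * lands on CPU1.  CPU1 only sets IRQ_PENDING and returns; when CPU0's
 * handler finishes, the loop above sees IRQ_PENDING, clears it, and
 * runs the handler once more, so the second edge is not lost.
 */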
#ifdef CONFIG_PPC_ISERIES
void do_IRQ(struct pt_regs *regs)
{
	struct paca_struct *lpaca;
	struct ItLpQueue *lpq;

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 2KB free? */
	{
		long sp;

		sp = __get_SP() & (THREAD_SIZE - 1);

		if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
			printk("do_IRQ: stack overflow: %ld\n",
			       sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	lpaca = get_paca();
#ifdef CONFIG_SMP
	if (lpaca->lppaca.xIntDword.xFields.xIpiCnt) {
		lpaca->lppaca.xIntDword.xFields.xIpiCnt = 0;
		iSeries_smp_message_recv(regs);
	}
#endif /* CONFIG_SMP */
	lpq = lpaca->lpqueue_ptr;
	if (lpq && ItLpQueue_isLpIntPending(lpq))
		lpevent_count += ItLpQueue_process(lpq, regs);

	irq_exit();

	if (lpaca->lppaca.xIntDword.xFields.xDecrInt) {
		lpaca->lppaca.xIntDword.xFields.xDecrInt = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
}

#else	/* CONFIG_PPC_ISERIES */

void do_IRQ(struct pt_regs *regs)
{
	int irq;

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 2KB free? */
	{
		long sp;

		sp = __get_SP() & (THREAD_SIZE - 1);

		if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
			printk("do_IRQ: stack overflow: %ld\n",
			       sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	irq = ppc_md.get_irq(regs);

	if (irq >= 0)
		ppc_irq_dispatch_handler(regs, irq);
	else
		/* That's not SMP safe ... but who cares ? */
		ppc_spurious_interrupts++;

	irq_exit();
}

#endif	/* CONFIG_PPC_ISERIES */
/*
 * Stub implementations: ISA-style IRQ autoprobing is not supported on
 * this platform, so probing always reports that nothing was found.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off(unsigned long irqs)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_off);

unsigned int probe_irq_mask(unsigned long irqs)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_mask);
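
/*
 * For reference, the generic autoprobe idiom a legacy driver would use
 * looks like the sketch below (foo_trigger_interrupt() is hypothetical);
 * with the stubs above it always concludes that no IRQ was found.
 *
 *	unsigned long mask = probe_irq_on();
 *	foo_trigger_interrupt(dev);	// make the device assert its line
 *	udelay(100);
 *	irq = probe_irq_off(mask);	// 0: none found, <0: multiple fired
 */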
void __init init_IRQ(void)
{
	static int once = 0;

	if (once)
		return;
	once++;

	ppc_md.init_IRQ();
}
static struct proc_dir_entry *root_irq_dir;
static struct proc_dir_entry *irq_dir[NR_IRQS];
static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];

/* Protected by get_irq_desc(irq)->lock. */
#ifdef CONFIG_IRQ_ALL_CPUS
cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
#else  /* CONFIG_IRQ_ALL_CPUS */
cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_NONE };
#endif /* CONFIG_IRQ_ALL_CPUS */
static int irq_affinity_read_proc(char *page, char **start, off_t off,
				  int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);

	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
				   unsigned long count, void *data)
{
	unsigned int irq = (long)data;
	irq_desc_t *desc = get_irq_desc(irq);
	int ret;
	cpumask_t new_value, tmp;

	if (!desc->handler->set_affinity)
		return -EIO;

	ret = cpumask_parse(buffer, count, new_value);
	if (ret != 0)
		return ret;

	/*
	 * We check for CPU_MASK_ALL in xics to send irqs to all cpus.
	 * In some cases CPU_MASK_ALL is smaller than the cpumask (eg
	 * NR_CPUS == 32 and cpumask is a long), so we mask it here to
	 * be consistent.
	 */
	cpus_and(new_value, new_value, CPU_MASK_ALL);

	/*
	 * Grab lock here so cpu_online_map can't change, and also
	 * protect irq_affinity[].
	 */
	spin_lock(&desc->lock);

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp)) {
		ret = -EINVAL;
		goto out;
	}

	irq_affinity[irq] = new_value;
	desc->handler->set_affinity(irq, new_value);
	ret = count;

out:
	spin_unlock(&desc->lock);
	return ret;
}
#define MAX_NAMELEN 10

static void register_irq_proc(unsigned int irq)
{
	struct proc_dir_entry *entry;
	char name[MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == NULL) || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

	/* create /proc/irq/1234/smp_affinity */
	entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

	if (entry) {
		entry->nlink = 1;
		entry->data = (void *)(long)irq;
		entry->read_proc = irq_affinity_read_proc;
		entry->write_proc = irq_affinity_write_proc;
	}

	smp_affinity_entry[irq] = entry;
}
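
/*
 * Usage sketch (shell, not kernel code; the IRQ number is hypothetical):
 * smp_affinity reads and writes a hexadecimal CPU mask, e.g. to steer
 * IRQ 19 to CPU 1 only:
 *
 *	# cat /proc/irq/19/smp_affinity
 *	ffffffff
 *	# echo 2 > /proc/irq/19/smp_affinity
 *
 * A mask that contains no online CPU is rejected with -EINVAL by
 * irq_affinity_write_proc() above.
 */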
void init_irq_proc(void)
{
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);

	/* create /proc/irq/prof_cpu_mask */
	create_prof_cpu_mask(root_irq_dir);

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++) {
		if (get_irq_desc(i)->handler == NULL)
			continue;
		register_irq_proc(i);
	}
}
irqreturn_t no_action(int irq, void *dev, struct pt_regs *regs)
{
	return IRQ_NONE;
}
#ifndef CONFIG_PPC_ISERIES
/*
 * Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
 */

#define UNDEFINED_IRQ 0xffffffff
unsigned int virt_irq_to_real_map[NR_IRQS];

/*
 * Don't use virtual irqs 0, 1, 2 for devices.
 * The pcnet32 driver considers interrupt numbers < 2 to be invalid,
 * and 2 is the XICS IPI interrupt.
 * We limit virtual irqs to 17 less than NR_IRQS so that when we
 * offset them by 16 (to reserve the first 16 for ISA interrupts)
 * we don't end up with an interrupt number >= NR_IRQS.
 */
#define MIN_VIRT_IRQ	3
#define MAX_VIRT_IRQ	(NR_IRQS - NUM_ISA_INTERRUPTS - 1)
#define NR_VIRT_IRQS	(MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1)
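
/*
 * Worked example (numbers assumed for illustration): with NR_IRQS = 512
 * and NUM_ISA_INTERRUPTS = 16, MAX_VIRT_IRQ is 495 and NR_VIRT_IRQS is
 * 493.  A real_irq of 4660 (0x1234) exceeds MAX_VIRT_IRQ, so it hashes
 * to (4660 % 493) + 3 = 223 + 3 = 226; on a collision, the mapping code
 * below probes forward from there until it finds the entry or a free
 * slot.
 */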
void
virt_irq_init(void)
{
	int i;

	for (i = 0; i < NR_IRQS; i++)
		virt_irq_to_real_map[i] = UNDEFINED_IRQ;
}
/* Create a mapping for a real_irq if it doesn't already exist.
 * Return the virtual irq as a convenience.
 */
int virt_irq_create_mapping(unsigned int real_irq)
{
	unsigned int virq, first_virq;
	static int warned;

	if (naca->interrupt_controller == IC_OPEN_PIC)
		return real_irq;	/* no mapping for openpic (for now) */

	/* don't map interrupts < MIN_VIRT_IRQ */
	if (real_irq < MIN_VIRT_IRQ) {
		virt_irq_to_real_map[real_irq] = real_irq;
		return real_irq;
	}

	/* map to a number between MIN_VIRT_IRQ and MAX_VIRT_IRQ */
	virq = real_irq;
	if (virq > MAX_VIRT_IRQ)
		virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;

	/* search for this number or a free slot */
	first_virq = virq;
	while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
		if (virt_irq_to_real_map[virq] == real_irq)
			return virq;
		if (++virq > MAX_VIRT_IRQ)
			virq = MIN_VIRT_IRQ;
		if (virq == first_virq)
			goto nospace;	/* oops, no free slots */
	}

	virt_irq_to_real_map[virq] = real_irq;
	return virq;

 nospace:
	if (!warned) {
		printk(KERN_CRIT "Interrupt table is full\n");
		printk(KERN_CRIT "Increase NR_IRQS (currently %d) "
		       "in your kernel sources and rebuild.\n", NR_IRQS);
		warned = 1;
	}
	return NO_IRQ;
}
/*
 * In most cases will get a hit on the very first slot checked in the
 * virt_irq_to_real_map.  Only when there are a large number of
 * IRQs will this be expensive.
 */
unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
{
	unsigned int virq;
	unsigned int first_virq;

	virq = real_irq;
	if (virq > MAX_VIRT_IRQ)
		virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;

	first_virq = virq;

	do {
		if (virt_irq_to_real_map[virq] == real_irq)
			return virq;

		virq++;
		if (virq >= MAX_VIRT_IRQ)
			virq = 0;
	} while (first_virq != virq);

	return NO_IRQ;
}

#endif /* CONFIG_PPC_ISERIES */
#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS];
struct thread_info *hardirq_ctx[NR_CPUS];

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = SOFTIRQ_OFFSET;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}
void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curtp, *irqtp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curtp = current_thread_info();
		irqtp = softirq_ctx[smp_processor_id()];
		irqtp->task = curtp->task;
		call_do_softirq(irqtp);
		irqtp->task = NULL;
	}

	local_irq_restore(flags);
}

EXPORT_SYMBOL(do_softirq);

#endif /* CONFIG_IRQSTACKS */