 * arch/ppc/kernel/irq.c
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
29 #include <linux/errno.h>
30 #include <linux/module.h>
31 #include <linux/threads.h>
32 #include <linux/kernel_stat.h>
33 #include <linux/signal.h>
34 #include <linux/sched.h>
35 #include <linux/ptrace.h>
36 #include <linux/ioport.h>
37 #include <linux/interrupt.h>
38 #include <linux/timex.h>
39 #include <linux/config.h>
40 #include <linux/init.h>
41 #include <linux/slab.h>
42 #include <linux/pci.h>
43 #include <linux/delay.h>
44 #include <linux/irq.h>
45 #include <linux/proc_fs.h>
46 #include <linux/random.h>
47 #include <linux/seq_file.h>
48 #include <linux/cpumask.h>
50 #include <asm/uaccess.h>
51 #include <asm/bitops.h>
52 #include <asm/system.h>
54 #include <asm/pgtable.h>
56 #include <asm/cache.h>
58 #include <asm/ptrace.h>
/* Number of 32-bit words needed to hold one bit per IRQ. */
60 #define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
/* IPI send/receive counters maintained by the PPC SMP code; reported
 * by show_interrupts() below. */
62 extern atomic_t ipi_recv;
63 extern atomic_t ipi_sent;
/* Forward declarations for functions defined later in this file. */
64 void enable_irq(unsigned int irq_nr);
65 void disable_irq(unsigned int irq_nr);
67 static void register_irq_proc (unsigned int irq);
69 #define MAXCOUNT 10000000
/* Per-IRQ descriptor table; each entry starts with its spinlock unlocked.
 * NOTE(review): the initializer looks truncated in this extraction. */
71 irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
73 .lock = SPIN_LOCK_UNLOCKED
/* Bookkeeping shared with the platform PIC code and /proc reporting. */
77 int ppc_spurious_interrupts = 0;
78 struct irqaction *ppc_irq_action[NR_IRQS];
79 unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
80 unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
81 atomic_t ppc_n_lost_interrupts;
83 /* nasty hack for shared irq's since we need to do kmalloc calls but
84 * can't very early in the boot when we need to do a request irq.
85 * this needs to be removed.
88 #define IRQ_KMALLOC_ENTRIES 8
/* Bitmap of in-use slots in the static boot-time cache below. */
89 static int cache_bitmask = 0;
90 static struct irqaction malloc_cache[IRQ_KMALLOC_ENTRIES];
/* Non-zero once the normal kernel allocator is usable. */
91 extern int mem_init_done;
93 #if defined(CONFIG_TAU_INT)
/* Thermal Assist Unit statistics, provided by the TAU driver.
 * NOTE(review): matching #endif is not visible in this extraction. */
94 extern int tau_interrupts(unsigned long cpu);
95 extern int tau_initialized;
/* Allocate an irqaction-sized buffer even before mem_init: fall back to
 * a small static cache when kmalloc is not yet available.
 * NOTE(review): several lines (local decls, mem_init_done test, braces,
 * cache-exhausted failure path) are missing from this extraction. */
98 void *irq_kmalloc(size_t size, int pri)
/* Normal case: allocator is up, defer to the real kmalloc(). */
102 return kmalloc(size,pri);
/* Early boot: hand out the first free slot of the static cache. */
103 for ( i = 0; i < IRQ_KMALLOC_ENTRIES ; i++ )
104 if ( ! ( cache_bitmask & (1<<i) ) )
106 cache_bitmask |= (1<<i);
107 return (void *)(&malloc_cache[i]);
/* Free a buffer obtained from irq_kmalloc(). If ptr points into the
 * static boot-time cache, just clear its in-use bit; the kfree()
 * fallback for normal allocations is not visible in this extraction. */
112 void irq_kfree(void *ptr)
115 for ( i = 0 ; i < IRQ_KMALLOC_ENTRIES ; i++ )
116 if ( ptr == &malloc_cache[i] )
118 cache_bitmask &= ~(1<<i);
/* Install an irqaction on an IRQ line: seed the entropy pool if asked,
 * append the action to the descriptor's handler chain (sharing only when
 * all parties set SA_SHIRQ), and start up the line if it was idle.
 * NOTE(review): the action-list walk and insertion lines are missing
 * from this extraction. */
125 setup_irq(unsigned int irq, struct irqaction * new)
129 struct irqaction *old, **p;
130 irq_desc_t *desc = irq_desc + irq;
133 * Some drivers like serial.c use request_irq() heavily,
134 * so we have to be careful not to interfere with a
137 if (new->flags & SA_SAMPLE_RANDOM) {
139 * This function might sleep, we want to call it first,
140 * outside of the atomic block.
141 * Yes, this might clear the entropy pool if the wrong
142 * driver is attempted to be loaded, without actually
143 * installing a new handler, but is this really a problem,
144 * only the sysadmin is able to do this.
146 rand_initialize_irq(irq);
150 * The following block of code has to be executed atomically
152 spin_lock_irqsave(&desc->lock,flags);
154 if ((old = *p) != NULL) {
155 /* Can't share interrupts unless both agree to */
156 if (!(old->flags & new->flags & SA_SHIRQ)) {
/* Mismatched sharing flags: refuse, dropping the lock first. */
157 spin_unlock_irqrestore(&desc->lock,flags);
161 /* add new interrupt at end of irq queue */
/* First handler on this line: clear stale state and kick the PIC. */
173 desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
175 if (desc->handler->startup)
176 desc->handler->startup(irq);
177 else if (desc->handler->enable)
178 desc->handler->enable(irq);
181 spin_unlock_irqrestore(&desc->lock,flags);
/* Expose the new line under /proc/irq. */
183 register_irq_proc(irq);
/* Remove the handler matching dev_id from an IRQ's action chain,
 * disabling the line when the last handler goes away, and wait for any
 * in-flight instance of the handler to finish before returning.
 * NOTE(review): the list-walk loop structure and the unlink/free lines
 * are missing from this extraction. */
187 void free_irq(unsigned int irq, void* dev_id)
190 struct irqaction **p;
193 desc = irq_desc + irq;
194 spin_lock_irqsave(&desc->lock,flags);
197 struct irqaction * action = *p;
199 struct irqaction **pp = p;
/* Shared line: keep scanning until the dev_id cookie matches. */
201 if (action->dev_id != dev_id)
204 /* Found it - now remove it from the list of entries */
207 desc->status |= IRQ_DISABLED;
210 spin_unlock_irqrestore(&desc->lock,flags);
/* Make sure no CPU is still running this handler (SMP). */
212 synchronize_irq(irq);
/* No matching action was found. */
216 printk("Trying to free free IRQ%d\n",irq);
217 spin_unlock_irqrestore(&desc->lock,flags);
223 EXPORT_SYMBOL(free_irq);
/* Public entry point for claiming an IRQ line: validate the handler,
 * allocate and fill an irqaction (via the boot-safe irq_kmalloc), and
 * hand it to setup_irq(). Returns 0 or a negative errno.
 * NOTE(review): the irq-range check, NULL-allocation branch and error
 * returns are missing from this extraction. */
225 int request_irq(unsigned int irq,
226 irqreturn_t (*handler)(int, void *, struct pt_regs *),
227 unsigned long irqflags, const char * devname, void *dev_id)
229 struct irqaction *action;
235 printk(KERN_ERR "request_irq called with NULL handler!\n");
240 action = (struct irqaction *)
241 irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
243 printk(KERN_ERR "irq_kmalloc() failed for irq %d !\n", irq);
247 action->handler = handler;
248 action->flags = irqflags;
/* Start with an empty CPU affinity mask for the action. */
249 cpus_clear(action->mask);
250 action->name = devname;
251 action->dev_id = dev_id;
254 retval = setup_irq(irq, action);
263 EXPORT_SYMBOL(request_irq);
266 * Generic enable/disable code: this just calls
267 * down into the PIC-specific version for the actual
268 * hardware disable after having gotten the irq
273 * disable_irq_nosync - disable an irq without waiting
274 * @irq: Interrupt to disable
276 * Disable the selected interrupt line. Disables of an interrupt
277 * stack. Unlike disable_irq(), this function does not ensure existing
278 * instances of the IRQ handler have completed before returning.
280 * This function may be called from IRQ context.
283 void disable_irq_nosync(unsigned int irq)
285 irq_desc_t *desc = irq_desc + irq;
288 spin_lock_irqsave(&desc->lock, flags);
/* Only the first (outermost) disable actually masks the line; nested
 * disables just bump the depth count. Per-CPU IRQs are never marked
 * disabled here. NOTE(review): the PIC ->disable() call is missing
 * from this extraction. */
289 if (!desc->depth++) {
290 if (!(desc->status & IRQ_PER_CPU))
291 desc->status |= IRQ_DISABLED;
294 spin_unlock_irqrestore(&desc->lock, flags);
298 * disable_irq - disable an irq and wait for completion
299 * @irq: Interrupt to disable
301 * Disable the selected interrupt line. Disables of an interrupt
302 * stack. That is for two disables you need two enables. This
303 * function waits for any pending IRQ handlers for this interrupt
304 * to complete before returning. If you use this function while
305 * holding a resource the IRQ handler may need you will deadlock.
307 * This function may be called - with care - from IRQ context.
310 void disable_irq(unsigned int irq)
312 irq_desc_t *desc = irq_desc + irq;
/* Mask the line first, then (if a handler is installed) wait for any
 * running instance to drain. */
313 disable_irq_nosync(irq);
315 synchronize_irq(irq);
319 * enable_irq - enable interrupt handling on an irq
320 * @irq: Interrupt to enable
322 * Re-enables the processing of interrupts on this IRQ line
323 * providing no disable_irq calls are now in effect.
325 * This function may be called from IRQ context.
328 void enable_irq(unsigned int irq)
330 irq_desc_t *desc = irq_desc + irq;
333 spin_lock_irqsave(&desc->lock, flags);
/* Unwind one level of disable_irq() nesting.
 * NOTE(review): the case labels and depth decrement are missing from
 * this extraction; the depth==1 arm below re-enables the line and
 * replays any interrupt that arrived while it was masked. */
334 switch (desc->depth) {
336 unsigned int status = desc->status & ~IRQ_DISABLED;
337 desc->status = status;
338 if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
339 desc->status = status | IRQ_REPLAY;
340 hw_resend_irq(desc->handler,irq);
/* depth == 0: enable without a matching disable. */
349 printk("enable_irq(%u) unbalanced\n", irq);
351 spin_unlock_irqrestore(&desc->lock, flags);
/* seq_file show routine for /proc/interrupts: one row per active IRQ
 * with per-CPU counts, PIC type and handler names, followed by TAU,
 * IPI and spurious-interrupt summary lines.
 * NOTE(review): several structural lines (header row branch, online-CPU
 * checks, closing braces, return) are missing from this extraction. */
354 int show_interrupts(struct seq_file *p, void *v)
356 int i = *(loff_t *) v, j;
357 struct irqaction * action;
/* Header row: one column label per CPU. */
362 for (j=0; j<NR_CPUS; j++)
364 seq_printf(p, "CPU%d ", j);
369 spin_lock_irqsave(&irq_desc[i].lock, flags);
370 action = irq_desc[i].action;
/* Skip lines with no installed handler. */
371 if ( !action || !action->handler )
373 seq_printf(p, "%3d: ", i);
375 for (j = 0; j < NR_CPUS; j++)
377 seq_printf(p, "%10u ",
378 kstat_cpu(j).irqs[i]);
/* Non-SMP: a single aggregate count. */
380 seq_printf(p, "%10u ", kstat_irqs(i));
381 #endif /* CONFIG_SMP */
382 if (irq_desc[i].handler)
383 seq_printf(p, " %s ", irq_desc[i].handler->typename);
385 seq_puts(p, " None ");
386 seq_printf(p, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge ");
387 seq_printf(p, " %s", action->name);
/* List any additional handlers sharing the line. */
388 for (action = action->next; action; action = action->next)
389 seq_printf(p, ", %s", action->name);
392 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
/* Summary lines, emitted once after the last IRQ row. */
393 } else if (i == NR_IRQS) {
394 #ifdef CONFIG_TAU_INT
395 if (tau_initialized){
396 seq_puts(p, "TAU: ");
397 for (j = 0; j < NR_CPUS; j++)
399 seq_printf(p, "%10u ", tau_interrupts(j));
400 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
404 /* should this be per processor send/receive? */
405 seq_printf(p, "IPI (recv/sent): %10u/%u\n",
406 atomic_read(&ipi_recv), atomic_read(&ipi_sent));
408 seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
/* Run every handler chained on an IRQ, then mix entropy in if any of
 * them requested SA_SAMPLE_RANDOM.
 * NOTE(review): the local declarations, the irq-enable for
 * non-SA_INTERRUPT handlers, the loop structure and the return are
 * missing from this extraction. */
414 handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
418 if (!(action->flags & SA_INTERRUPT))
/* Accumulate flags so SA_SAMPLE_RANDOM from any handler is seen. */
422 status |= action->flags;
423 action->handler(irq, action->dev_id, regs);
424 action = action->next;
426 if (status & SA_SAMPLE_RANDOM)
427 add_interrupt_randomness(irq);
432 * Eventually, this should take an array of interrupts and an array size
433 * so it can dispatch multiple interrupts.
/* Core dispatch for one hardware interrupt: account it, honor the
 * PENDING/INPROGRESS protocol so only one CPU runs a given IRQ's
 * handlers at a time, loop while new instances arrive, and finally
 * ack/eoi the PIC.
 * NOTE(review): numerous lines (locals, ack call, loop braces, the
 * out: label) are missing from this extraction. */
435 void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
438 struct irqaction *action;
439 irq_desc_t *desc = irq_desc + irq;
441 kstat_this_cpu.irqs[irq]++;
442 spin_lock(&desc->lock);
445 REPLAY is when Linux resends an IRQ that was dropped earlier
446 WAITING is used by probe to mark irqs that are being tested
448 status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
449 if (!(status & IRQ_PER_CPU))
450 status |= IRQ_PENDING; /* we _want_ to handle it */
453 * If the IRQ is disabled for whatever reason, we cannot
454 * use the action we have.
457 if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
458 action = desc->action;
459 if (!action || !action->handler) {
/* Nothing registered for this vector: count it and mask the line. */
460 ppc_spurious_interrupts++;
461 printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
462 /* We can't call disable_irq here, it would deadlock */
464 desc->status |= IRQ_DISABLED;
466 /* This is a real interrupt, we have to eoi it,
470 status &= ~IRQ_PENDING; /* we commit to handling */
471 if (!(status & IRQ_PER_CPU))
472 status |= IRQ_INPROGRESS; /* we are handling it */
474 desc->status = status;
477 * If there is no IRQ handler or it was disabled, exit early.
478 Since we set PENDING, if another processor is handling
479 a different instance of this same irq, the other processor
480 will take care of it.
482 if (unlikely(!action))
487 * Edge triggered interrupts need to remember
489 * This applies to any hw interrupts that allow a second
490 * instance of the same irq to arrive while we are in do_IRQ
491 * or in the handler. But the code here only handles the _second_
492 * instance of the irq, not the third or fourth. So it is mostly
493 * useful for irq hardware that does not mask cleanly in an
/* Drop the lock while running handlers; PENDING is re-checked after. */
497 spin_unlock(&desc->lock);
498 handle_irq_event(irq, regs, action);
499 spin_lock(&desc->lock);
501 if (likely(!(desc->status & IRQ_PENDING)))
503 desc->status &= ~IRQ_PENDING;
506 desc->status &= ~IRQ_INPROGRESS;
508 * The ->end() handler has to deal with interrupts which got
509 * disabled while the handler was running.
511 if (irq_desc[irq].handler) {
512 if (irq_desc[irq].handler->end)
513 irq_desc[irq].handler->end(irq);
514 else if (irq_desc[irq].handler->enable)
515 irq_desc[irq].handler->enable(irq);
517 spin_unlock(&desc->lock);
/* Architecture entry point from the interrupt exception: drain every
 * pending IRQ reported by the platform PIC and dispatch each one.
 * NOTE(review): the locals (irq, first), irq_enter/irq_exit and closing
 * braces are missing from this extraction. */
520 void do_IRQ(struct pt_regs *regs)
526 * Every platform is required to implement ppc_md.get_irq.
527 * This function will either return an irq number or -1 to
528 * indicate there are no more pending. But the first time
529 * through the loop this means there wasn't an IRQ pending.
530 * The value -2 is for buggy hardware and means that this IRQ
531 * has already been handled. -- Tom
533 while ((irq = ppc_md.get_irq(regs)) >= 0) {
534 ppc_irq_dispatch_handler(regs, irq);
/* No IRQ found on the very first poll (and not the -2 "already
 * handled" case): count it as spurious. */
537 if (irq != -2 && first)
538 /* That's not SMP safe ... but who cares ? */
539 ppc_spurious_interrupts++;
/* IRQ autoprobe stubs — bodies not visible in this extraction. */
543 unsigned long probe_irq_on (void)
548 EXPORT_SYMBOL(probe_irq_on);
550 int probe_irq_off (unsigned long irqs)
555 EXPORT_SYMBOL(probe_irq_off);
557 unsigned int probe_irq_mask(unsigned long irqs)
/* SMP only: spin until no CPU is inside this IRQ's handlers.
 * NOTE(review): the barrier/cpu_relax in the loop body is missing. */
563 void synchronize_irq(unsigned int irq)
565 while (irq_desc[irq].status & IRQ_INPROGRESS)
568 #endif /* CONFIG_SMP */
/* /proc/irq directory tree handles, filled in by init_irq_proc() and
 * register_irq_proc() below. */
570 static struct proc_dir_entry *root_irq_dir;
571 static struct proc_dir_entry *irq_dir[NR_IRQS];
572 static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
/* Default affinity: all CPUs when CONFIG_IRQ_ALL_CPUS, else CPU 0. */
574 #ifdef CONFIG_IRQ_ALL_CPUS
575 #define DEFAULT_CPU_AFFINITY CPU_MASK_ALL
577 #define DEFAULT_CPU_AFFINITY cpumask_of_cpu(0)
/* Current affinity mask for each IRQ, shown via /proc/irq/N/smp_affinity. */
580 cpumask_t irq_affinity [NR_IRQS];
/* read_proc for /proc/irq/N/smp_affinity: print the IRQ's cpumask
 * (data encodes the irq number) followed by a newline.
 * NOTE(review): the count check and return are missing here. */
582 static int irq_affinity_read_proc (char *page, char **start, off_t off,
583 int count, int *eof, void *data)
585 int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
588 len += sprintf(page + len, "\n");
/* write_proc for /proc/irq/N/smp_affinity: parse a cpumask from
 * userspace, reject masks with no online CPU, then store it and push it
 * to the PIC via ->set_affinity.
 * NOTE(review): error returns and the cpus_empty(tmp) check are missing
 * from this extraction. */
592 static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
593 unsigned long count, void *data)
595 int irq = (int) data, full_count = count, err;
596 cpumask_t new_value, tmp;
/* PICs without affinity support cannot honor the write. */
598 if (!irq_desc[irq].handler->set_affinity)
601 err = cpumask_parse(buffer, count, new_value);
604 * Do not allow disabling IRQs completely - it's a too easy
605 * way to make the system unusable accidentally :-) At least
606 * one online CPU still has to be targeted.
608 * We assume a 1-1 logical<->physical cpu mapping here. If
609 * we assume that the cpu indices in /proc/irq/../smp_affinity
610 * are actually logical cpu #'s then we have no problem.
611 * -- Cort <cort@fsmlabs.com>
613 cpus_and(tmp, new_value, cpu_online_map);
617 irq_affinity[irq] = new_value;
618 irq_desc[irq].handler->set_affinity(irq, new_value);
/* read_proc for /proc/irq/prof_cpu_mask: print the cpumask pointed to
 * by data, newline-terminated.
 * NOTE(review): the count check and return are missing here. */
623 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
624 int count, int *eof, void *data)
626 int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
629 len += sprintf(page + len, "\n");
/* write_proc for /proc/irq/prof_cpu_mask: parse a cpumask from
 * userspace into *data.
 * NOTE(review): the store of new_value and the return of full_count are
 * missing from this extraction. */
633 static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffer,
634 unsigned long count, void *data)
637 int full_count = count;
638 cpumask_t *mask = (cpumask_t *)data;
641 err = cpumask_parse(buffer, count, new_value);
/* Enough room for a decimal IRQ number used as a directory name. */
649 #define MAX_NAMELEN 10
/* Create /proc/irq/<irq>/ and its smp_affinity entry for one IRQ.
 * No-op if /proc/irq doesn't exist yet, the IRQ has no PIC handler, or
 * the directory was already created. */
651 static void register_irq_proc (unsigned int irq)
653 struct proc_dir_entry *entry;
654 char name [MAX_NAMELEN];
656 if (!root_irq_dir || (irq_desc[irq].handler == NULL) || irq_dir[irq])
659 memset(name, 0, MAX_NAMELEN);
660 sprintf(name, "%d", irq);
662 /* create /proc/irq/1234 */
663 irq_dir[irq] = proc_mkdir(name, root_irq_dir);
665 /* create /proc/irq/1234/smp_affinity */
666 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
/* Wire the read/write handlers; data carries the irq number.
 * NOTE(review): the NULL check on entry is missing here. */
669 entry->data = (void *)irq;
670 entry->read_proc = irq_affinity_read_proc;
671 entry->write_proc = irq_affinity_write_proc;
673 smp_affinity_entry[irq] = entry;
/* Profiling CPU mask, default all-ones; edited via
 * /proc/irq/prof_cpu_mask below. */
676 unsigned long prof_cpu_mask = -1;
/* Build the /proc/irq tree: the root directory, the prof_cpu_mask
 * entry, and one subdirectory per already-registered IRQ. */
678 void init_irq_proc (void)
680 struct proc_dir_entry *entry;
683 /* create /proc/irq */
684 root_irq_dir = proc_mkdir("irq", NULL);
686 /* create /proc/irq/prof_cpu_mask */
687 entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
/* NOTE(review): the NULL check on entry is missing here. */
690 entry->data = (void *)&prof_cpu_mask;
691 entry->read_proc = prof_cpu_mask_read_proc;
692 entry->write_proc = prof_cpu_mask_write_proc;
695 * Create entries for all existing IRQs.
697 for (i = 0; i < NR_IRQS; i++) {
698 if (irq_desc[i].handler == NULL)
700 register_irq_proc(i);
/* Placeholder handler that intentionally does nothing.
 * NOTE(review): body not visible in this extraction. */
704 irqreturn_t no_action(int irq, void *dev, struct pt_regs *regs)
709 void __init init_IRQ(void)
713 for (i = 0; i < NR_IRQS; ++i)
714 irq_affinity[i] = DEFAULT_CPU_AFFINITY;