1 /*
2  *  arch/ppc/kernel/irq.c
3  *
4  *  Derived from arch/i386/kernel/irq.c
5  *    Copyright (C) 1992 Linus Torvalds
6  *  Adapted from arch/i386 by Gary Thomas
7  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8  *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
9  *    Copyright (C) 1996-2001 Cort Dougan
10  *  Adapted for Power Macintosh by Paul Mackerras
11  *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
12  *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
13  *
14  * This file contains the code used by various IRQ handling routines:
15  * asking for different IRQ's should be done through these routines
16  * instead of just grabbing them. Thus setups with different IRQ numbers
17  * shouldn't result in any weird surprises, and installing new handlers
18  * should be easier.
19  *
20  * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
21  * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
22  * mask register (of which only 16 are defined), hence the weird shifting
23  * and complement of the cached_irq_mask.  I want to be able to stuff
24  * this right into the SIU SMASK register.
25  * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
26  * to reduce code space and avoid undefined function references.
27  */
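
/*
 * Illustrative sketch only, not the actual 8xx PIC code: because SIMASK
 * uses 1 = enabled and IRQ0 lives in the most-significant bit, unmasking
 * an interrupt would look roughly like
 *
 *        ppc_cached_irq_mask[0] |= (1UL << (31 - irq_nr));
 *        out_be32(simask_reg, ppc_cached_irq_mask[0]);
 *
 * where simask_reg is a placeholder for the SIU SIMASK register address.
 * The cached mask is kept in the "1 = enabled" sense so it can be written
 * straight into SIMASK, i.e. the complement of the usual "1 = masked"
 * convention.
 */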
28
29 #include <linux/errno.h>
30 #include <linux/module.h>
31 #include <linux/threads.h>
32 #include <linux/kernel_stat.h>
33 #include <linux/signal.h>
34 #include <linux/sched.h>
35 #include <linux/ptrace.h>
36 #include <linux/ioport.h>
37 #include <linux/interrupt.h>
38 #include <linux/timex.h>
39 #include <linux/config.h>
40 #include <linux/init.h>
41 #include <linux/slab.h>
42 #include <linux/pci.h>
43 #include <linux/delay.h>
44 #include <linux/irq.h>
45 #include <linux/proc_fs.h>
46 #include <linux/random.h>
47 #include <linux/seq_file.h>
48 #include <linux/cpumask.h>
49
50 #include <asm/uaccess.h>
51 #include <asm/bitops.h>
52 #include <asm/system.h>
53 #include <asm/io.h>
54 #include <asm/pgtable.h>
55 #include <asm/irq.h>
56 #include <asm/cache.h>
57 #include <asm/prom.h>
58 #include <asm/ptrace.h>
59
60 #define NR_MASK_WORDS   ((NR_IRQS + 31) / 32)
61
62 extern atomic_t ipi_recv;
63 extern atomic_t ipi_sent;
64 void enable_irq(unsigned int irq_nr);
65 void disable_irq(unsigned int irq_nr);
66
67 static void register_irq_proc (unsigned int irq);
68
69 #define MAXCOUNT 10000000
70
71 irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
72         [0 ... NR_IRQS-1] = {
73                 .lock = SPIN_LOCK_UNLOCKED
74         }
75 };
76
77 int ppc_spurious_interrupts = 0;
78 struct irqaction *ppc_irq_action[NR_IRQS];
79 unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
80 unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
81 atomic_t ppc_n_lost_interrupts;
82
83 /* Nasty hack for shared irqs: we need to do kmalloc calls, but
84  * can't do so very early in boot, which is when some request_irq
85  * calls happen.  This needs to be removed.
86  * -- Cort
87  */
88 #define IRQ_KMALLOC_ENTRIES 8
89 static int cache_bitmask = 0;
90 static struct irqaction malloc_cache[IRQ_KMALLOC_ENTRIES];
91 extern int mem_init_done;
92
93 #if defined(CONFIG_TAU_INT)
94 extern int tau_interrupts(unsigned long cpu);
95 extern int tau_initialized;
96 #endif
97
98 void *irq_kmalloc(size_t size, int pri)
99 {
100         unsigned int i;
101         if ( mem_init_done )
102                 return kmalloc(size,pri);
103         for ( i = 0; i < IRQ_KMALLOC_ENTRIES ; i++ )
104                 if ( ! ( cache_bitmask & (1<<i) ) )
105                 {
106                         cache_bitmask |= (1<<i);
107                         return (void *)(&malloc_cache[i]);
108                 }
109         return NULL;
110 }
111
112 void irq_kfree(void *ptr)
113 {
114         unsigned int i;
115         for ( i = 0 ; i < IRQ_KMALLOC_ENTRIES ; i++ )
116                 if ( ptr == &malloc_cache[i] )
117                 {
118                         cache_bitmask &= ~(1<<i);
119                         return;
120                 }
121         kfree(ptr);
122 }
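
/*
 * Hypothetical early-boot sequence for the cache above: before
 * mem_init_done is set, an allocation simply claims one of the
 * IRQ_KMALLOC_ENTRIES static slots, e.g.
 *
 *        struct irqaction *a = irq_kmalloc(sizeof(*a), GFP_KERNEL);
 *        ...
 *        irq_kfree(a);        releases the slot in cache_bitmask
 *
 * Once mem_init_done is set, both calls fall through to kmalloc()
 * and kfree().
 */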
123
124 int
125 setup_irq(unsigned int irq, struct irqaction * new)
126 {
127         int shared = 0;
128         unsigned long flags;
129         struct irqaction *old, **p;
130         irq_desc_t *desc = irq_desc + irq;
131
132         /*
133          * Some drivers like serial.c use request_irq() heavily,
134          * so we have to be careful not to interfere with a
135          * running system.
136          */
137         if (new->flags & SA_SAMPLE_RANDOM) {
138                 /*
139                  * This function might sleep, we want to call it first,
140                  * outside of the atomic block.
141                  * Yes, this might clear the entropy pool if someone
142                  * tries to load the wrong driver without actually
143                  * installing a new handler, but is that really a
144                  * problem?  Only the sysadmin is able to do this.
145                  */
146                 rand_initialize_irq(irq);
147         }
148
149         /*
150          * The following block of code has to be executed atomically
151          */
152         spin_lock_irqsave(&desc->lock,flags);
153         p = &desc->action;
154         if ((old = *p) != NULL) {
155                 /* Can't share interrupts unless both agree to */
156                 if (!(old->flags & new->flags & SA_SHIRQ)) {
157                         spin_unlock_irqrestore(&desc->lock,flags);
158                         return -EBUSY;
159                 }
160
161                 /* add new interrupt at end of irq queue */
162                 do {
163                         p = &old->next;
164                         old = *p;
165                 } while (old);
166                 shared = 1;
167         }
168
169         *p = new;
170
171         if (!shared) {
172                 desc->depth = 0;
173                 desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
174                 unmask_irq(irq);
175         }
176         spin_unlock_irqrestore(&desc->lock,flags);
177
178         register_irq_proc(irq);
179         return 0;
180 }
181
182 void free_irq(unsigned int irq, void* dev_id)
183 {
184         irq_desc_t *desc;
185         struct irqaction **p;
186         unsigned long flags;
187
188         desc = irq_desc + irq;
189         spin_lock_irqsave(&desc->lock,flags);
190         p = &desc->action;
191         for (;;) {
192                 struct irqaction * action = *p;
193                 if (action) {
194                         struct irqaction **pp = p;
195                         p = &action->next;
196                         if (action->dev_id != dev_id)
197                                 continue;
198
199                         /* Found it - now remove it from the list of entries */
200                         *pp = action->next;
201                         if (!desc->action) {
202                                 desc->status |= IRQ_DISABLED;
203                                 mask_irq(irq);
204                         }
205                         spin_unlock_irqrestore(&desc->lock,flags);
206
207                         synchronize_irq(irq);
208                         irq_kfree(action);
209                         return;
210                 }
211                 printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
212                 spin_unlock_irqrestore(&desc->lock,flags);
213                 break;
214         }
215         return;
216 }
217
218 EXPORT_SYMBOL(free_irq);
219
220 int request_irq(unsigned int irq,
221         irqreturn_t (*handler)(int, void *, struct pt_regs *),
222         unsigned long irqflags, const char * devname, void *dev_id)
223 {
224         struct irqaction *action;
225         int retval;
226
227         if (irq >= NR_IRQS)
228                 return -EINVAL;
229         if (!handler) {
230                 printk(KERN_ERR "request_irq called with NULL handler!\n");
231                 dump_stack();
232                 return 0;
233         }
234
235         action = (struct irqaction *)
236                 irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
237         if (!action) {
238                 printk(KERN_ERR "irq_kmalloc() failed for irq %d !\n", irq);
239                 return -ENOMEM;
240         }
241
242         action->handler = handler;
243         action->flags = irqflags;                       
244         action->mask = 0;
245         action->name = devname;
246         action->dev_id = dev_id;
247         action->next = NULL;
248
249         retval = setup_irq(irq, action);
250         if (retval) {
251                 irq_kfree(action);
252                 return retval;
253         }
254
255         return 0;
256 }
257
258 EXPORT_SYMBOL(request_irq);
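
/*
 * Typical (hypothetical) driver usage of request_irq()/free_irq() above,
 * for a shared interrupt line; mydev_interrupt, mydev_irq_pending,
 * mydev_ack_and_handle and struct mydev are placeholders, not real symbols:
 *
 *        static irqreturn_t mydev_interrupt(int irq, void *dev_id,
 *                                           struct pt_regs *regs)
 *        {
 *                struct mydev *dev = dev_id;
 *
 *                if (!mydev_irq_pending(dev))
 *                        return IRQ_NONE;        (not ours on a shared line)
 *                mydev_ack_and_handle(dev);
 *                return IRQ_HANDLED;
 *        }
 *
 *        if (request_irq(dev->irq, mydev_interrupt, SA_SHIRQ, "mydev", dev))
 *                goto fail;
 *        ...
 *        free_irq(dev->irq, dev);
 */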
259
260 /*
261  * Generic enable/disable code: this just calls
262  * down into the PIC-specific version for the actual
263  * hardware disable after having gotten the irq
264  * controller lock.
265  */
266
267 /**
268  *      disable_irq_nosync - disable an irq without waiting
269  *      @irq: Interrupt to disable
270  *
271  *      Disable the selected interrupt line.  Disables and enables are
272  *      nested.  Unlike disable_irq(), this function does not ensure existing
273  *      instances of the IRQ handler have completed before returning.
274  *
275  *      This function may be called from IRQ context.
276  */
277
278 void disable_irq_nosync(unsigned int irq)
279 {
280         irq_desc_t *desc = irq_desc + irq;
281         unsigned long flags;
282
283         spin_lock_irqsave(&desc->lock, flags);
284         if (!desc->depth++) {
285                 if (!(desc->status & IRQ_PER_CPU))
286                         desc->status |= IRQ_DISABLED;
287                 mask_irq(irq);
288         }
289         spin_unlock_irqrestore(&desc->lock, flags);
290 }
291
292 /**
293  *      disable_irq - disable an irq and wait for completion
294  *      @irq: Interrupt to disable
295  *
296  *      Disable the selected interrupt line.  Disables and enables are
297  *      nested; that is, for two disables you need two enables.  This
298  *      function waits for any pending IRQ handlers for this interrupt
299  *      to complete before returning.  If you use this function while
300  *      holding a resource the IRQ handler may need, you will deadlock.
301  *
302  *      This function may be called - with care - from IRQ context.
303  */
304
305 void disable_irq(unsigned int irq)
306 {
307         disable_irq_nosync(irq);
308         synchronize_irq(irq);
309 }
310
311 /**
312  *      enable_irq - enable interrupt handling on an irq
313  *      @irq: Interrupt to enable
314  *
315  *      Re-enables processing of interrupts on this IRQ line, provided
316  *      no other disable_irq() calls remain in effect.
317  *
318  *      This function may be called from IRQ context.
319  */
320
321 void enable_irq(unsigned int irq)
322 {
323         irq_desc_t *desc = irq_desc + irq;
324         unsigned long flags;
325
326         spin_lock_irqsave(&desc->lock, flags);
327         switch (desc->depth) {
328         case 1: {
329                 unsigned int status = desc->status & ~IRQ_DISABLED;
330                 desc->status = status;
331                 if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
332                         desc->status = status | IRQ_REPLAY;
333                         hw_resend_irq(desc->handler,irq);
334                 }
335                 unmask_irq(irq);
336                 /* fall-through */
337         }
338         default:
339                 desc->depth--;
340                 break;
341         case 0:
342                 printk(KERN_ERR "enable_irq(%u) unbalanced\n", irq);
343         }
344         spin_unlock_irqrestore(&desc->lock, flags);
345 }
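
/*
 * Nesting example for the enable/disable pair above: desc->depth counts
 * outstanding disables, so the line is only unmasked again when every
 * disable_irq() has been matched by an enable_irq():
 *
 *        disable_irq(irq);        depth 0 -> 1, line masked
 *        disable_irq(irq);        depth 1 -> 2
 *        enable_irq(irq);         depth 2 -> 1, still masked
 *        enable_irq(irq);         depth 1 -> 0, line unmasked
 */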
346
347 int show_interrupts(struct seq_file *p, void *v)
348 {
349         int i = *(loff_t *) v, j;
350         struct irqaction * action;
351         unsigned long flags;
352
353         if (i == 0) {
354                 seq_puts(p, "           ");
355                 for (j=0; j<NR_CPUS; j++)
356                         if (cpu_online(j))
357                                 seq_printf(p, "CPU%d       ", j);
358                 seq_putc(p, '\n');
359         }
360
361         if (i < NR_IRQS) {
362                 spin_lock_irqsave(&irq_desc[i].lock, flags);
363                 action = irq_desc[i].action;
364                 if ( !action || !action->handler )
365                         goto skip;
366                 seq_printf(p, "%3d: ", i);
367 #ifdef CONFIG_SMP
368                 for (j = 0; j < NR_CPUS; j++)
369                         if (cpu_online(j))
370                                 seq_printf(p, "%10u ",
371                                            kstat_cpu(j).irqs[i]);
372 #else
373                 seq_printf(p, "%10u ", kstat_irqs(i));
374 #endif /* CONFIG_SMP */
375                 if (irq_desc[i].handler)
376                         seq_printf(p, " %s ", irq_desc[i].handler->typename);
377                 else
378                         seq_puts(p, "  None      ");
379                 seq_printf(p, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge  ");
380                 seq_printf(p, "    %s", action->name);
381                 for (action = action->next; action; action = action->next)
382                         seq_printf(p, ", %s", action->name);
383                 seq_putc(p, '\n');
384 skip:
385                 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
386         } else if (i == NR_IRQS) {
387 #ifdef CONFIG_TAU_INT
388                 if (tau_initialized){
389                         seq_puts(p, "TAU: ");
390                         for (j = 0; j < NR_CPUS; j++)
391                                 if (cpu_online(j))
392                                         seq_printf(p, "%10u ", tau_interrupts(j));
393                         seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
394                 }
395 #endif
396 #ifdef CONFIG_SMP
397                 /* should this be per processor send/receive? */
398                 seq_printf(p, "IPI (recv/sent): %10u/%u\n",
399                                 atomic_read(&ipi_recv), atomic_read(&ipi_sent));
400 #endif
401                 seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
402         }
403         return 0;
404 }
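
/*
 * Roughly what the function above produces in /proc/interrupts on a
 * uniprocessor box (interrupt numbers, counts, controller and device
 * names below are made up for illustration):
 *
 *                   CPU0
 *          9:      14708  OpenPIC   Level     ide0
 *         22:        362  OpenPIC   Level     eth0
 *        BAD:          0
 */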
405
406 static inline void
407 handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
408 {
409         int status = 0;
410
411         if (!(action->flags & SA_INTERRUPT))
412                 local_irq_enable();
413
414         do {
415                 status |= action->flags;
416                 action->handler(irq, action->dev_id, regs);
417                 action = action->next;
418         } while (action);
419         if (status & SA_SAMPLE_RANDOM)
420                 add_interrupt_randomness(irq);
421         local_irq_disable();
422 }
423
424 /*
425  * Eventually, this should take an array of interrupts and an array size
426  * so it can dispatch multiple interrupts.
427  */
428 void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
429 {
430         int status;
431         struct irqaction *action;
432         irq_desc_t *desc = irq_desc + irq;
433
434         kstat_this_cpu.irqs[irq]++;
435         spin_lock(&desc->lock);
436         ack_irq(irq);
437         /*
438          * REPLAY is when Linux resends an IRQ that was dropped earlier.
439          * WAITING is used by probe to mark irqs that are being tested.
440          */
441         status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
442         if (!(status & IRQ_PER_CPU))
443                 status |= IRQ_PENDING; /* we _want_ to handle it */
444
445         /*
446          * If the IRQ is disabled for whatever reason, we cannot
447          * use the action we have.
448          */
449         action = NULL;
450         if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
451                 action = desc->action;
452                 if (!action || !action->handler) {
453                         ppc_spurious_interrupts++;
454                         printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
455                         /* We can't call disable_irq here, it would deadlock */
456                         ++desc->depth;
457                         desc->status |= IRQ_DISABLED;
458                         mask_irq(irq);
459                         /* This is a real interrupt, we have to eoi it,
460                            so we jump to out */
461                         goto out;
462                 }
463                 status &= ~IRQ_PENDING; /* we commit to handling */
464                 if (!(status & IRQ_PER_CPU))
465                         status |= IRQ_INPROGRESS; /* we are handling it */
466         }
467         desc->status = status;
468
469         /*
470          * If there is no IRQ handler or it was disabled, exit early.
471          * Since we set PENDING, if another processor is handling
472          * a different instance of this same irq, the other processor
473          * will take care of it.
474          */
475         if (unlikely(!action))
476                 goto out;
477
478
479         /*
480          * Edge triggered interrupts need to remember
481          * pending events.
482          * This applies to any hw interrupts that allow a second
483          * instance of the same irq to arrive while we are in do_IRQ
484          * or in the handler. But the code here only handles the _second_
485          * instance of the irq, not the third or fourth. So it is mostly
486          * useful for irq hardware that does not mask cleanly in an
487          * SMP environment.
488          */
489         for (;;) {
490                 spin_unlock(&desc->lock);
491                 handle_irq_event(irq, regs, action);
492                 spin_lock(&desc->lock);
493
494                 if (likely(!(desc->status & IRQ_PENDING)))
495                         break;
496                 desc->status &= ~IRQ_PENDING;
497         }
498 out:
499         desc->status &= ~IRQ_INPROGRESS;
500         /*
501          * The ->end() handler has to deal with interrupts which got
502          * disabled while the handler was running.
503          */
504         if (irq_desc[irq].handler) {
505                 if (irq_desc[irq].handler->end)
506                         irq_desc[irq].handler->end(irq);
507                 else if (irq_desc[irq].handler->enable)
508                         irq_desc[irq].handler->enable(irq);
509         }
510         spin_unlock(&desc->lock);
511 }
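
/*
 * Sketch of the SMP race the PENDING loop above handles (hypothetical
 * two-CPU timeline):
 *
 *        CPU0: takes IRQ n, sets IRQ_INPROGRESS, runs the handlers with
 *              desc->lock dropped
 *        CPU1: takes IRQ n again, sees IRQ_INPROGRESS, leaves IRQ_PENDING
 *              set and returns without running the handlers
 *        CPU0: re-takes desc->lock, sees IRQ_PENDING, clears it and runs
 *              the handlers one more time
 */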
512
513 void do_IRQ(struct pt_regs *regs)
514 {
515         int irq, first = 1;
516         irq_enter();
517
518         /*
519          * Every platform is required to implement ppc_md.get_irq.
520          * This function will either return an irq number or -1 to
521          * indicate there are no more pending.  But the first time
522          * through the loop this means there wasn't an IRQ pending.
523          * The value -2 is for buggy hardware and means that this IRQ
524          * has already been handled. -- Tom
525          */
526         while ((irq = ppc_md.get_irq(regs)) >= 0) {
527                 ppc_irq_dispatch_handler(regs, irq);
528                 first = 0;
529         }
530         if (irq != -2 && first)
531                 /* That's not SMP safe ... but who cares ? */
532                 ppc_spurious_interrupts++;
533         irq_exit();
534 }
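
/*
 * Sketch of the contract ppc_md.get_irq has to follow, with a made-up
 * PIC (mypic_* and MYPIC_NO_IRQ are placeholders, not real symbols):
 *
 *        static int mypic_get_irq(struct pt_regs *regs)
 *        {
 *                int vec = mypic_read_vector();
 *
 *                if (vec == MYPIC_NO_IRQ)
 *                        return -1;        nothing pending
 *                return vec;               irq number for dispatch
 *        }
 */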
535
536 unsigned long probe_irq_on (void)
537 {
538         return 0;
539 }
540
541 EXPORT_SYMBOL(probe_irq_on);
542
543 int probe_irq_off (unsigned long irqs)
544 {
545         return 0;
546 }
547
548 EXPORT_SYMBOL(probe_irq_off);
549
550 unsigned int probe_irq_mask(unsigned long irqs)
551 {
552         return 0;
553 }
554
555 #ifdef CONFIG_SMP
556 void synchronize_irq(unsigned int irq)
557 {
558         while (irq_desc[irq].status & IRQ_INPROGRESS)
559                 barrier();
560 }
561 #endif /* CONFIG_SMP */
562
563 static struct proc_dir_entry *root_irq_dir;
564 static struct proc_dir_entry *irq_dir[NR_IRQS];
565 static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
566
567 #ifdef CONFIG_IRQ_ALL_CPUS
568 #define DEFAULT_CPU_AFFINITY CPU_MASK_ALL
569 #else
570 #define DEFAULT_CPU_AFFINITY cpumask_of_cpu(0)
571 #endif
572
573 cpumask_t irq_affinity [NR_IRQS];
574
575 static int irq_affinity_read_proc (char *page, char **start, off_t off,
576                         int count, int *eof, void *data)
577 {
578         int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
579         if (count - len < 2)
580                 return -EINVAL;
581         len += sprintf(page + len, "\n");
582         return len;
583 }
584
585 static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
586                                         unsigned long count, void *data)
587 {
588         int irq = (int) data, full_count = count, err;
589         cpumask_t new_value, tmp;
590
591         if (!irq_desc[irq].handler->set_affinity)
592                 return -EIO;
593
594         err = cpumask_parse(buffer, count, new_value);
595         if (err)
                    return err;

596         /*
597          * Do not allow disabling IRQs completely - it's a too easy
598          * way to make the system unusable accidentally :-) At least
599          * one online CPU still has to be targeted.
600          *
601          * We assume a 1-1 logical<->physical cpu mapping here.  If
602          * we assume that the cpu indices in /proc/irq/../smp_affinity
603          * are actually logical cpu #'s then we have no problem.
604          *  -- Cort <cort@fsmlabs.com>
605          */
606         cpus_and(tmp, new_value, cpu_online_map);
607         if (cpus_empty(tmp))
608                 return -EINVAL;
609
610         irq_affinity[irq] = new_value;
611         irq_desc[irq].handler->set_affinity(irq, new_value);
612
613         return full_count;
614 }
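
/*
 * Userspace view of the two proc handlers above (irq number and mask
 * values are hypothetical):
 *
 *        # cat /proc/irq/21/smp_affinity
 *        ffffffff
 *        # echo 3 > /proc/irq/21/smp_affinity        route IRQ 21 to CPUs 0-1
 *
 * A mask that contains no online CPUs is rejected with -EINVAL.
 */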
615
616 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
617                         int count, int *eof, void *data)
618 {
619         int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
620         if (count - len < 2)
621                 return -EINVAL;
622         len += sprintf(page + len, "\n");
623         return len;
624 }
625
626 static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffer,
627                                         unsigned long count, void *data)
628 {
629         cpumask_t *mask = (cpumask_t *)data, new_value;
630         int full_count = count, err;
631
632         err = cpumask_parse(buffer, count, new_value);
633         if (err)
634                 return err;
635
636         *mask = new_value;
637         return full_count;
638 }
639
640 #define MAX_NAMELEN 10
641
642 static void register_irq_proc (unsigned int irq)
643 {
644         struct proc_dir_entry *entry;
645         char name [MAX_NAMELEN];
646
647         if (!root_irq_dir || (irq_desc[irq].handler == NULL) || irq_dir[irq])
648                 return;
649
650         memset(name, 0, MAX_NAMELEN);
651         sprintf(name, "%d", irq);
652
653         /* create /proc/irq/1234 */
654         irq_dir[irq] = proc_mkdir(name, root_irq_dir);
655
656         /* create /proc/irq/1234/smp_affinity */
657         entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
658
659         entry->nlink = 1;
660         entry->data = (void *)irq;
661         entry->read_proc = irq_affinity_read_proc;
662         entry->write_proc = irq_affinity_write_proc;
663
664         smp_affinity_entry[irq] = entry;
665 }
666
667 unsigned long prof_cpu_mask = -1;
668
669 void init_irq_proc (void)
670 {
671         struct proc_dir_entry *entry;
672         int i;
673
674         /* create /proc/irq */
675         root_irq_dir = proc_mkdir("irq", 0);
676
677         /* create /proc/irq/prof_cpu_mask */
678         entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
679
680         entry->nlink = 1;
681         entry->data = (void *)&prof_cpu_mask;
682         entry->read_proc = prof_cpu_mask_read_proc;
683         entry->write_proc = prof_cpu_mask_write_proc;
684
685         /*
686          * Create entries for all existing IRQs.
687          */
688         for (i = 0; i < NR_IRQS; i++) {
689                 if (irq_desc[i].handler == NULL)
690                         continue;
691                 register_irq_proc(i);
692         }
693 }
694
695 irqreturn_t no_action(int irq, void *dev, struct pt_regs *regs)
696 {
697         return IRQ_NONE;
698 }
699
700 void __init init_IRQ(void)
701 {
702         int i;
703
704         for (i = 0; i < NR_IRQS; ++i)
705                 irq_affinity[i] = DEFAULT_CPU_AFFINITY;
706
707         ppc_md.init_IRQ();
708 }