/*
 *  arch/ppc/kernel/irq.c
 *
 *  Derived from arch/i386/kernel/irq.c
 *    Copyright (C) 1992 Linus Torvalds
 *  Adapted from arch/i386 by Gary Thomas
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *    Copyright (C) 1996-2001 Cort Dougan
 *  Adapted for Power Macintosh by Paul Mackerras
 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them, so that setups with different IRQ
 * numbers don't produce any surprises and installing new handlers
 * is easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
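
/*
 * Illustration of the convention above (example values only): PowerPC
 * numbers bits from the most-significant end, so "IRQ0 is bit 0" puts
 * IRQ0 at 0x80000000 - hence the "weird shifting" (something like
 * 1 << (31 - irq) in the 8xx PIC code).  And since a set bit means
 * _enabled_ here, the cached mask is the complement of the usual
 * "set bit = masked" convention used by most other PICs.
 */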

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>

#include <asm/uaccess.h>
#include <asm/bitops.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>

#define NR_MASK_WORDS   ((NR_IRQS + 31) / 32)

extern atomic_t ipi_recv;
extern atomic_t ipi_sent;
void enable_irq(unsigned int irq_nr);
void disable_irq(unsigned int irq_nr);

static void register_irq_proc(unsigned int irq);

#define MAXCOUNT 10000000

irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
        [0 ... NR_IRQS-1] = {
                .lock = SPIN_LOCK_UNLOCKED
        }
};

int ppc_spurious_interrupts = 0;
struct irqaction *ppc_irq_action[NR_IRQS];
unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
atomic_t ppc_n_lost_interrupts;

/* Nasty hack for shared IRQs: request_irq() needs to kmalloc an
 * irqaction, but may be called too early in boot for kmalloc to work,
 * so we hand out slots from a small static cache instead.
 * This needs to be removed.
 * -- Cort
 */
#define IRQ_KMALLOC_ENTRIES 8
static int cache_bitmask = 0;
static struct irqaction malloc_cache[IRQ_KMALLOC_ENTRIES];
extern int mem_init_done;

#if defined(CONFIG_TAU_INT)
extern int tau_interrupts(unsigned long cpu);
extern int tau_initialized;
#endif

void *irq_kmalloc(size_t size, int pri)
{
        unsigned int i;

        if (mem_init_done)
                return kmalloc(size, pri);
        for (i = 0; i < IRQ_KMALLOC_ENTRIES; i++)
                if (!(cache_bitmask & (1 << i))) {
                        cache_bitmask |= (1 << i);
                        return (void *)&malloc_cache[i];
                }
        return NULL;
}
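
/*
 * Note: before mem_init_done, irq_kmalloc() ignores @size and hands
 * back one of the static irqaction slots above, so the allocation must
 * fit in a struct irqaction.  Typical use (sketch):
 *
 *      action = irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
 */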

void irq_kfree(void *ptr)
{
        unsigned int i;

        for (i = 0; i < IRQ_KMALLOC_ENTRIES; i++)
                if (ptr == &malloc_cache[i]) {
                        cache_bitmask &= ~(1 << i);
                        return;
                }
        kfree(ptr);
}

int
setup_irq(unsigned int irq, struct irqaction * new)
{
        int shared = 0;
        unsigned long flags;
        struct irqaction *old, **p;
        irq_desc_t *desc = irq_desc + irq;

        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & SA_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, so we want to call it
                 * first, outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is attempted to be loaded without actually
                 * installing a new handler, but is that really a
                 * problem?  Only the sysadmin can do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * The following block of code has to be executed atomically
         */
        spin_lock_irqsave(&desc->lock, flags);
        p = &desc->action;
        if ((old = *p) != NULL) {
                /* Can't share interrupts unless both agree to */
                if (!(old->flags & new->flags & SA_SHIRQ)) {
                        spin_unlock_irqrestore(&desc->lock, flags);
                        return -EBUSY;
                }

                /* add new interrupt at end of irq queue */
                do {
                        p = &old->next;
                        old = *p;
                } while (old);
                shared = 1;
        }

        *p = new;

        if (!shared) {
                desc->depth = 0;
                desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
                unmask_irq(irq);
        }
        spin_unlock_irqrestore(&desc->lock, flags);

        register_irq_proc(irq);
        return 0;
}

void free_irq(unsigned int irq, void* dev_id)
{
        irq_desc_t *desc;
        struct irqaction **p;
        unsigned long flags;

        desc = irq_desc + irq;
        spin_lock_irqsave(&desc->lock, flags);
        p = &desc->action;
        for (;;) {
                struct irqaction *action = *p;
                if (action) {
                        struct irqaction **pp = p;
                        p = &action->next;
                        if (action->dev_id != dev_id)
                                continue;

                        /* Found it - now remove it from the list of entries */
                        *pp = action->next;
                        if (!desc->action) {
                                desc->status |= IRQ_DISABLED;
                                mask_irq(irq);
                        }
                        spin_unlock_irqrestore(&desc->lock, flags);

                        synchronize_irq(irq);
                        irq_kfree(action);
                        return;
                }
                printk("Trying to free free IRQ%d\n", irq);
                spin_unlock_irqrestore(&desc->lock, flags);
                break;
        }
        return;
}

EXPORT_SYMBOL(free_irq);

int request_irq(unsigned int irq,
        irqreturn_t (*handler)(int, void *, struct pt_regs *),
        unsigned long irqflags, const char * devname, void *dev_id)
{
        struct irqaction *action;
        int retval;

        if (irq >= NR_IRQS)
                return -EINVAL;
        if (!handler) {
                printk(KERN_ERR "request_irq called with NULL handler!\n");
                dump_stack();
                return 0;
        }

        action = (struct irqaction *)
                irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action) {
                printk(KERN_ERR "irq_kmalloc() failed for irq %d !\n", irq);
                return -ENOMEM;
        }

        action->handler = handler;
        action->flags = irqflags;
        action->mask = 0;
        action->name = devname;
        action->dev_id = dev_id;
        action->next = NULL;

        retval = setup_irq(irq, action);
        if (retval) {
                /* use irq_kfree(): the action may come from the static cache */
                irq_kfree(action);
                return retval;
        }

        return 0;
}

EXPORT_SYMBOL(request_irq);
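
/*
 * Typical usage (sketch; the "foo" names are hypothetical):
 *
 *      static irqreturn_t foo_interrupt(int irq, void *dev_id,
 *                                       struct pt_regs *regs)
 *      {
 *              struct foo_device *foo = dev_id;
 *
 *              if (!foo_irq_pending(foo))
 *                      return IRQ_NONE;   (not ours on a shared line)
 *              ...
 *              return IRQ_HANDLED;
 *      }
 *
 *      err = request_irq(foo->irq, foo_interrupt, SA_SHIRQ, "foo", foo);
 *
 * With SA_SHIRQ, every sharer must pass the flag (see setup_irq() above)
 * and dev_id must uniquely identify the device for free_irq().
 */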

/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 *      disable_irq_nosync - disable an irq without waiting
 *      @irq: Interrupt to disable
 *
 *      Disable the selected interrupt line.  Disables of an interrupt
 *      nest, so the line stays disabled until every disable has been
 *      matched by an enable.  Unlike disable_irq(), this function does
 *      not ensure existing instances of the IRQ handler have completed
 *      before returning.
 *
 *      This function may be called from IRQ context.
 */

void disable_irq_nosync(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);
        if (!desc->depth++) {
                if (!(desc->status & IRQ_PER_CPU))
                        desc->status |= IRQ_DISABLED;
                mask_irq(irq);
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 *      disable_irq - disable an irq and wait for completion
 *      @irq: Interrupt to disable
 *
 *      Disable the selected interrupt line.  Disables of an interrupt
 *      nest: for two disables you need two enables.  This function
 *      waits for any pending IRQ handlers for this interrupt to
 *      complete before returning.  If you use this function while
 *      holding a resource the IRQ handler may need, you will deadlock.
 *
 *      This function may be called - with care - from IRQ context.
 */

void disable_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;

        disable_irq_nosync(irq);
        if (desc->action)
                synchronize_irq(irq);
}
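
/*
 * Disables nest via desc->depth, e.g. (sketch):
 *
 *      disable_irq(irq);       depth 0 -> 1, line masked
 *      disable_irq(irq);       depth 1 -> 2, still masked
 *      enable_irq(irq);        depth 2 -> 1, still masked
 *      enable_irq(irq);        depth 1 -> 0, line unmasked
 */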

/**
 *      enable_irq - enable interrupt handling on an irq
 *      @irq: Interrupt to enable
 *
 *      Re-enables the processing of interrupts on this IRQ line,
 *      provided no disable_irq() calls are still in effect.
 *
 *      This function may be called from IRQ context.
 */

void enable_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);
        switch (desc->depth) {
        case 1: {
                unsigned int status = desc->status & ~IRQ_DISABLED;
                desc->status = status;
                if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
                        desc->status = status | IRQ_REPLAY;
                        hw_resend_irq(desc->handler, irq);
                }
                unmask_irq(irq);
                /* fall-through */
        }
        default:
                desc->depth--;
                break;
        case 0:
                printk("enable_irq(%u) unbalanced\n", irq);
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction *action;
        unsigned long flags;

        if (i == 0) {
                seq_puts(p, "           ");
                for (j = 0; j < NR_CPUS; j++)
                        if (cpu_online(j))
                                seq_printf(p, "CPU%d       ", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action || !action->handler)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
                for (j = 0; j < NR_CPUS; j++)
                        if (cpu_online(j))
                                seq_printf(p, "%10u ",
                                           kstat_cpu(j).irqs[i]);
#else
                seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
                if (irq_desc[i].handler)
                        seq_printf(p, " %s ", irq_desc[i].handler->typename);
                else
                        seq_puts(p, "  None      ");
                seq_printf(p, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge  ");
                seq_printf(p, "    %s", action->name);
                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);
                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == NR_IRQS) {
#ifdef CONFIG_TAU_INT
                if (tau_initialized) {
                        seq_puts(p, "TAU: ");
                        for (j = 0; j < NR_CPUS; j++)
                                if (cpu_online(j))
                                        seq_printf(p, "%10u ", tau_interrupts(j));
                        seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
                }
#endif
#ifdef CONFIG_SMP
                /* should this be per processor send/receive? */
                seq_printf(p, "IPI (recv/sent): %10u/%u\n",
                                atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
                seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
        }
        return 0;
}
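
/*
 * Illustrative /proc/interrupts line produced by the code above
 * (counts and names are hypothetical):
 *
 *                 CPU0
 *        5:      12345  OpenPIC Level     eth0
 */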

static inline void
handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
{
        int status = 0;

        if (!(action->flags & SA_INTERRUPT))
                local_irq_enable();

        do {
                status |= action->flags;
                action->handler(irq, action->dev_id, regs);
                action = action->next;
        } while (action);
        if (status & SA_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
        local_irq_disable();
}

/*
 * Eventually, this should take an array of interrupts and an array size
 * so it can dispatch multiple interrupts.
 */
void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
{
        int status;
        struct irqaction *action;
        irq_desc_t *desc = irq_desc + irq;

        kstat_this_cpu.irqs[irq]++;
        spin_lock(&desc->lock);
        ack_irq(irq);
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier.
         * WAITING is used by probe to mark irqs that are being tested.
         */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        if (!(status & IRQ_PER_CPU))
                status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
                action = desc->action;
                if (!action || !action->handler) {
                        ppc_spurious_interrupts++;
                        printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
                        /* We can't call disable_irq here, it would deadlock */
                        ++desc->depth;
                        desc->status |= IRQ_DISABLED;
                        mask_irq(irq);
                        /* This is a real interrupt, we have to eoi it,
                           so we jump to out */
                        goto out;
                }
                status &= ~IRQ_PENDING; /* we commit to handling */
                if (!(status & IRQ_PER_CPU))
                        status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;

        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (unlikely(!action))
                goto out;

        /*
         * Edge triggered interrupts need to remember
         * pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in do_IRQ
         * or in the handler. But the code here only handles the _second_
         * instance of the irq, not the third or fourth. So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
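        /*
         * Concretely (sketch): if CPU A is inside handle_irq_event()
         * when the same IRQ fires on CPU B, B sees IRQ_INPROGRESS,
         * leaves IRQ_PENDING set and returns; A then notices
         * IRQ_PENDING below and runs the handlers one more time.
         */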
        for (;;) {
                spin_unlock(&desc->lock);
                handle_irq_event(irq, regs, action);
                spin_lock(&desc->lock);

                if (likely(!(desc->status & IRQ_PENDING)))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
out:
        desc->status &= ~IRQ_INPROGRESS;
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        if (irq_desc[irq].handler) {
                if (irq_desc[irq].handler->end)
                        irq_desc[irq].handler->end(irq);
                else if (irq_desc[irq].handler->enable)
                        irq_desc[irq].handler->enable(irq);
        }
        spin_unlock(&desc->lock);
}

void do_IRQ(struct pt_regs *regs)
{
        int irq, first = 1;

        irq_enter();

        /*
         * Every platform is required to implement ppc_md.get_irq.
         * This function will either return an irq number or -1 to
         * indicate there are no more pending.  So if we get -1 the
         * first time through the loop, there was no IRQ pending at all.
         * The value -2 is for buggy hardware and means that this IRQ
         * has already been handled. -- Tom
         */
        while ((irq = ppc_md.get_irq(regs)) >= 0) {
                ppc_irq_dispatch_handler(regs, irq);
                first = 0;
        }
        if (irq != -2 && first)
                /* That's not SMP safe ... but who cares ? */
                ppc_spurious_interrupts++;
        irq_exit();
}

unsigned long probe_irq_on(void)
{
        return 0;
}

EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off(unsigned long irqs)
{
        return 0;
}

EXPORT_SYMBOL(probe_irq_off);

unsigned int probe_irq_mask(unsigned long irqs)
{
        return 0;
}

#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
        /* busy-wait until any handler running on another CPU is done */
        while (irq_desc[irq].status & IRQ_INPROGRESS)
                barrier();
}
#endif /* CONFIG_SMP */

static struct proc_dir_entry *root_irq_dir;
static struct proc_dir_entry *irq_dir[NR_IRQS];
static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];

#ifdef CONFIG_IRQ_ALL_CPUS
#define DEFAULT_CPU_AFFINITY CPU_MASK_ALL
#else
#define DEFAULT_CPU_AFFINITY cpumask_of_cpu(0)
#endif

cpumask_t irq_affinity[NR_IRQS];

static int irq_affinity_read_proc(char *page, char **start, off_t off,
                        int count, int *eof, void *data)
{
        int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);

        if (count - len < 2)
                return -EINVAL;
        len += sprintf(page + len, "\n");
        return len;
}

static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
                                        unsigned long count, void *data)
{
        int irq = (int)data, full_count = count, err;
        cpumask_t new_value, tmp;

        if (!irq_desc[irq].handler->set_affinity)
                return -EIO;

        err = cpumask_parse(buffer, count, new_value);
        if (err)
                return err;

        /*
         * Do not allow disabling IRQs completely - it's a too easy
         * way to make the system unusable accidentally :-) At least
         * one online CPU still has to be targeted.
         *
         * We assume a 1-1 logical<->physical cpu mapping here.  If
         * we assume that the cpu indices in /proc/irq/../smp_affinity
         * are actually logical cpu #'s then we have no problem.
         *  -- Cort <cort@fsmlabs.com>
         */
        cpus_and(tmp, new_value, cpu_online_map);
        if (cpus_empty(tmp))
                return -EINVAL;

        irq_affinity[irq] = new_value;
        irq_desc[irq].handler->set_affinity(irq, new_value);

        return full_count;
}
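
/*
 * The smp_affinity file takes a hex CPU mask, e.g. (hypothetical irq
 * number):
 *
 *      echo 1 > /proc/irq/16/smp_affinity
 *
 * would route IRQ 16 to CPU0 only, provided the PIC implements
 * ->set_affinity.
 */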

static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
                        int count, int *eof, void *data)
{
        int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);

        if (count - len < 2)
                return -EINVAL;
        len += sprintf(page + len, "\n");
        return len;
}

static int prof_cpu_mask_write_proc(struct file *file, const char __user *buffer,
                                        unsigned long count, void *data)
{
        cpumask_t *mask = (cpumask_t *)data;
        unsigned long full_count = count;
        cpumask_t new_value;
        int err;

        err = cpumask_parse(buffer, count, new_value);
        if (err)
                return err;

        *mask = new_value;
        return full_count;
}

#define MAX_NAMELEN 10

static void register_irq_proc(unsigned int irq)
{
        struct proc_dir_entry *entry;
        char name[MAX_NAMELEN];

        if (!root_irq_dir || (irq_desc[irq].handler == NULL) || irq_dir[irq])
                return;

        memset(name, 0, MAX_NAMELEN);
        sprintf(name, "%d", irq);

        /* create /proc/irq/1234 */
        irq_dir[irq] = proc_mkdir(name, root_irq_dir);

        /* create /proc/irq/1234/smp_affinity */
        entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
        if (!entry)
                return;

        entry->nlink = 1;
        entry->data = (void *)irq;
        entry->read_proc = irq_affinity_read_proc;
        entry->write_proc = irq_affinity_write_proc;

        smp_affinity_entry[irq] = entry;
}

/* must be a cpumask_t: prof_cpu_mask_{read,write}_proc treat it as one */
cpumask_t prof_cpu_mask = CPU_MASK_ALL;

void init_irq_proc(void)
{
        struct proc_dir_entry *entry;
        int i;

        /* create /proc/irq */
        root_irq_dir = proc_mkdir("irq", NULL);

        /* create /proc/irq/prof_cpu_mask */
        entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
        if (entry) {
                entry->nlink = 1;
                entry->data = (void *)&prof_cpu_mask;
                entry->read_proc = prof_cpu_mask_read_proc;
                entry->write_proc = prof_cpu_mask_write_proc;
        }

        /*
         * Create entries for all existing IRQs.
         */
        for (i = 0; i < NR_IRQS; i++) {
                if (irq_desc[i].handler == NULL)
                        continue;
                register_irq_proc(i);
        }
}

irqreturn_t no_action(int irq, void *dev, struct pt_regs *regs)
{
        return IRQ_NONE;
}

void __init init_IRQ(void)
{
        int i;

        for (i = 0; i < NR_IRQS; ++i)
                irq_affinity[i] = DEFAULT_CPU_AFFINITY;

        ppc_md.init_IRQ();
}