ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2
[linux-2.6.git] / arch / sh / kernel / irq.c
1 /* $Id: irq.c,v 1.20 2004/01/13 05:52:11 kkojima Exp $
2  *
3  * linux/arch/sh/kernel/irq.c
4  *
5  *      Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
6  *
7  *
8  * SuperH version:  Copyright (C) 1999  Niibe Yutaka
9  */
10
11 /*
12  * IRQs are in fact implemented a bit like signal handlers for the kernel.
13  * Naturally it's not a 1:1 relation, but there are similarities.
14  */
15
16 #include <linux/config.h>
17 #include <linux/module.h>
18 #include <linux/ptrace.h>
19 #include <linux/errno.h>
20 #include <linux/kernel_stat.h>
21 #include <linux/signal.h>
22 #include <linux/sched.h>
23 #include <linux/ioport.h>
24 #include <linux/interrupt.h>
25 #include <linux/timex.h>
26 #include <linux/mm.h>
27 #include <linux/slab.h>
28 #include <linux/random.h>
29 #include <linux/smp.h>
30 #include <linux/smp_lock.h>
31 #include <linux/init.h>
32 #include <linux/seq_file.h>
33 #include <linux/kallsyms.h>
34
35 #include <asm/system.h>
36 #include <asm/io.h>
37 #include <asm/bitops.h>
38 #include <asm/pgalloc.h>
39 #include <asm/delay.h>
40 #include <asm/irq.h>
41 #include <linux/irq.h>
42
43 /*
44  * Controller mappings for all interrupt sources:
45  */
/*
 * Per-IRQ descriptor table.  Every entry starts out owned by the
 * placeholder "none" controller until a real hw_interrupt_type is
 * installed for that line.
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
        [0 ... NR_IRQS-1] = {
                .handler = &no_irq_type,
                .lock = SPIN_LOCK_UNLOCKED
        }
};
52
53 /*
54  * Special irq handlers.
55  */
56
57 irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
58 { return IRQ_NONE; }
59
60 /*
61  * Generic no controller code
62  */
63
/* "Enable" for the placeholder controller: nothing to do. */
static void enable_none(unsigned int irq)
{
}
/* "Startup" for the placeholder controller: never reports a pending irq. */
static unsigned int startup_none(unsigned int irq)
{
        return 0;
}
/* "Disable" for the placeholder controller: nothing to do. */
static void disable_none(unsigned int irq)
{
}
67 static void ack_none(unsigned int irq)
68 {
69 /*
70  * 'what should we do if we get a hw irq event on an illegal vector'.
71  * each architecture has to answer this themselves, it doesn't deserve
72  * a generic callback i think.
73  */
74         printk("unexpected IRQ trap at vector %02x\n", irq);
75 }
76
77 /* startup is the same as "enable", shutdown is same as "disable" */
78 #define shutdown_none   disable_none
79 #define end_none        enable_none
80
81 struct hw_interrupt_type no_irq_type = {
82         "none",
83         startup_none,
84         shutdown_none,
85         enable_none,
86         disable_none,
87         ack_none,
88         end_none
89 };
90
91 /*
92  * Generic, controller-independent functions:
93  */
94
#if defined(CONFIG_PROC_FS)
/*
 * /proc/interrupts: one header row of online CPUs before the first
 * entry, then one row per IRQ that has at least one action installed
 * (count, controller name, and the comma-separated handler names).
 */
int show_interrupts(struct seq_file *p, void *v)
{
        int irq = *(loff_t *) v;
        struct irqaction *action;
        unsigned long flags;
        int cpu;

        if (irq == 0) {
                seq_puts(p, "           ");
                for (cpu = 0; cpu < NR_CPUS; cpu++)
                        if (cpu_online(cpu))
                                seq_printf(p, "CPU%d       ", cpu);
                seq_putc(p, '\n');
        }

        if (irq >= ACTUAL_NR_IRQS)
                return 0;

        spin_lock_irqsave(&irq_desc[irq].lock, flags);
        action = irq_desc[irq].action;
        if (action) {
                seq_printf(p, "%3d: ", irq);
                seq_printf(p, "%10u ", kstat_irqs(irq));
                seq_printf(p, " %14s", irq_desc[irq].handler->typename);
                seq_printf(p, "  %s", action->name);
                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);
                seq_putc(p, '\n');
        }
        spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        return 0;
}
#endif
129
130 /*
131  * This should really return information about whether
132  * we should do bottom half handling etc. Right now we
133  * end up _always_ checking the bottom half, which is a
134  * waste of time and is not what some drivers would
135  * prefer.
136  */
137 int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
138 {
139         int status = 1; /* Force the "do bottom halves" bit */
140         int retval = 0;
141
142         if (!(action->flags & SA_INTERRUPT))
143                 local_irq_enable();
144
145         do {
146                 status |= action->flags;
147                 retval |= action->handler(irq, action->dev_id, regs);
148                 action = action->next;
149         } while (action);
150
151         if (status & SA_SAMPLE_RANDOM)
152                 add_interrupt_randomness(irq);
153
154         local_irq_disable();
155         return retval;
156 }
157
158 static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
159 {
160         struct irqaction *action;
161
162         if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
163                 printk(KERN_ERR "irq event %d: bogus return value %x\n",
164                                 irq, action_ret);
165         } else {
166                 printk(KERN_ERR "irq %d: nobody cared!\n", irq);
167         }
168         dump_stack();
169         printk(KERN_ERR "handlers:\n");
170         action = desc->action;
171         do {
172                 printk(KERN_ERR "[<%p>]", action->handler);
173                 print_symbol(" (%s)",
174                         (unsigned long)action->handler);
175                 printk("\n");
176                 action = action->next;
177         } while (action);
178 }
179
180 static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
181 {
182         static int count = 100;
183
184         if (count) {
185                 count--;
186                 __report_bad_irq(irq, desc, action_ret);
187         }
188 }
189
/* Nonzero when "noirqdebug" was passed on the command line; makes
 * do_IRQ() skip the note_interrupt() spurious/stuck-irq accounting. */
static int noirqdebug;

/* Boot-parameter hook for "noirqdebug". */
static int __init noirqdebug_setup(char *str)
{
        noirqdebug = 1;
        printk("IRQ lockup detection disabled\n");
        return 1;
}

__setup("noirqdebug", noirqdebug_setup);
200
201 /*
202  * If 99,900 of the previous 100,000 interrupts have not been handled then
203  * assume that the IRQ is stuck in some manner.  Drop a diagnostic and try to
204  * turn the IRQ off.
205  *
206  * (The other 100-of-100,000 interrupts may have been a correctly-functioning
207  *  device sharing an IRQ with the failing one)
208  *
209  * Called under desc->lock
210  */
211 static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
212 {
213         if (action_ret != IRQ_HANDLED) {
214                 desc->irqs_unhandled++;
215                 if (action_ret != IRQ_NONE)
216                         report_bad_irq(irq, desc, action_ret);
217         }
218
219         desc->irq_count++;
220         if (desc->irq_count < 100000)
221                 return;
222
223         desc->irq_count = 0;
224         if (desc->irqs_unhandled > 99900) {
225                 /*
226                  * The interrupt is stuck
227                  */
228                 __report_bad_irq(irq, desc, action_ret);
229                 /*
230                  * Now kill the IRQ
231                  */
232                 printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
233                 desc->status |= IRQ_DISABLED;
234                 desc->handler->disable(irq);
235         }
236         desc->irqs_unhandled = 0;
237 }
238
239 /*
240  * Generic enable/disable code: this just calls
241  * down into the PIC-specific version for the actual
242  * hardware disable after having gotten the irq
243  * controller lock. 
244  */
245 inline void disable_irq_nosync(unsigned int irq)
246 {
247         irq_desc_t *desc = irq_desc + irq;
248         unsigned long flags;
249
250         spin_lock_irqsave(&desc->lock, flags);
251         if (!desc->depth++) {
252                 desc->status |= IRQ_DISABLED;
253                 desc->handler->disable(irq);
254         }
255         spin_unlock_irqrestore(&desc->lock, flags);
256 }
257
258 /*
259  * Synchronous version of the above, making sure the IRQ is
260  * no longer running on any other IRQ..
261  */
262 void disable_irq(unsigned int irq)
263 {
264         irq_desc_t *desc = irq_desc + irq;
265         disable_irq_nosync(irq);
266         if (desc->action)
267                 synchronize_irq(irq);
268 }
269
/*
 * enable_irq - undo one level of disable_irq()/disable_irq_nosync()
 *
 * Decrements desc->depth; only when the last disable level is removed
 * (depth 1 -> 0) is the hardware actually unmasked.  An interrupt that
 * arrived while the line was masked (IRQ_PENDING set, not yet replayed)
 * is resent via hw_resend_irq() so it is not lost.
 *
 * NOTE: the case ordering is deliberate - case 1 falls through into
 * default so the depth decrement is shared; case 0 (unbalanced enable)
 * is placed last and does NOT decrement.
 */
void enable_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);
        switch (desc->depth) {
        case 1: {
                /* last disable level: clear disabled/in-progress state */
                unsigned int status = desc->status & ~(IRQ_DISABLED | IRQ_INPROGRESS);
                desc->status = status;
                if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
                        /* an event was dropped while masked: replay it */
                        desc->status = status | IRQ_REPLAY;
                        hw_resend_irq(desc->handler,irq);
                }
                desc->handler->enable(irq);
                /* fall-through */
        }
        default:
                desc->depth--;
                break;
        case 0:
                /* enable without a matching disable: warn with caller PC */
                printk("enable_irq() unbalanced from %p\n",
                       __builtin_return_address(0));
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}
296
/*
 * do_IRQ handles all normal device IRQ's.
 */
asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
                      unsigned long r6, unsigned long r7,
                      struct pt_regs regs)
{
        /*
         * We ack quickly, we don't want the irq controller
         * thinking we're snobs just because some other CPU has
         * disabled global interrupts (we have already done the
         * INT_ACK cycles, it's too late to try to pretend to the
         * controller that we aren't taking the interrupt).
         *
         * 0 return value means that this irq is already being
         * handled by some other CPU. (or is disabled)
         */
        int irq;
        irq_desc_t *desc;
        struct irqaction * action;
        unsigned int status;

        irq_enter();

#ifdef CONFIG_PREEMPT
        /*
         * At this point we're now about to actually call handlers,
         * and interrupts might get reenabled during them... bump
         * preempt_count to prevent any preemption while the handler
         * called here is pending...
         */
        preempt_disable();
#endif

        /* Get IRQ number */
        /* NOTE(review): reads the exception event code out of r2_bank
         * and converts it to a vector number ((code >> 5) - 16) -
         * confirm against the SuperH INTEVT encoding. */
        asm volatile("stc       r2_bank, %0\n\t"
                     "shlr2     %0\n\t"
                     "shlr2     %0\n\t"
                     "shlr      %0\n\t"
                     "add       #-16, %0\n\t"
                     :"=z" (irq));
        /* let board code remap/demultiplex cascaded sources */
        irq = irq_demux(irq);

        kstat_this_cpu.irqs[irq]++;
        desc = irq_desc + irq;
        spin_lock(&desc->lock);
        desc->handler->ack(irq);
        /*
           REPLAY is when Linux resends an IRQ that was dropped earlier
           WAITING is used by probe to mark irqs that are being tested
           */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;

        /*
         * If there is no IRQ handler or it was disabled, exit early.
           Since we set PENDING, if another processor is handling
           a different instance of this same irq, the other processor
           will take care of it.
         */
        if (unlikely(!action))
                goto out;

        /*
         * Edge triggered interrupts need to remember
         * pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in do_IRQ
         * or in the handler. But the code here only handles the _second_
         * instance of the irq, not the third or fourth. So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                irqreturn_t action_ret;

                /* drop the lock while handlers run: they may take a
                 * while and may re-enable interrupts */
                spin_unlock(&desc->lock);
                action_ret = handle_IRQ_event(irq, &regs, action);
                spin_lock(&desc->lock);
                if (!noirqdebug)
                        note_interrupt(irq, desc, action_ret);
                /* a second instance arrived meanwhile: loop to service it */
                if (likely(!(desc->status & IRQ_PENDING)))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;

out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->handler->end(irq);
        spin_unlock(&desc->lock);

        irq_exit();

#ifdef CONFIG_PREEMPT
        /*
         * We're done with the handlers, interrupts should be
         * currently disabled; decrement preempt_count now so
         * as we return preemption may be allowed...
         */
        preempt_enable_no_resched();
#endif

        return 1;
}
417
418 int request_irq(unsigned int irq, 
419                 irqreturn_t (*handler)(int, void *, struct pt_regs *),
420                 unsigned long irqflags, 
421                 const char * devname,
422                 void *dev_id)
423 {
424         int retval;
425         struct irqaction * action;
426
427         if (irq >= ACTUAL_NR_IRQS)
428                 return -EINVAL;
429         if (!handler)
430                 return -EINVAL;
431
432         action = (struct irqaction *)
433                         kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
434         if (!action)
435                 return -ENOMEM;
436
437         action->handler = handler;
438         action->flags = irqflags;
439         action->mask = 0;
440         action->name = devname;
441         action->next = NULL;
442         action->dev_id = dev_id;
443
444         retval = setup_irq(irq, action);
445         if (retval)
446                 kfree(action);
447         return retval;
448 }
449
450 EXPORT_SYMBOL(request_irq);
451
452 void free_irq(unsigned int irq, void *dev_id)
453 {
454         irq_desc_t *desc;
455         struct irqaction **p;
456         unsigned long flags;
457
458         if (irq >= ACTUAL_NR_IRQS)
459                 return;
460
461         desc = irq_desc + irq;
462         spin_lock_irqsave(&desc->lock,flags);
463         p = &desc->action;
464         for (;;) {
465                 struct irqaction * action = *p;
466                 if (action) {
467                         struct irqaction **pp = p;
468                         p = &action->next;
469                         if (action->dev_id != dev_id)
470                                 continue;
471
472                         /* Found it - now remove it from the list of entries */
473                         *pp = action->next;
474                         if (!desc->action) {
475                                 desc->status |= IRQ_DISABLED;
476                                 desc->handler->shutdown(irq);
477                         }
478                         spin_unlock_irqrestore(&desc->lock,flags);
479                         synchronize_irq(irq);
480                         kfree(action);
481                         return;
482                 }
483                 printk("Trying to free free IRQ%d\n",irq);
484                 spin_unlock_irqrestore(&desc->lock,flags);
485                 return;
486         }
487 }
488
489 EXPORT_SYMBOL(free_irq);
490
/* Serializes probe_irq_on() .. probe_irq_off() sequences between callers;
 * released in probe_irq_off(). */
static DECLARE_MUTEX(probe_sem);

/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 *
 * Three phases: (1) start up unclaimed lines and let any longstanding
 * event fire and mask itself, (2) re-start them marked AUTODETECT |
 * WAITING and wait for spurious events, (3) keep only lines still
 * WAITING as candidates.  Returns a bitmask of candidate irqs 0..31.
 */
unsigned long probe_irq_on(void)
{
        unsigned int i;
        irq_desc_t *desc;
        unsigned long val;
        unsigned long delay;

        down(&probe_sem);
        /* 
         * something may have generated an irq long ago and we want to
         * flush such a longstanding irq before considering it as spurious. 
         */
        for (i = NR_IRQS-1; i > 0; i--) {
                desc = irq_desc + i;

                spin_lock_irq(&desc->lock);
                /* only lines no driver has claimed */
                if (!desc->action)
                        desc->handler->startup(i);
                spin_unlock_irq(&desc->lock);
        }

        /* Wait for longstanding interrupts to trigger. */
        for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
                /* about 20ms delay */ barrier();

        /*
         * enable any unassigned irqs
         * (we must startup again here because if a longstanding irq
         * happened in the previous stage, it may have masked itself)
         */
        for (i = NR_IRQS-1; i > 0; i--) {
                desc = irq_desc + i;

                spin_lock_irq(&desc->lock);
                if (!desc->action) {
                        desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
                        if (desc->handler->startup(i))
                                desc->status |= IRQ_PENDING;
                }
                spin_unlock_irq(&desc->lock);
        }

        /*
         * Wait for spurious interrupts to trigger
         */
        for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
                /* about 100ms delay */ barrier();

        /*
         * Now filter out any obviously spurious interrupts
         */
        val = 0;
        for (i=0; i<NR_IRQS; i++) {
                unsigned int status;

                desc = irq_desc + i;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        /* It triggered already - consider it spurious. */
                        if (!(status & IRQ_WAITING)) {
                                desc->status = status & ~IRQ_AUTODETECT;
                                desc->handler->shutdown(i);
                        } else
                                /* still waiting: candidate.  Only irqs
                                 * 0..31 fit in the returned bitmask. */
                                if (i < 32)
                                        val |= 1 << i;
                }
                spin_unlock_irq(&desc->lock);
        }

        return val;
}

EXPORT_SYMBOL(probe_irq_on);
577
578 int probe_irq_off(unsigned long val)
579 {
580         int i, irq_found, nr_irqs;
581
582         nr_irqs = 0;
583         irq_found = 0;
584         for (i=0; i<NR_IRQS; i++) {
585                 irq_desc_t *desc = irq_desc + i;
586                 unsigned int status;
587
588                 spin_lock_irq(&desc->lock);
589                 status = desc->status;
590
591                 if (status & IRQ_AUTODETECT) {
592                         if (!(status & IRQ_WAITING)) {
593                                 if (!nr_irqs)
594                                         irq_found = i;
595                                 nr_irqs++;
596                         }
597                         desc->status = status & ~IRQ_AUTODETECT;
598                         desc->handler->shutdown(i);
599                 }
600                 spin_unlock_irq(&desc->lock);
601         }
602         up(&probe_sem);
603
604         if (nr_irqs > 1)
605                 irq_found = -irq_found;
606         return irq_found;
607 }
608
609 EXPORT_SYMBOL(probe_irq_off);
610
/*
 * setup_irq - install an already-built irqaction on @irq
 *
 * Returns 0 on success, -ENOSYS if no real controller is registered
 * for the line, -EBUSY if the line is in use and either party did not
 * set SA_SHIRQ.  On the first (non-shared) installation the line is
 * started up.  Ownership of @new passes to the descriptor on success;
 * on failure the caller still owns it (see request_irq()).
 */
int setup_irq(unsigned int irq, struct irqaction * new)
{
        int shared = 0;
        struct irqaction *old, **p;
        unsigned long flags;
        irq_desc_t *desc = irq_desc + irq;

        if (desc->handler == &no_irq_type)
                return -ENOSYS;
        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & SA_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, we want to call it first,
                 * outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is attempted to be loaded, without actually
                 * installing a new handler, but is this really a problem,
                 * only the sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * The following block of code has to be executed atomically
         */
        spin_lock_irqsave(&desc->lock,flags);
        p = &desc->action;
        if ((old = *p) != NULL) {
                /* Can't share interrupts unless both agree to */
                if (!(old->flags & new->flags & SA_SHIRQ)) {
                        spin_unlock_irqrestore(&desc->lock,flags);
                        return -EBUSY;
                }

                /* add new interrupt at end of irq queue */
                do {
                        p = &old->next;
                        old = *p;
                } while (old);
                shared = 1;
        }

        *p = new;

        if (!shared) {
                /* first handler: reset the disable depth and clear any
                 * stale autodetect/disabled state, then start the line */
                desc->depth = 0;
                desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
                desc->handler->startup(irq);
        }
        spin_unlock_irqrestore(&desc->lock,flags);
        return 0;
}
667
#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)

/* No per-irq /proc entries (e.g. smp_affinity) on sh: nothing to set up. */
void init_irq_proc(void)
{
}
#endif
674
/* Taken from the 2.5 alpha port */
#ifdef CONFIG_SMP
/*
 * Busy-wait until no CPU is inside a handler for @irq (IRQ_INPROGRESS
 * clear).  The caller is expected to have masked the line first, e.g.
 * via disable_irq_nosync().
 */
void synchronize_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;

        /* nothing to synchronize with if no handler is installed */
        if (!desc->action)
                return;

        while (desc->status & IRQ_INPROGRESS)
                barrier();
}
#endif