/* $Id: irq.c,v 1.20 2004/01/13 05:52:11 kkojima Exp $
 *
 * linux/arch/sh/kernel/irq.c
 *
 *      Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 *
 * SuperH version:  Copyright (C) 1999  Niibe Yutaka
 */

/*
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <linux/irq.h>

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
        [0 ... NR_IRQS-1] = {
                .handler = &no_irq_type,
                .lock = SPIN_LOCK_UNLOCKED
        }
};

/*
 * Special irq handlers.
 */

irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{ return IRQ_NONE; }

/*
 * Generic no controller code
 */

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself; it doesn't deserve
 * a generic callback, I think.
 */
        printk("unexpected IRQ trap at vector %02x\n", irq);
}

/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none   disable_none
#define end_none        enable_none

struct hw_interrupt_type no_irq_type = {
        "none",
        startup_none,
        shutdown_none,
        enable_none,
        disable_none,
        ack_none,
        end_none
};

/*
 * Generic, controller-independent functions:
 */

#if defined(CONFIG_PROC_FS)
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction * action;
        unsigned long flags;

        if (i == 0) {
                seq_puts(p, "           ");
                for (j=0; j<NR_CPUS; j++)
                        if (cpu_online(j))
                                seq_printf(p, "CPU%d       ",j);
                seq_putc(p, '\n');
        }

        if (i < ACTUAL_NR_IRQS) {
                spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto unlock;
                seq_printf(p, "%3d: ",i);
                seq_printf(p, "%10u ", kstat_irqs(i));
                seq_printf(p, " %14s", irq_desc[i].handler->typename);
                seq_printf(p, "  %s", action->name);

                for (action=action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);
                seq_putc(p, '\n');
unlock:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        }
        return 0;
}
#endif
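
/*
 * Editorial sketch (not part of the original file): with the format
 * strings used above, a populated /proc/interrupts line comes out
 * roughly as
 *
 *      16:      12345        IPR-IRQ  eth0
 *
 * i.e. the IRQ number, the per-IRQ count from kstat_irqs(), the
 * controller's typename and the name(s) of every registered action.
 * The numbers and names shown here are made up for illustration.
 */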

/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
        int status = 1; /* Force the "do bottom halves" bit */
        int ret, retval = 0;

        if (!(action->flags & SA_INTERRUPT))
                local_irq_enable();

        do {
                ret = action->handler(irq, action->dev_id, regs);
                if (ret == IRQ_HANDLED)
                        status |= action->flags;
                retval |= ret;
                action = action->next;
        } while (action);

        if (status & SA_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);

        local_irq_disable();
        return retval;
}

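/*
 * Editorial sketch (not part of the original file): a driver handler
 * invoked by the loop above typically checks whether its own device
 * raised the line and reports the result through its return value.
 * The names my_dev, my_handler and device_raised_irq() are hypothetical:
 *
 *      static irqreturn_t my_handler(int irq, void *dev_id,
 *                                    struct pt_regs *regs)
 *      {
 *              struct my_dev *dev = dev_id;
 *
 *              if (!device_raised_irq(dev))
 *                      return IRQ_NONE;        // not ours on a shared line
 *              // ... service the device ...
 *              return IRQ_HANDLED;
 *      }
 *
 * A result of IRQ_NONE from every sharer is what note_interrupt()
 * further down counts as an unhandled interrupt.
 */
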
static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
        struct irqaction *action;

        if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
                printk(KERN_ERR "irq event %d: bogus return value %x\n",
                                irq, action_ret);
        } else {
                printk(KERN_ERR "irq %d: nobody cared!\n", irq);
        }
        dump_stack();
        printk(KERN_ERR "handlers:\n");
        action = desc->action;
        do {
                printk(KERN_ERR "[<%p>]", action->handler);
                print_symbol(" (%s)",
                        (unsigned long)action->handler);
                printk("\n");
                action = action->next;
        } while (action);
}

static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
        static int count = 100;

        if (count) {
                count--;
                __report_bad_irq(irq, desc, action_ret);
        }
}

static int noirqdebug;

static int __init noirqdebug_setup(char *str)
{
        noirqdebug = 1;
        printk("IRQ lockup detection disabled\n");
        return 1;
}

__setup("noirqdebug", noirqdebug_setup);

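/*
 * Editorial note (not part of the original file): the __setup() hook
 * above means the detection logic in note_interrupt() below can be
 * switched off by booting with "noirqdebug" on the kernel command
 * line, e.g.
 *
 *      console=ttySC0 noirqdebug
 *
 * (the console= value is only an illustration).
 */
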
/*
 * If 99,900 of the previous 100,000 interrupts have not been handled then
 * assume that the IRQ is stuck in some manner.  Drop a diagnostic and try to
 * turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly-functioning
 *  device sharing an IRQ with the failing one)
 *
 * Called under desc->lock
 */
static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
        if (action_ret != IRQ_HANDLED) {
                desc->irqs_unhandled++;
                if (action_ret != IRQ_NONE)
                        report_bad_irq(irq, desc, action_ret);
        }

        desc->irq_count++;
        if (desc->irq_count < 100000)
                return;

        desc->irq_count = 0;
        if (desc->irqs_unhandled > 99900) {
                /*
                 * The interrupt is stuck
                 */
                __report_bad_irq(irq, desc, action_ret);
                /*
                 * Now kill the IRQ
                 */
                printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
                desc->status |= IRQ_DISABLED;
                desc->handler->disable(irq);
        }
        desc->irqs_unhandled = 0;
}

/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */
inline void disable_irq_nosync(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);
        if (!desc->depth++) {
                desc->status |= IRQ_DISABLED;
                desc->handler->disable(irq);
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}

/*
 * Synchronous version of the above, making sure the IRQ is
 * no longer running on any other CPU.
 */
void disable_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        disable_irq_nosync(irq);
        if (desc->action)
                synchronize_irq(irq);
}

void enable_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);
        switch (desc->depth) {
        case 1: {
                unsigned int status = desc->status & ~(IRQ_DISABLED | IRQ_INPROGRESS);
                desc->status = status;
                if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
                        desc->status = status | IRQ_REPLAY;
                        hw_resend_irq(desc->handler,irq);
                }
                desc->handler->enable(irq);
                /* fall-through */
        }
        default:
                desc->depth--;
                break;
        case 0:
                printk("enable_irq() unbalanced from %p\n",
                       __builtin_return_address(0));
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}

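/*
 * Editorial sketch (not part of the original file): the usual pattern
 * for a driver that must briefly keep its own handler from running
 * (dev and dev->irq are hypothetical):
 *
 *      disable_irq(dev->irq);          // also waits for a running handler
 *      // ... touch state shared with the handler ...
 *      enable_irq(dev->irq);
 *
 * disable_irq_nosync() is the variant for contexts that must not wait,
 * for instance code called from the handler of that same line.
 */
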
/*
 * do_IRQ handles all normal device IRQ's.
 */
asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
                      unsigned long r6, unsigned long r7,
                      struct pt_regs regs)
{
        /*
         * We ack quickly, we don't want the irq controller
         * thinking we're snobs just because some other CPU has
         * disabled global interrupts (we have already done the
         * INT_ACK cycles, it's too late to try to pretend to the
         * controller that we aren't taking the interrupt).
         *
         * 0 return value means that this irq is already being
         * handled by some other CPU. (or is disabled)
         */
        int irq;
        irq_desc_t *desc;
        struct irqaction * action;
        unsigned int status;

        irq_enter();

#ifdef CONFIG_PREEMPT
        /*
         * At this point we're now about to actually call handlers,
         * and interrupts might get reenabled during them... bump
         * preempt_count to prevent any preemption while the handler
         * called here is pending...
         */
        preempt_disable();
#endif

        /*
         * Get the IRQ number: the exception entry code leaves the event
         * code in r2_bank, and shifting it right by 5 and subtracting 16
         * turns event code 0x200 + 0x20*n into irq n. irq_demux() then
         * gives the board a chance to remap cascaded sources.
         */
        asm volatile("stc       r2_bank, %0\n\t"
                     "shlr2     %0\n\t"
                     "shlr2     %0\n\t"
                     "shlr      %0\n\t"
                     "add       #-16, %0\n\t"
                     :"=z" (irq));
        irq = irq_demux(irq);

        kstat_this_cpu.irqs[irq]++;
        desc = irq_desc + irq;
        spin_lock(&desc->lock);
        desc->handler->ack(irq);
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier.
         * WAITING is used by probe to mark irqs that are being tested.
         */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;

        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (unlikely(!action))
                goto out;

        /*
         * Edge triggered interrupts need to remember
         * pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in do_IRQ
         * or in the handler. But the code here only handles the _second_
         * instance of the irq, not the third or fourth. So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                irqreturn_t action_ret;

                spin_unlock(&desc->lock);
                action_ret = handle_IRQ_event(irq, &regs, action);
                spin_lock(&desc->lock);
                if (!noirqdebug)
                        note_interrupt(irq, desc, action_ret);
                if (likely(!(desc->status & IRQ_PENDING)))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;

out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->handler->end(irq);
        spin_unlock(&desc->lock);

        irq_exit();

#ifdef CONFIG_PREEMPT
        /*
         * We're done with the handlers; interrupts should be
         * disabled at this point. Decrement preempt_count now so
         * that preemption may be allowed again once we return...
         */
        preempt_enable_no_resched();
#endif

        return 1;
}

int request_irq(unsigned int irq,
                irqreturn_t (*handler)(int, void *, struct pt_regs *),
                unsigned long irqflags,
                const char * devname,
                void *dev_id)
{
        int retval;
        struct irqaction * action;

        if (irq >= ACTUAL_NR_IRQS)
                return -EINVAL;
        if (!handler)
                return -EINVAL;

        action = (struct irqaction *)
                        kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = irqflags;
        cpus_clear(action->mask);
        action->name = devname;
        action->next = NULL;
        action->dev_id = dev_id;

        retval = setup_irq(irq, action);
        if (retval)
                kfree(action);
        return retval;
}

EXPORT_SYMBOL(request_irq);

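/*
 * Editorial sketch (not part of the original file): typical driver-side
 * use of the two exported entry points above and below; dev, dev->irq,
 * my_handler and "mydev" are hypothetical:
 *
 *      err = request_irq(dev->irq, my_handler, SA_SHIRQ, "mydev", dev);
 *      if (err)
 *              return err;
 *      // ... device is live, interrupts may arrive from here on ...
 *      free_irq(dev->irq, dev);
 *
 * On a shared line the dev_id cookie must be unique per registration,
 * since free_irq() below uses it to pick the matching irqaction.
 */
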
void free_irq(unsigned int irq, void *dev_id)
{
        irq_desc_t *desc;
        struct irqaction **p;
        unsigned long flags;

        if (irq >= ACTUAL_NR_IRQS)
                return;

        desc = irq_desc + irq;
        spin_lock_irqsave(&desc->lock,flags);
        p = &desc->action;
        for (;;) {
                struct irqaction * action = *p;
                if (action) {
                        struct irqaction **pp = p;
                        p = &action->next;
                        if (action->dev_id != dev_id)
                                continue;

                        /* Found it - now remove it from the list of entries */
                        *pp = action->next;
                        if (!desc->action) {
                                desc->status |= IRQ_DISABLED;
                                desc->handler->shutdown(irq);
                        }
                        spin_unlock_irqrestore(&desc->lock,flags);
                        synchronize_irq(irq);
                        kfree(action);
                        return;
                }
                printk("Trying to free already-free IRQ%d\n",irq);
                spin_unlock_irqrestore(&desc->lock,flags);
                return;
        }
}

EXPORT_SYMBOL(free_irq);

static DECLARE_MUTEX(probe_sem);

/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
unsigned long probe_irq_on(void)
{
        unsigned int i;
        irq_desc_t *desc;
        unsigned long val;
        unsigned long delay;

        down(&probe_sem);
        /*
         * something may have generated an irq long ago and we want to
         * flush such a longstanding irq before considering it as spurious.
         */
        for (i = NR_IRQS-1; i > 0; i--) {
                desc = irq_desc + i;

                spin_lock_irq(&desc->lock);
                if (!desc->action)
                        desc->handler->startup(i);
                spin_unlock_irq(&desc->lock);
        }

        /* Wait for longstanding interrupts to trigger. */
        for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
                /* about 20ms delay */ barrier();

        /*
         * enable any unassigned irqs
         * (we must startup again here because if a longstanding irq
         * happened in the previous stage, it may have masked itself)
         */
        for (i = NR_IRQS-1; i > 0; i--) {
                desc = irq_desc + i;

                spin_lock_irq(&desc->lock);
                if (!desc->action) {
                        desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
                        if (desc->handler->startup(i))
                                desc->status |= IRQ_PENDING;
                }
                spin_unlock_irq(&desc->lock);
        }

        /*
         * Wait for spurious interrupts to trigger
         */
        for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
                /* about 100ms delay */ barrier();

        /*
         * Now filter out any obviously spurious interrupts
         */
        val = 0;
        for (i=0; i<NR_IRQS; i++) {
                unsigned int status;

                desc = irq_desc + i;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        /* It triggered already - consider it spurious. */
                        if (!(status & IRQ_WAITING)) {
                                desc->status = status & ~IRQ_AUTODETECT;
                                desc->handler->shutdown(i);
                        } else
                                if (i < 32)
                                        val |= 1 << i;
                }
                spin_unlock_irq(&desc->lock);
        }

        return val;
}

EXPORT_SYMBOL(probe_irq_on);

/* Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/*
 *      probe_irq_mask - scan a bitmap of interrupt lines
 *      @val:   mask of interrupts to consider
 *
 *      Scan the ISA bus interrupt lines and return a bitmap of
 *      active interrupts. The interrupt probe logic state is then
 *      returned to its previous value.
 *
 *      Note: we need to scan all the irq's even though we will
 *      only return ISA irq numbers - just so that we reset them
 *      all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
        int i;
        unsigned int mask;

        mask = 0;
        for (i = 0; i < NR_IRQS; i++) {
                irq_desc_t *desc = irq_desc + i;
                unsigned int status;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        if (i < 16 && !(status & IRQ_WAITING))
                                mask |= 1 << i;

                        desc->status = status & ~IRQ_AUTODETECT;
                        desc->handler->shutdown(i);
                }
                spin_unlock_irq(&desc->lock);
        }
        up(&probe_sem);

        return mask & val;
}

int probe_irq_off(unsigned long val)
{
        int i, irq_found, nr_irqs;

        nr_irqs = 0;
        irq_found = 0;
        for (i=0; i<NR_IRQS; i++) {
                irq_desc_t *desc = irq_desc + i;
                unsigned int status;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        if (!(status & IRQ_WAITING)) {
                                if (!nr_irqs)
                                        irq_found = i;
                                nr_irqs++;
                        }
                        desc->status = status & ~IRQ_AUTODETECT;
                        desc->handler->shutdown(i);
                }
                spin_unlock_irq(&desc->lock);
        }
        up(&probe_sem);

        if (nr_irqs > 1)
                irq_found = -irq_found;
        return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);

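/*
 * Editorial sketch (not part of the original file): the classic probing
 * sequence built on probe_irq_on()/probe_irq_off(); the trigger helper
 * my_device_fire_irq() is hypothetical:
 *
 *      unsigned long mask = probe_irq_on();
 *      my_device_fire_irq(dev);        // make the hardware raise its line once
 *      udelay(100);
 *      irq = probe_irq_off(mask);      // >0: found, 0: none, <0: several fired
 *
 * As coded above, probe_irq_off() returns the negated first candidate
 * when more than one line triggered during the probe window.
 */
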
int setup_irq(unsigned int irq, struct irqaction * new)
{
        int shared = 0;
        struct irqaction *old, **p;
        unsigned long flags;
        irq_desc_t *desc = irq_desc + irq;

        if (desc->handler == &no_irq_type)
                return -ENOSYS;
        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & SA_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, so we want to call it first,
                 * outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is loaded without actually installing a new
                 * handler, but is that really a problem?  Only the
                 * sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * The following block of code has to be executed atomically
         */
        spin_lock_irqsave(&desc->lock,flags);
        p = &desc->action;
        if ((old = *p) != NULL) {
                /* Can't share interrupts unless both agree to */
                if (!(old->flags & new->flags & SA_SHIRQ)) {
                        spin_unlock_irqrestore(&desc->lock,flags);
                        return -EBUSY;
                }

                /* add new interrupt at end of irq queue */
                do {
                        p = &old->next;
                        old = *p;
                } while (old);
                shared = 1;
        }

        *p = new;

        if (!shared) {
                desc->depth = 0;
                desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
                desc->handler->startup(irq);
        }
        spin_unlock_irqrestore(&desc->lock,flags);
        return 0;
}

#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)

void init_irq_proc(void)
{
}
#endif

/* Taken from the 2.5 alpha port */
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
        /* is there anything to synchronize with? */
        if (!irq_desc[irq].action)
                return;

        while (irq_desc[irq].status & IRQ_INPROGRESS)
                barrier();
}
#endif