ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2
[linux-2.6.git] / arch / arm / kernel / irq.c
1 /*
2  *  linux/arch/arm/kernel/irq.c
3  *
4  *  Copyright (C) 1992 Linus Torvalds
5  *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  *  This file contains the code used by various IRQ handling routines:
12  *  asking for different IRQ's should be done through these routines
13  *  instead of just grabbing them. Thus setups with different IRQ numbers
14  *  shouldn't result in any weird surprises, and installing new handlers
15  *  should be easier.
16  *
17  *  IRQ's are in fact implemented a bit like signal handlers for the kernel.
18  *  Naturally it's not a 1:1 relation, but there are similarities.
19  */
20 #include <linux/config.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/module.h>
23 #include <linux/signal.h>
24 #include <linux/ioport.h>
25 #include <linux/interrupt.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/random.h>
29 #include <linux/smp.h>
30 #include <linux/init.h>
31 #include <linux/seq_file.h>
32 #include <linux/errno.h>
33 #include <linux/list.h>
34 #include <linux/kallsyms.h>
35
36 #include <asm/irq.h>
37 #include <asm/system.h>
38 #include <asm/mach/irq.h>
39
/*
 * Maximum IRQ count.  Currently, this is arbitrary.  However, it should
 * not be set too low to prevent false triggering.  Conversely, if it
 * is set too high, then you could miss a stuck IRQ.
 *
 * Maybe we ought to set a timer and re-enable the IRQ at a later time?
 */
#define MAX_IRQ_CNT     100000

/* Count of spurious/unexpected interrupts; shown as "Err:" by
 * show_interrupts(). */
static volatile unsigned long irq_err_count;
/* Protects irq_desc[] state and the irq_pending list. */
static spinlock_t irq_controller_lock;
/* Descriptors with a delayed interrupt waiting to be re-run (see
 * enable_irq() and do_pending_irqs()). */
static LIST_HEAD(irq_pending);

struct irqdesc irq_desc[NR_IRQS];
/* Machine-specific IRQ init hook; init_IRQ() calls it unconditionally,
 * so the platform must set it before then. */
void (*init_arch_irq)(void) __initdata = NULL;
55
/*
 * Dummy mask/unmask handler: used as the irqchip ack/mask/unmask
 * callback when no real hardware operation is wanted (see bad_chip).
 */
void dummy_mask_unmask_irq(unsigned int irq)
{
}
62
/*
 * Placeholder irqaction handler: always reports the interrupt as
 * not handled.
 */
irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_NONE;
}
67
68 void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
69 {
70         irq_err_count += 1;
71         printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
72 }
73
/* Chip whose hardware operations are all no-ops, for unclaimed or
 * out-of-range IRQs. */
static struct irqchip bad_chip = {
        .ack    = dummy_mask_unmask_irq,
        .mask   = dummy_mask_unmask_irq,
        .unmask = dummy_mask_unmask_irq,
};

/*
 * Descriptor used for out-of-range IRQ numbers (see asm_do_IRQ) and as
 * the initial state of every descriptor (see init_IRQ): starts disabled
 * and routes to do_bad_IRQ.
 */
static struct irqdesc bad_irq_desc = {
        .chip           = &bad_chip,
        .handle         = do_bad_IRQ,
        .pend           = LIST_HEAD_INIT(bad_irq_desc.pend),
        .disable_depth  = 1,
};
86
87 /**
88  *      disable_irq - disable an irq and wait for completion
89  *      @irq: Interrupt to disable
90  *
91  *      Disable the selected interrupt line.  Enables and disables
92  *      are nested.  We do this lazily.
93  *
94  *      This function may be called from IRQ context.
95  */
96 void disable_irq(unsigned int irq)
97 {
98         struct irqdesc *desc = irq_desc + irq;
99         unsigned long flags;
100
101         spin_lock_irqsave(&irq_controller_lock, flags);
102         desc->disable_depth++;
103         list_del_init(&desc->pend);
104         spin_unlock_irqrestore(&irq_controller_lock, flags);
105 }
106
/**
 *	enable_irq - enable interrupt handling on an irq
 *	@irq: Interrupt to enable
 *
 *	Re-enables the processing of interrupts on this IRQ line.
 *	Note that this may call the interrupt handler, so you may
 *	get unexpected results if you hold IRQs disabled.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (unlikely(!desc->disable_depth)) {
		/* More enables than disables: report the offending caller. */
		printk("enable_irq(%u) unbalanced from %p\n", irq,
			__builtin_return_address(0));
	} else if (!--desc->disable_depth) {
		/* Depth reached zero: really unmask the hardware. */
		desc->probing = 0;
		desc->chip->unmask(irq);

		/*
		 * If the interrupt is waiting to be processed,
		 * try to re-run it.  We can't directly run it
		 * from here since the caller might be in an
		 * interrupt-protected region.
		 *
		 * NOTE(review): a nonzero chip->retrigger() return appears
		 * to mean the hardware could not replay the IRQ, so we fall
		 * back to the software irq_pending list — confirm against
		 * the chip implementations.
		 */
		if (desc->pending && list_empty(&desc->pend)) {
			desc->pending = 0;
			if (!desc->chip->retrigger ||
			    desc->chip->retrigger(irq))
				list_add(&desc->pend, &irq_pending);
		}
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
145
146 /*
147  * Enable wake on selected irq
148  */
149 void enable_irq_wake(unsigned int irq)
150 {
151         struct irqdesc *desc = irq_desc + irq;
152         unsigned long flags;
153
154         spin_lock_irqsave(&irq_controller_lock, flags);
155         if (desc->chip->wake)
156                 desc->chip->wake(irq, 1);
157         spin_unlock_irqrestore(&irq_controller_lock, flags);
158 }
159
160 void disable_irq_wake(unsigned int irq)
161 {
162         struct irqdesc *desc = irq_desc + irq;
163         unsigned long flags;
164
165         spin_lock_irqsave(&irq_controller_lock, flags);
166         if (desc->chip->wake)
167                 desc->chip->wake(irq, 0);
168         spin_unlock_irqrestore(&irq_controller_lock, flags);
169 }
170
171 int show_interrupts(struct seq_file *p, void *v)
172 {
173         int i = *(loff_t *) v;
174         struct irqaction * action;
175         unsigned long flags;
176
177         if (i < NR_IRQS) {
178                 spin_lock_irqsave(&irq_controller_lock, flags);
179                 action = irq_desc[i].action;
180                 if (!action)
181                         goto unlock;
182
183                 seq_printf(p, "%3d: %10u ", i, kstat_irqs(i));
184                 seq_printf(p, "  %s", action->name);
185                 for (action = action->next; action; action = action->next)
186                         seq_printf(p, ", %s", action->name);
187
188                 seq_putc(p, '\n');
189 unlock:
190                 spin_unlock_irqrestore(&irq_controller_lock, flags);
191         } else if (i == NR_IRQS) {
192 #ifdef CONFIG_ARCH_ACORN
193                 show_fiq_list(p, v);
194 #endif
195                 seq_printf(p, "Err: %10lu\n", irq_err_count);
196         }
197         return 0;
198 }
199
/*
 * IRQ lock detection.
 *
 * Hopefully, this should get us out of a few locked situations.
 * However, it may take a while for this to happen, since we need
 * a large number of IRQs to appear in the same jiffy with the
 * same instruction pointer (or within 2 instructions).
 */
208 static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
209 {
210         unsigned long instr_ptr = instruction_pointer(regs);
211
212         if (desc->lck_jif == jiffies &&
213             desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
214                 desc->lck_cnt += 1;
215
216                 if (desc->lck_cnt > MAX_IRQ_CNT) {
217                         printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
218                         return 1;
219                 }
220         } else {
221                 desc->lck_cnt = 0;
222                 desc->lck_pc  = instruction_pointer(regs);
223                 desc->lck_jif = jiffies;
224         }
225         return 0;
226 }
227
228 static void
229 report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret)
230 {
231         static int count = 100;
232         struct irqaction *action;
233
234         if (!count)
235                 return;
236
237         count--;
238
239         if (ret != IRQ_HANDLED && ret != IRQ_NONE) {
240                 printk("irq%u: bogus retval mask %x\n", irq, ret);
241         } else {
242                 printk("irq%u: nobody cared\n", irq);
243         }
244         show_regs(regs);
245         dump_stack();
246         printk(KERN_ERR "handlers:");
247         action = desc->action;
248         do {
249                 printk("\n" KERN_ERR "[<%p>]", action->handler);
250                 print_symbol(" (%s)", (unsigned long)action->handler);
251                 action = action->next;
252         } while (action);
253         printk("\n");
254 }
255
/*
 * Run the chain of irqaction handlers for @irq and return the OR of
 * their return values (IRQ_NONE / IRQ_HANDLED bits).
 *
 * Called with irq_controller_lock held: the lock is dropped around the
 * handler calls and re-taken (with IRQs disabled, via spin_lock_irq)
 * before returning.
 */
static int
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
	unsigned int status;
	int retval = 0;

	spin_unlock(&irq_controller_lock);

	/* "Fast" (SA_INTERRUPT) handlers run with IRQs disabled;
	 * otherwise re-enable interrupts for the handlers. */
	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	status = 0;
	do {
		/* Accumulate flags so SA_SAMPLE_RANDOM of any handler
		 * in the chain triggers entropy collection below. */
		status |= action->flags;
		retval |= action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);

	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	/* Note the asymmetry: this also re-disables local IRQs
	 * regardless of whether we enabled them above. */
	spin_lock_irq(&irq_controller_lock);

	return retval;
}
281
282 /*
283  * This is for software-decoded IRQs.  The caller is expected to
284  * handle the ack, clear, mask and unmask issues.
285  */
286 void
287 do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
288 {
289         struct irqaction *action;
290         const int cpu = smp_processor_id();
291
292         desc->triggered = 1;
293
294         kstat_cpu(cpu).irqs[irq]++;
295
296         action = desc->action;
297         if (action) {
298                 int ret = __do_irq(irq, action, regs);
299                 if (ret != IRQ_HANDLED)
300                         report_bad_irq(irq, regs, desc, ret);
301         }
302 }
303
/*
 * Most edge-triggered IRQ implementations seem to take a broken
 * approach to this.  Hence the complexity.
 */
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	const int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ.  Instead, turn on the
	 * hardware masks.
	 */
	if (unlikely(desc->running || desc->disable_depth))
		goto running;

	/*
	 * Acknowledge and clear the IRQ, but don't mask it.
	 */
	desc->chip->ack(irq);

	/*
	 * Mark the IRQ currently in progress.
	 */
	desc->running = 1;

	kstat_cpu(cpu).irqs[irq]++;

	do {
		struct irqaction *action;
		int ret;

		action = desc->action;
		if (!action)
			break;

		/*
		 * A new edge arrived while we were handling the previous
		 * one (desc->pending was set via the "running" path below):
		 * unmask and loop again to process it, unless we have been
		 * disabled in the meantime.
		 */
		if (desc->pending && !desc->disable_depth) {
			desc->pending = 0;
			desc->chip->unmask(irq);
		}

		ret = __do_irq(irq, action, regs);
		if (ret != IRQ_HANDLED)
			report_bad_irq(irq, regs, desc, ret);
	} while (desc->pending && !desc->disable_depth);

	desc->running = 0;

	/*
	 * If we were disabled or freed, shut down the handler.
	 */
	if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
		return;

 running:
	/*
	 * We got another IRQ while this one was masked or
	 * currently running.  Delay it.
	 */
	desc->pending = 1;
	desc->chip->mask(irq);
	desc->chip->ack(irq);
}
370
371 /*
372  * Level-based IRQ handler.  Nice and simple.
373  */
374 void
375 do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
376 {
377         struct irqaction *action;
378         const int cpu = smp_processor_id();
379
380         desc->triggered = 1;
381
382         /*
383          * Acknowledge, clear _AND_ disable the interrupt.
384          */
385         desc->chip->ack(irq);
386
387         if (likely(!desc->disable_depth)) {
388                 kstat_cpu(cpu).irqs[irq]++;
389
390                 /*
391                  * Return with this interrupt masked if no action
392                  */
393                 action = desc->action;
394                 if (action) {
395                         int ret = __do_irq(irq, desc->action, regs);
396
397                         if (ret != IRQ_HANDLED)
398                                 report_bad_irq(irq, regs, desc, ret);
399
400                         if (likely(!desc->disable_depth &&
401                                    !check_irq_lock(desc, irq, regs)))
402                                 desc->chip->unmask(irq);
403                 }
404         }
405 }
406
/*
 * Re-run every descriptor queued on irq_pending.  Called from
 * asm_do_IRQ with irq_controller_lock held; the handle() callbacks
 * run under that same lock.
 */
static void do_pending_irqs(struct pt_regs *regs)
{
	struct list_head head, *l, *n;

	do {
		struct irqdesc *desc;

		/*
		 * First, take the pending interrupts off the list.
		 * The act of calling the handlers may add some IRQs
		 * back onto the list.
		 */
		head = irq_pending;
		INIT_LIST_HEAD(&irq_pending);
		/* The struct copy above left the first and last nodes
		 * pointing at the (now reinitialized) global head; fix
		 * them up to point at our local copy. */
		head.next->prev = &head;
		head.prev->next = &head;

		/*
		 * Now run each entry.  We must delete it from our
		 * list before calling the handler.
		 */
		list_for_each_safe(l, n, &head) {
			desc = list_entry(l, struct irqdesc, pend);
			list_del_init(&desc->pend);
			/* desc - irq_desc recovers the IRQ number. */
			desc->handle(desc - irq_desc, desc, regs);
		}

		/*
		 * The list must be empty.
		 */
		BUG_ON(!list_empty(&head));
	} while (!list_empty(&irq_pending));
}
440
/*
 * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
 * come via this function.  Instead, they should provide their
 * own 'handler'
 */
asmlinkage void asm_do_IRQ(int irq, struct pt_regs *regs)
{
	struct irqdesc *desc = irq_desc + irq;

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 * (desc may point past irq_desc[] at this point, but it is
	 * not dereferenced until after this check replaces it.)
	 */
	if (irq >= NR_IRQS)
		desc = &bad_irq_desc;

	irq_enter();
	spin_lock(&irq_controller_lock);
	desc->handle(irq, desc, regs);

	/*
	 * Now re-run any pending interrupts.
	 */
	if (!list_empty(&irq_pending))
		do_pending_irqs(regs);

	spin_unlock(&irq_controller_lock);
	irq_exit();
}
470
/*
 * Install the flow handler for @irq.  A NULL @handle (or do_bad_IRQ)
 * shuts the interrupt down: the line is masked, acked and marked
 * disabled.  A chained handler (@is_chained) is removed from the
 * request/probe pool (valid = probe_ok = 0) and enabled immediately.
 */
void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
		return;
	}

	if (handle == NULL)
		handle = do_bad_IRQ;

	desc = irq_desc + irq;

	/* A chained handler on the dummy chip is almost certainly a
	 * setup-ordering mistake: the chip should be set first. */
	if (is_chained && desc->chip == &bad_chip)
		printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (handle == do_bad_IRQ) {
		desc->chip->mask(irq);
		desc->chip->ack(irq);
		desc->disable_depth = 1;
	}
	desc->handle = handle;
	if (handle != do_bad_IRQ && is_chained) {
		desc->valid = 0;
		desc->probe_ok = 0;
		desc->disable_depth = 0;
		desc->chip->unmask(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
504
505 void set_irq_chip(unsigned int irq, struct irqchip *chip)
506 {
507         struct irqdesc *desc;
508         unsigned long flags;
509
510         if (irq >= NR_IRQS) {
511                 printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
512                 return;
513         }
514
515         if (chip == NULL)
516                 chip = &bad_chip;
517
518         desc = irq_desc + irq;
519         spin_lock_irqsave(&irq_controller_lock, flags);
520         desc->chip = chip;
521         spin_unlock_irqrestore(&irq_controller_lock, flags);
522 }
523
524 int set_irq_type(unsigned int irq, unsigned int type)
525 {
526         struct irqdesc *desc;
527         unsigned long flags;
528         int ret = -ENXIO;
529
530         if (irq >= NR_IRQS) {
531                 printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
532                 return -ENODEV;
533         }
534
535         desc = irq_desc + irq;
536         if (desc->chip->type) {
537                 spin_lock_irqsave(&irq_controller_lock, flags);
538                 ret = desc->chip->type(irq, type);
539                 spin_unlock_irqrestore(&irq_controller_lock, flags);
540         }
541
542         return ret;
543 }
544
545 void set_irq_flags(unsigned int irq, unsigned int iflags)
546 {
547         struct irqdesc *desc;
548         unsigned long flags;
549
550         if (irq >= NR_IRQS) {
551                 printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
552                 return;
553         }
554
555         desc = irq_desc + irq;
556         spin_lock_irqsave(&irq_controller_lock, flags);
557         desc->valid = (iflags & IRQF_VALID) != 0;
558         desc->probe_ok = (iflags & IRQF_PROBE) != 0;
559         desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
560         spin_unlock_irqrestore(&irq_controller_lock, flags);
561 }
562
/*
 * Attach @new to the IRQ line @irq: performs the shared-IRQ agreement
 * check, appends the action to the chain, and for the first handler on
 * a line resets the descriptor state and (unless marked noautoenable)
 * enables the interrupt.  Returns 0 on success or -EBUSY if sharing
 * is not agreed by both old and new actions.
 */
int setup_irq(unsigned int irq, struct irqaction *new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	struct irqdesc *desc;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&irq_controller_lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	/* p now points at the NULL tail link (or the empty head). */
	*p = new;

	if (!shared) {
		/* First handler on this line: reset descriptor state. */
		desc->probing = 0;
		desc->running = 0;
		desc->pending = 0;
		desc->disable_depth = 1;
		if (!desc->noautoenable) {
			desc->disable_depth = 0;
			desc->chip->unmask(irq);
		}
	}

	spin_unlock_irqrestore(&irq_controller_lock, flags);
	return 0;
}
624
625 /**
626  *      request_irq - allocate an interrupt line
627  *      @irq: Interrupt line to allocate
628  *      @handler: Function to be called when the IRQ occurs
629  *      @irqflags: Interrupt type flags
630  *      @devname: An ascii name for the claiming device
631  *      @dev_id: A cookie passed back to the handler function
632  *
633  *      This call allocates interrupt resources and enables the
634  *      interrupt line and IRQ handling. From the point this
635  *      call is made your handler function may be invoked. Since
636  *      your handler function must clear any interrupt the board
637  *      raises, you must take care both to initialise your hardware
638  *      and to set up the interrupt handler in the right order.
639  *
640  *      Dev_id must be globally unique. Normally the address of the
641  *      device data structure is used as the cookie. Since the handler
642  *      receives this value it makes sense to use it.
643  *
644  *      If your interrupt is shared you must pass a non NULL dev_id
645  *      as this is required when freeing the interrupt.
646  *
647  *      Flags:
648  *
649  *      SA_SHIRQ                Interrupt is shared
650  *
651  *      SA_INTERRUPT            Disable local interrupts while processing
652  *
653  *      SA_SAMPLE_RANDOM        The interrupt can be used for entropy
654  *
655  */
656 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
657                  unsigned long irq_flags, const char * devname, void *dev_id)
658 {
659         unsigned long retval;
660         struct irqaction *action;
661
662         if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
663             (irq_flags & SA_SHIRQ && !dev_id))
664                 return -EINVAL;
665
666         action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
667         if (!action)
668                 return -ENOMEM;
669
670         action->handler = handler;
671         action->flags = irq_flags;
672         action->mask = 0;
673         action->name = devname;
674         action->next = NULL;
675         action->dev_id = dev_id;
676
677         retval = setup_irq(irq, action);
678
679         if (retval)
680                 kfree(action);
681         return retval;
682 }
683
684 EXPORT_SYMBOL(request_irq);
685
/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS || !irq_desc[irq].valid) {
		printk(KERN_ERR "Trying to free IRQ%d\n",irq);
		dump_stack();
		return;
	}

	/* Unlink the matching action from the chain under the lock. */
	spin_lock_irqsave(&irq_controller_lock, flags);
	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		break;
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);

	if (!action) {
		/* No action with this dev_id was registered. */
		printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
		dump_stack();
	} else {
		/* Wait for any in-flight invocation of the handler to
		 * finish before freeing the action behind it. */
		synchronize_irq(irq);
		kfree(action);
	}
}
728
729 EXPORT_SYMBOL(free_irq);
730
/* Held from probe_irq_on() until probe_irq_off()/probe_irq_mask()
 * releases it, serializing IRQ autoprobe sequences. */
static DECLARE_MUTEX(probe_sem);
732
/* Start the interrupt probing.  Unlike other architectures,
 * we don't return a mask of interrupts from probe_irq_on,
 * but return the number of interrupts enabled for the probe.
 * The interrupts which have been enabled for probing is
 * instead recorded in the irq_desc structure.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i, irqs = 0;
	unsigned long delay;

	/* Released by probe_irq_off()/probe_irq_mask(). */
	down(&probe_sem);

	/*
	 * first snaffle up any unassigned but
	 * probe-able interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (!irq_desc[i].probe_ok || irq_desc[i].action)
			continue;

		irq_desc[i].probing = 1;
		irq_desc[i].triggered = 0;
		if (irq_desc[i].chip->type)
			irq_desc[i].chip->type(i, IRQT_PROBE);
		irq_desc[i].chip->unmask(i);
		irqs += 1;
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * wait for spurious interrupts to mask themselves out again
	 * (busy-wait; interrupts are enabled here after the
	 * spin_unlock_irq above, so handlers can run)
	 */
	for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
		/* min 100ms delay */;

	/*
	 * now filter out any obviously spurious interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		/* Anything that fired during the delay with no driver
		 * attached is spurious: drop it from the probe set. */
		if (irq_desc[i].probing && irq_desc[i].triggered) {
			irq_desc[i].probing = 0;
			irqs -= 1;
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return irqs;
}
784
785 EXPORT_SYMBOL(probe_irq_on);
786
787 unsigned int probe_irq_mask(unsigned long irqs)
788 {
789         unsigned int mask = 0, i;
790
791         spin_lock_irq(&irq_controller_lock);
792         for (i = 0; i < 16 && i < NR_IRQS; i++)
793                 if (irq_desc[i].probing && irq_desc[i].triggered)
794                         mask |= 1 << i;
795         spin_unlock_irq(&irq_controller_lock);
796
797         up(&probe_sem);
798
799         return mask;
800 }
801
/*
 * Possible return values:
 *  >= 0 - interrupt number
 *    -1 - no interrupt/many interrupts
 */
int probe_irq_off(unsigned long irqs)
{
	unsigned int i;
	int irq_found = NO_IRQ;

	/*
	 * look at the interrupts, and find exactly one
	 * that we were probing has been triggered
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing &&
		    irq_desc[i].triggered) {
			if (irq_found != NO_IRQ) {
				/* Second hit: ambiguous result, give up. */
				irq_found = NO_IRQ;
				goto out;
			}
			irq_found = i;
		}
	}

	/* NOTE(review): if NO_IRQ == -1 this assignment is a no-op;
	 * presumably it normalizes other NO_IRQ encodings — confirm
	 * NO_IRQ's definition for this architecture. */
	if (irq_found == -1)
		irq_found = NO_IRQ;
out:
	spin_unlock_irq(&irq_controller_lock);

	/* End of the probe sequence started by probe_irq_on(). */
	up(&probe_sem);

	return irq_found;
}
837
838 EXPORT_SYMBOL(probe_irq_off);
839
/* /proc/irq setup hook: nothing to do in this implementation. */
void __init init_irq_proc(void)
{
}
843
844 void __init init_IRQ(void)
845 {
846         struct irqdesc *desc;
847         extern void init_dma(void);
848         int irq;
849
850         for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
851                 *desc = bad_irq_desc;
852                 INIT_LIST_HEAD(&desc->pend);
853         }
854
855         init_arch_irq();
856         init_dma();
857 }