patch-2_6_7-vs1_9_1_12
[linux-2.6.git] / arch / arm / kernel / irq.c
1 /*
2  *  linux/arch/arm/kernel/irq.c
3  *
4  *  Copyright (C) 1992 Linus Torvalds
5  *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  *  This file contains the code used by various IRQ handling routines:
12  *  asking for different IRQ's should be done through these routines
13  *  instead of just grabbing them. Thus setups with different IRQ numbers
14  *  shouldn't result in any weird surprises, and installing new handlers
15  *  should be easier.
16  *
17  *  IRQ's are in fact implemented a bit like signal handlers for the kernel.
18  *  Naturally it's not a 1:1 relation, but there are similarities.
19  */
20 #include <linux/config.h>
21 #include <linux/kernel_stat.h>
22 #include <linux/module.h>
23 #include <linux/signal.h>
24 #include <linux/ioport.h>
25 #include <linux/interrupt.h>
26 #include <linux/ptrace.h>
27 #include <linux/slab.h>
28 #include <linux/random.h>
29 #include <linux/smp.h>
30 #include <linux/init.h>
31 #include <linux/seq_file.h>
32 #include <linux/errno.h>
33 #include <linux/list.h>
34 #include <linux/kallsyms.h>
35
36 #include <asm/irq.h>
37 #include <asm/system.h>
38 #include <asm/mach/irq.h>
39
/*
 * Maximum IRQ count.  Currently, this is arbitrary.  However, it should
 * not be set too low to prevent false triggering.  Conversely, if it
 * is set too high, then you could miss a stuck IRQ.
 *
 * Maybe we ought to set a timer and re-enable the IRQ at a later time?
 */
#define MAX_IRQ_CNT     100000
48
/* Count of spurious/unhandled interrupts; shown as "Err:" in show_interrupts(). */
static volatile unsigned long irq_err_count;
/* Protects irq_desc[] state, the irq_pending list, and chip operations. */
static spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED;
/* IRQs deferred for software replay (see enable_irq()/do_pending_irqs()). */
static LIST_HEAD(irq_pending);

struct irqdesc irq_desc[NR_IRQS];
/* Installed by machine setup code; invoked from init_IRQ(). */
void (*init_arch_irq)(void) __initdata = NULL;
55
/*
 * Dummy mask/unmask handler: used by bad_chip for IRQs with no real
 * controller operations.  Intentionally does nothing.
 */
void dummy_mask_unmask_irq(unsigned int irq)
{
}
62
/*
 * Placeholder interrupt handler: never claims the interrupt.
 */
irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
{
        return IRQ_NONE;
}
67
68 void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
69 {
70         irq_err_count += 1;
71         printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
72 }
73
/* Chip for IRQs without a registered controller: every op is a no-op. */
static struct irqchip bad_chip = {
        .ack    = dummy_mask_unmask_irq,
        .mask   = dummy_mask_unmask_irq,
        .unmask = dummy_mask_unmask_irq,
};
79
/*
 * Descriptor used for out-of-range or uninitialised IRQs: starts
 * disabled and routes all events to do_bad_IRQ().
 */
static struct irqdesc bad_irq_desc = {
        .chip           = &bad_chip,
        .handle         = do_bad_IRQ,
        .pend           = LIST_HEAD_INIT(bad_irq_desc.pend),
        .disable_depth  = 1,
};
86
/**
 *      disable_irq - disable an irq (lazily)
 *      @irq: Interrupt to disable
 *
 *      Disable the selected interrupt line.  Enables and disables
 *      are nested.  We do this lazily: the hardware line is not
 *      masked here, and we do NOT wait for a running handler to
 *      complete (unlike disable_irq on some other architectures).
 *
 *      This function may be called from IRQ context.
 */
void disable_irq(unsigned int irq)
{
        struct irqdesc *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&irq_controller_lock, flags);
        desc->disable_depth++;
        /* Drop any deferred-replay entry so the IRQ isn't re-run while disabled. */
        list_del_init(&desc->pend);
        spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq);
107
/**
 *      enable_irq - enable interrupt handling on an irq
 *      @irq: Interrupt to enable
 *
 *      Re-enables the processing of interrupts on this IRQ line.
 *      Note that this may call the interrupt handler, so you may
 *      get unexpected results if you hold IRQs disabled.
 *
 *      This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
        struct irqdesc *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&irq_controller_lock, flags);
        if (unlikely(!desc->disable_depth)) {
                printk("enable_irq(%u) unbalanced from %p\n", irq,
                        __builtin_return_address(0));
        } else if (!--desc->disable_depth) {
                /* Depth hit zero: genuinely unmask the hardware. */
                desc->probing = 0;
                desc->chip->unmask(irq);

                /*
                 * If the interrupt is waiting to be processed,
                 * try to re-run it.  We can't directly run it
                 * from here since the caller might be in an
                 * interrupt-protected region.
                 */
                if (desc->pending && list_empty(&desc->pend)) {
                        desc->pending = 0;
                        /*
                         * Prefer a hardware retrigger; if unavailable or it
                         * fails, queue on irq_pending for software replay
                         * in asm_do_IRQ()/do_pending_irqs().
                         */
                        if (!desc->chip->retrigger ||
                            desc->chip->retrigger(irq))
                                list_add(&desc->pend, &irq_pending);
                }
        }
        spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq);
147
148 /*
149  * Enable wake on selected irq
150  */
151 void enable_irq_wake(unsigned int irq)
152 {
153         struct irqdesc *desc = irq_desc + irq;
154         unsigned long flags;
155
156         spin_lock_irqsave(&irq_controller_lock, flags);
157         if (desc->chip->wake)
158                 desc->chip->wake(irq, 1);
159         spin_unlock_irqrestore(&irq_controller_lock, flags);
160 }
161 EXPORT_SYMBOL(enable_irq_wake);
162
163 void disable_irq_wake(unsigned int irq)
164 {
165         struct irqdesc *desc = irq_desc + irq;
166         unsigned long flags;
167
168         spin_lock_irqsave(&irq_controller_lock, flags);
169         if (desc->chip->wake)
170                 desc->chip->wake(irq, 0);
171         spin_unlock_irqrestore(&irq_controller_lock, flags);
172 }
173 EXPORT_SYMBOL(disable_irq_wake);
174
/*
 * seq_file show routine for /proc/interrupts: one line per claimed IRQ
 * (count plus all attached handler names), then optionally the FIQ
 * list, and finally the spurious-interrupt error count.
 */
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v;
        struct irqaction * action;
        unsigned long flags;

        if (i < NR_IRQS) {
                /* Lock out concurrent setup_irq()/free_irq() while walking the chain. */
                spin_lock_irqsave(&irq_controller_lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto unlock;

                seq_printf(p, "%3d: %10u ", i, kstat_irqs(i));
                seq_printf(p, "  %s", action->name);
                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);

                seq_putc(p, '\n');
unlock:
                spin_unlock_irqrestore(&irq_controller_lock, flags);
        } else if (i == NR_IRQS) {
#ifdef CONFIG_ARCH_ACORN
                show_fiq_list(p, v);
#endif
                seq_printf(p, "Err: %10lu\n", irq_err_count);
        }
        return 0;
}
203
204 /*
205  * IRQ lock detection.
206  *
207  * Hopefully, this should get us out of a few locked situations.
208  * However, it may take a while for this to happen, since we need
209  * a large number if IRQs to appear in the same jiffie with the
210  * same instruction pointer (or within 2 instructions).
211  */
212 static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
213 {
214         unsigned long instr_ptr = instruction_pointer(regs);
215
216         if (desc->lck_jif == jiffies &&
217             desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
218                 desc->lck_cnt += 1;
219
220                 if (desc->lck_cnt > MAX_IRQ_CNT) {
221                         printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
222                         return 1;
223                 }
224         } else {
225                 desc->lck_cnt = 0;
226                 desc->lck_pc  = instruction_pointer(regs);
227                 desc->lck_jif = jiffies;
228         }
229         return 0;
230 }
231
232 static void
233 report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret)
234 {
235         static int count = 100;
236         struct irqaction *action;
237
238         if (!count)
239                 return;
240
241         count--;
242
243         if (ret != IRQ_HANDLED && ret != IRQ_NONE) {
244                 printk("irq%u: bogus retval mask %x\n", irq, ret);
245         } else {
246                 printk("irq%u: nobody cared\n", irq);
247         }
248         show_regs(regs);
249         dump_stack();
250         printk(KERN_ERR "handlers:");
251         action = desc->action;
252         do {
253                 printk("\n" KERN_ERR "[<%p>]", action->handler);
254                 print_symbol(" (%s)", (unsigned long)action->handler);
255                 action = action->next;
256         } while (action);
257         printk("\n");
258 }
259
/*
 * Run every handler chained on @action for @irq, OR-ing their return
 * values (IRQ_HANDLED / IRQ_NONE) into the result.
 *
 * Called with irq_controller_lock held; the lock is dropped for the
 * duration of the handlers and re-taken before returning.  Note the
 * asymmetry: released with spin_unlock() but re-acquired with
 * spin_lock_irq(), because a handler without SA_INTERRUPT runs with
 * local interrupts enabled and they must be off again on return.
 */
static int
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
        unsigned int status;
        int retval = 0;

        spin_unlock(&irq_controller_lock);

        /* Handlers registered without SA_INTERRUPT run with IRQs enabled. */
        if (!(action->flags & SA_INTERRUPT))
                local_irq_enable();

        status = 0;
        do {
                status |= action->flags;
                retval |= action->handler(irq, action->dev_id, regs);
                action = action->next;
        } while (action);

        /* Feed the entropy pool if any handler requested it. */
        if (status & SA_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);

        spin_lock_irq(&irq_controller_lock);

        return retval;
}
285
286 /*
287  * This is for software-decoded IRQs.  The caller is expected to
288  * handle the ack, clear, mask and unmask issues.
289  */
290 void
291 do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
292 {
293         struct irqaction *action;
294         const int cpu = smp_processor_id();
295
296         desc->triggered = 1;
297
298         kstat_cpu(cpu).irqs[irq]++;
299
300         action = desc->action;
301         if (action) {
302                 int ret = __do_irq(irq, action, regs);
303                 if (ret != IRQ_HANDLED)
304                         report_bad_irq(irq, regs, desc, ret);
305         }
306 }
307
/*
 * Most edge-triggered IRQ implementations seem to take a broken
 * approach to this.  Hence the complexity.
 *
 * Edges that arrive while the IRQ is disabled or already being handled
 * are recorded in desc->pending and replayed either by the loop below
 * or later via enable_irq()/do_pending_irqs().
 */
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
        const int cpu = smp_processor_id();

        desc->triggered = 1;

        /*
         * If we're currently running this IRQ, or its disabled,
         * we shouldn't process the IRQ.  Instead, turn on the
         * hardware masks.
         */
        if (unlikely(desc->running || desc->disable_depth))
                goto running;

        /*
         * Acknowledge and clear the IRQ, but don't mask it.
         */
        desc->chip->ack(irq);

        /*
         * Mark the IRQ currently in progress.
         */
        desc->running = 1;

        kstat_cpu(cpu).irqs[irq]++;

        do {
                struct irqaction *action;
                int ret;

                action = desc->action;
                if (!action)
                        break;

                /*
                 * A new edge was latched while we were handling the
                 * previous one: unmask the line before replaying it.
                 */
                if (desc->pending && !desc->disable_depth) {
                        desc->pending = 0;
                        desc->chip->unmask(irq);
                }

                ret = __do_irq(irq, action, regs);
                if (ret != IRQ_HANDLED)
                        report_bad_irq(irq, regs, desc, ret);
        } while (desc->pending && !desc->disable_depth);

        desc->running = 0;

        /*
         * If we were disabled or freed, shut down the handler.
         */
        if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
                return;

 running:
        /*
         * We got another IRQ while this one was masked or
         * currently running.  Delay it.
         */
        desc->pending = 1;
        desc->chip->mask(irq);
        desc->chip->ack(irq);
}
374
375 /*
376  * Level-based IRQ handler.  Nice and simple.
377  */
378 void
379 do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
380 {
381         struct irqaction *action;
382         const int cpu = smp_processor_id();
383
384         desc->triggered = 1;
385
386         /*
387          * Acknowledge, clear _AND_ disable the interrupt.
388          */
389         desc->chip->ack(irq);
390
391         if (likely(!desc->disable_depth)) {
392                 kstat_cpu(cpu).irqs[irq]++;
393
394                 /*
395                  * Return with this interrupt masked if no action
396                  */
397                 action = desc->action;
398                 if (action) {
399                         int ret = __do_irq(irq, desc->action, regs);
400
401                         if (ret != IRQ_HANDLED)
402                                 report_bad_irq(irq, regs, desc, ret);
403
404                         if (likely(!desc->disable_depth &&
405                                    !check_irq_lock(desc, irq, regs)))
406                                 desc->chip->unmask(irq);
407                 }
408         }
409 }
410
/*
 * Replay IRQs deferred onto the irq_pending list (e.g. edges that
 * arrived while masked, or lines re-enabled while pending).
 * Called with irq_controller_lock held.
 */
static void do_pending_irqs(struct pt_regs *regs)
{
        struct list_head head, *l, *n;

        do {
                struct irqdesc *desc;

                /*
                 * First, take the pending interrupts off the list.
                 * The act of calling the handlers may add some IRQs
                 * back onto the list.
                 */
                head = irq_pending;
                INIT_LIST_HEAD(&irq_pending);
                /*
                 * The struct copy above left the first and last nodes
                 * pointing at the old global head; repoint them at our
                 * local copy to complete the splice.
                 */
                head.next->prev = &head;
                head.prev->next = &head;

                /*
                 * Now run each entry.  We must delete it from our
                 * list before calling the handler.
                 */
                list_for_each_safe(l, n, &head) {
                        desc = list_entry(l, struct irqdesc, pend);
                        list_del_init(&desc->pend);
                        /* desc - irq_desc recovers the IRQ number from the array slot. */
                        desc->handle(desc - irq_desc, desc, regs);
                }

                /*
                 * The list must be empty.
                 */
                BUG_ON(!list_empty(&head));
        } while (!list_empty(&irq_pending));
}
444
445 /*
446  * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
447  * come via this function.  Instead, they should provide their
448  * own 'handler'
449  */
450 asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
451 {
452         struct irqdesc *desc = irq_desc + irq;
453
454         /*
455          * Some hardware gives randomly wrong interrupts.  Rather
456          * than crashing, do something sensible.
457          */
458         if (irq >= NR_IRQS)
459                 desc = &bad_irq_desc;
460
461         irq_enter();
462         spin_lock(&irq_controller_lock);
463         desc->handle(irq, desc, regs);
464
465         /*
466          * Now re-run any pending interrupts.
467          */
468         if (!list_empty(&irq_pending))
469                 do_pending_irqs(regs);
470
471         spin_unlock(&irq_controller_lock);
472         irq_exit();
473 }
474
/*
 * Install the flow handler @handle for @irq.  A NULL handler reverts
 * the IRQ to do_bad_IRQ.  @is_chained marks demultiplexing handlers:
 * those are enabled immediately and removed from request/probe use.
 */
void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
{
        struct irqdesc *desc;
        unsigned long flags;

        if (irq >= NR_IRQS) {
                printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
                return;
        }

        if (handle == NULL)
                handle = do_bad_IRQ;

        desc = irq_desc + irq;

        /* A chained handler on a chip-less IRQ is almost certainly a bug. */
        if (is_chained && desc->chip == &bad_chip)
                printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);

        spin_lock_irqsave(&irq_controller_lock, flags);
        if (handle == do_bad_IRQ) {
                /* Tearing down: quiesce the line and mark it disabled. */
                desc->chip->mask(irq);
                desc->chip->ack(irq);
                desc->disable_depth = 1;
        }
        desc->handle = handle;
        if (handle != do_bad_IRQ && is_chained) {
                /* Chained IRQs cannot be requested or probed; enable now. */
                desc->valid = 0;
                desc->probe_ok = 0;
                desc->disable_depth = 0;
                desc->chip->unmask(irq);
        }
        spin_unlock_irqrestore(&irq_controller_lock, flags);
}
508
509 void set_irq_chip(unsigned int irq, struct irqchip *chip)
510 {
511         struct irqdesc *desc;
512         unsigned long flags;
513
514         if (irq >= NR_IRQS) {
515                 printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
516                 return;
517         }
518
519         if (chip == NULL)
520                 chip = &bad_chip;
521
522         desc = irq_desc + irq;
523         spin_lock_irqsave(&irq_controller_lock, flags);
524         desc->chip = chip;
525         spin_unlock_irqrestore(&irq_controller_lock, flags);
526 }
527
/*
 * Configure the trigger type (IRQT_*) for @irq via the chip's type
 * method.  Returns the chip method's result, -ENXIO if the chip has
 * no type method, or -ENODEV for an out-of-range IRQ number.
 * NOTE(review): the two failure paths use different errno values
 * (-ENODEV vs -ENXIO) — confirm callers before unifying them.
 */
int set_irq_type(unsigned int irq, unsigned int type)
{
        struct irqdesc *desc;
        unsigned long flags;
        int ret = -ENXIO;

        if (irq >= NR_IRQS) {
                printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
                return -ENODEV;
        }

        desc = irq_desc + irq;
        if (desc->chip->type) {
                spin_lock_irqsave(&irq_controller_lock, flags);
                ret = desc->chip->type(irq, type);
                spin_unlock_irqrestore(&irq_controller_lock, flags);
        }

        return ret;
}
EXPORT_SYMBOL(set_irq_type);
549
550 void set_irq_flags(unsigned int irq, unsigned int iflags)
551 {
552         struct irqdesc *desc;
553         unsigned long flags;
554
555         if (irq >= NR_IRQS) {
556                 printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
557                 return;
558         }
559
560         desc = irq_desc + irq;
561         spin_lock_irqsave(&irq_controller_lock, flags);
562         desc->valid = (iflags & IRQF_VALID) != 0;
563         desc->probe_ok = (iflags & IRQF_PROBE) != 0;
564         desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
565         spin_unlock_irqrestore(&irq_controller_lock, flags);
566 }
567
/*
 * Attach @new to @irq's action chain, enforcing the SA_SHIRQ sharing
 * rule, and enable the line on first use unless marked noautoenable.
 * Returns 0 on success or -EBUSY when sharing is not agreed.
 */
int setup_irq(unsigned int irq, struct irqaction *new)
{
        int shared = 0;
        struct irqaction *old, **p;
        unsigned long flags;
        struct irqdesc *desc;

        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & SA_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, we want to call it first,
                 * outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is attempted to be loaded, without actually
                 * installing a new handler, but is this really a problem,
                 * only the sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * The following block of code has to be executed atomically
         */
        desc = irq_desc + irq;
        spin_lock_irqsave(&irq_controller_lock, flags);
        p = &desc->action;
        if ((old = *p) != NULL) {
                /* Can't share interrupts unless both agree to */
                if (!(old->flags & new->flags & SA_SHIRQ)) {
                        spin_unlock_irqrestore(&irq_controller_lock, flags);
                        return -EBUSY;
                }

                /* add new interrupt at end of irq queue */
                do {
                        p = &old->next;
                        old = *p;
                } while (old);
                shared = 1;
        }

        *p = new;

        if (!shared) {
                /* First handler on this line: reset state and autoenable. */
                desc->probing = 0;
                desc->running = 0;
                desc->pending = 0;
                desc->disable_depth = 1;
                if (!desc->noautoenable) {
                        desc->disable_depth = 0;
                        desc->chip->unmask(irq);
                }
        }

        spin_unlock_irqrestore(&irq_controller_lock, flags);
        return 0;
}
629
630 /**
631  *      request_irq - allocate an interrupt line
632  *      @irq: Interrupt line to allocate
633  *      @handler: Function to be called when the IRQ occurs
634  *      @irqflags: Interrupt type flags
635  *      @devname: An ascii name for the claiming device
636  *      @dev_id: A cookie passed back to the handler function
637  *
638  *      This call allocates interrupt resources and enables the
639  *      interrupt line and IRQ handling. From the point this
640  *      call is made your handler function may be invoked. Since
641  *      your handler function must clear any interrupt the board
642  *      raises, you must take care both to initialise your hardware
643  *      and to set up the interrupt handler in the right order.
644  *
645  *      Dev_id must be globally unique. Normally the address of the
646  *      device data structure is used as the cookie. Since the handler
647  *      receives this value it makes sense to use it.
648  *
649  *      If your interrupt is shared you must pass a non NULL dev_id
650  *      as this is required when freeing the interrupt.
651  *
652  *      Flags:
653  *
654  *      SA_SHIRQ                Interrupt is shared
655  *
656  *      SA_INTERRUPT            Disable local interrupts while processing
657  *
658  *      SA_SAMPLE_RANDOM        The interrupt can be used for entropy
659  *
660  */
661 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
662                  unsigned long irq_flags, const char * devname, void *dev_id)
663 {
664         unsigned long retval;
665         struct irqaction *action;
666
667         if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
668             (irq_flags & SA_SHIRQ && !dev_id))
669                 return -EINVAL;
670
671         action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
672         if (!action)
673                 return -ENOMEM;
674
675         action->handler = handler;
676         action->flags = irq_flags;
677         action->mask = 0;
678         action->name = devname;
679         action->next = NULL;
680         action->dev_id = dev_id;
681
682         retval = setup_irq(irq, action);
683
684         if (retval)
685                 kfree(action);
686         return retval;
687 }
688
689 EXPORT_SYMBOL(request_irq);
690
/**
 *      free_irq - free an interrupt
 *      @irq: Interrupt line to free
 *      @dev_id: Device identity to free
 *
 *      Remove an interrupt handler. The handler is removed and if the
 *      interrupt line is no longer in use by any driver it is disabled.
 *      On a shared IRQ the caller must ensure the interrupt is disabled
 *      on the card it drives before calling this function.
 *
 *      This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
        struct irqaction * action, **p;
        unsigned long flags;

        if (irq >= NR_IRQS || !irq_desc[irq].valid) {
                printk(KERN_ERR "Trying to free IRQ%d\n",irq);
                dump_stack();
                return;
        }

        spin_lock_irqsave(&irq_controller_lock, flags);
        /* Walk the action chain for the entry whose dev_id matches. */
        for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
                if (action->dev_id != dev_id)
                        continue;

                /* Found it - now free it */
                *p = action->next;
                break;
        }
        spin_unlock_irqrestore(&irq_controller_lock, flags);

        if (!action) {
                printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
                dump_stack();
        } else {
                /* Wait for any executing handler to finish before freeing. */
                synchronize_irq(irq);
                kfree(action);
        }
}

EXPORT_SYMBOL(free_irq);
735
/* Serialises a probe_irq_on() .. probe_irq_off()/probe_irq_mask() sequence. */
static DECLARE_MUTEX(probe_sem);

/* Start the interrupt probing.  Unlike other architectures,
 * we don't return a mask of interrupts from probe_irq_on,
 * but return the number of interrupts enabled for the probe.
 * The interrupts which have been enabled for probing is
 * instead recorded in the irq_desc structure.
 */
unsigned long probe_irq_on(void)
{
        unsigned int i, irqs = 0;
        unsigned long delay;

        down(&probe_sem);

        /*
         * first snaffle up any unassigned but
         * probe-able interrupts
         */
        spin_lock_irq(&irq_controller_lock);
        for (i = 0; i < NR_IRQS; i++) {
                if (!irq_desc[i].probe_ok || irq_desc[i].action)
                        continue;

                irq_desc[i].probing = 1;
                irq_desc[i].triggered = 0;
                if (irq_desc[i].chip->type)
                        irq_desc[i].chip->type(i, IRQT_PROBE);
                irq_desc[i].chip->unmask(i);
                irqs += 1;
        }
        spin_unlock_irq(&irq_controller_lock);

        /*
         * wait for spurious interrupts to mask themselves out again
         * (busy-wait; jiffies advances via the timer interrupt)
         */
        for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
                /* min 100ms delay */;

        /*
         * now filter out any obviously spurious interrupts
         * (an IRQ firing with nothing attached is not a probe candidate)
         */
        spin_lock_irq(&irq_controller_lock);
        for (i = 0; i < NR_IRQS; i++) {
                if (irq_desc[i].probing && irq_desc[i].triggered) {
                        irq_desc[i].probing = 0;
                        irqs -= 1;
                }
        }
        spin_unlock_irq(&irq_controller_lock);

        return irqs;
}

EXPORT_SYMBOL(probe_irq_on);
791
792 unsigned int probe_irq_mask(unsigned long irqs)
793 {
794         unsigned int mask = 0, i;
795
796         spin_lock_irq(&irq_controller_lock);
797         for (i = 0; i < 16 && i < NR_IRQS; i++)
798                 if (irq_desc[i].probing && irq_desc[i].triggered)
799                         mask |= 1 << i;
800         spin_unlock_irq(&irq_controller_lock);
801
802         up(&probe_sem);
803
804         return mask;
805 }
806 EXPORT_SYMBOL(probe_irq_mask);
807
/*
 * Possible return values:
 *  >= 0 - interrupt number
 *    -1 - no interrupt/many interrupts
 */
int probe_irq_off(unsigned long irqs)
{
        unsigned int i;
        int irq_found = NO_IRQ;

        /*
         * look at the interrupts, and find exactly one
         * that we were probing has been triggered
         */
        spin_lock_irq(&irq_controller_lock);
        for (i = 0; i < NR_IRQS; i++) {
                if (irq_desc[i].probing &&
                    irq_desc[i].triggered) {
                        if (irq_found != NO_IRQ) {
                                /* Second hit: result is ambiguous. */
                                irq_found = NO_IRQ;
                                goto out;
                        }
                        irq_found = i;
                }
        }

        /* NOTE(review): presumably NO_IRQ == -1, which would make this
         * assignment a no-op — confirm before removing it. */
        if (irq_found == -1)
                irq_found = NO_IRQ;
out:
        spin_unlock_irq(&irq_controller_lock);

        up(&probe_sem);

        return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);
845
/* Intentionally empty: no extra procfs setup is done here. */
void __init init_irq_proc(void)
{
}
/*
 * Boot-time IRQ initialisation: mark every descriptor "bad" until the
 * machine code installs real chips/handlers, then run the arch hooks.
 */
void __init init_IRQ(void)
{
        struct irqdesc *desc;
        extern void init_dma(void);
        int irq;

        for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
                *desc = bad_irq_desc;
                /*
                 * The struct copy duplicated bad_irq_desc's list
                 * pointers; each pend head must be re-initialised so it
                 * points at itself, not at bad_irq_desc.pend.
                 */
                INIT_LIST_HEAD(&desc->pend);
        }

        init_arch_irq();
        init_dma();
}