/*
 *      linux/arch/alpha/kernel/irq.c
 *
 *      Copyright (C) 1995 Linus Torvalds
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
        [0 ... NR_IRQS-1] = {
                .handler = &no_irq_type,
                .lock = SPIN_LOCK_UNLOCKED
        }
};

static void register_irq_proc(unsigned int irq);

volatile unsigned long irq_err_count;

/*
 * Special irq handlers.
 */

irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{
        return IRQ_NONE;
}

/*
 * Generic no controller code
 */

static void no_irq_enable_disable(unsigned int irq) { }
static unsigned int no_irq_startup(unsigned int irq) { return 0; }

static void
no_irq_ack(unsigned int irq)
{
        irq_err_count++;
        printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq);
}

struct hw_interrupt_type no_irq_type = {
        .typename       = "none",
        .startup        = no_irq_startup,
        .shutdown       = no_irq_enable_disable,
        .enable         = no_irq_enable_disable,
        .disable        = no_irq_enable_disable,
        .ack            = no_irq_ack,
        .end            = no_irq_enable_disable,
};
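
/*
 * Illustrative sketch (not compiled): how a platform PIC driver
 * typically fills in a hw_interrupt_type like the one above.  All
 * example_pic_* names and the cached mask register are hypothetical;
 * real Alpha platforms supply their own mask/unmask primitives.
 */
#if 0
static unsigned long example_pic_cached_mask;

static void
example_pic_enable(unsigned int irq)
{
        example_pic_cached_mask &= ~(1UL << irq);
        example_pic_write_mask(example_pic_cached_mask);   /* hypothetical */
}

static void
example_pic_disable(unsigned int irq)
{
        example_pic_cached_mask |= 1UL << irq;
        example_pic_write_mask(example_pic_cached_mask);   /* hypothetical */
}

static unsigned int
example_pic_startup(unsigned int irq)
{
        example_pic_enable(irq);
        return 0;       /* no pending interrupt found at startup */
}

static void
example_pic_end(unsigned int irq)
{
        /* Re-enable unless the handler left the line disabled. */
        if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
                example_pic_enable(irq);
}

static struct hw_interrupt_type example_pic_irq_type = {
        .typename       = "EXAMPLE-PIC",
        .startup        = example_pic_startup,
        .shutdown       = example_pic_disable,
        .enable         = example_pic_enable,
        .disable        = example_pic_disable,
        .ack            = example_pic_disable,  /* mask while handling */
        .end            = example_pic_end,
};
#endif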

int
handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
                 struct irqaction *action)
{
        int status = 1; /* Force the "do bottom halves" bit */

        do {
                if (!(action->flags & SA_INTERRUPT))
                        local_irq_enable();
                else
                        local_irq_disable();

                status |= action->flags;
                action->handler(irq, action->dev_id, regs);
                action = action->next;
        } while (action);
        if (status & SA_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
        local_irq_disable();

        return status;
}

/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */
inline void
disable_irq_nosync(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);
        if (!desc->depth++) {
                desc->status |= IRQ_DISABLED;
                desc->handler->disable(irq);
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}

/*
 * Synchronous version of the above, making sure the IRQ is
 * no longer running on any other CPU.
 */
void
disable_irq(unsigned int irq)
{
        disable_irq_nosync(irq);
        synchronize_irq(irq);
}

void
enable_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);
        switch (desc->depth) {
        case 1: {
                unsigned int status = desc->status & ~IRQ_DISABLED;
                desc->status = status;
                if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
                        desc->status = status | IRQ_REPLAY;
                        hw_resend_irq(desc->handler, irq);
                }
                desc->handler->enable(irq);
                /* fall-through */
        }
        default:
                desc->depth--;
                break;
        case 0:
                printk(KERN_ERR "enable_irq() unbalanced from %p\n",
                       __builtin_return_address(0));
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}
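
/*
 * Illustrative sketch (not compiled): disable_irq()/enable_irq()
 * nest via desc->depth, so every disable must be balanced by exactly
 * one enable.  The function below is hypothetical.
 */
#if 0
static void
example_pause_irq(unsigned int irq)
{
        disable_irq(irq);       /* depth 0 -> 1: line masked, handlers drained */
        disable_irq(irq);       /* depth 1 -> 2: still masked */

        /* ... touch the hardware with the interrupt quiesced ... */

        enable_irq(irq);        /* depth 2 -> 1: still masked */
        enable_irq(irq);        /* depth 1 -> 0: line unmasked again */
}
#endif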

int
setup_irq(unsigned int irq, struct irqaction * new)
{
        int shared = 0;
        struct irqaction *old, **p;
        unsigned long flags;
        irq_desc_t *desc = irq_desc + irq;

        if (desc->handler == &no_irq_type)
                return -ENOSYS;

        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & SA_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, so we want to call it
                 * first, outside of the atomic block.  Yes, this might
                 * touch the entropy pool if the wrong driver is loaded
                 * without actually installing a new handler, but that
                 * is hardly a problem: only the sysadmin can do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * The following block of code has to be executed atomically.
         */
        spin_lock_irqsave(&desc->lock, flags);
        p = &desc->action;
        if ((old = *p) != NULL) {
                /* Can't share interrupts unless both agree to. */
                if (!(old->flags & new->flags & SA_SHIRQ)) {
                        spin_unlock_irqrestore(&desc->lock, flags);
                        return -EBUSY;
                }

                /* Add the new interrupt at the end of the irq queue. */
                do {
                        p = &old->next;
                        old = *p;
                } while (old);
                shared = 1;
        }

        *p = new;

        if (!shared) {
                desc->depth = 0;
                desc->status &=
                    ~(IRQ_DISABLED|IRQ_AUTODETECT|IRQ_WAITING|IRQ_INPROGRESS);
                desc->handler->startup(irq);
        }
        spin_unlock_irqrestore(&desc->lock, flags);

        return 0;
}

static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir[NR_IRQS];

#ifdef CONFIG_SMP
static struct proc_dir_entry * smp_affinity_entry[NR_IRQS];
static char irq_user_affinity[NR_IRQS];
static cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };

static void
select_smp_affinity(int irq)
{
        static int last_cpu;
        int cpu = last_cpu + 1;

        if (!irq_desc[irq].handler->set_affinity || irq_user_affinity[irq])
                return;

        while (!cpu_possible(cpu))
                cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
        last_cpu = cpu;

        irq_affinity[irq] = cpumask_of_cpu(cpu);
        irq_desc[irq].handler->set_affinity(irq, cpumask_of_cpu(cpu));
}

static int
irq_affinity_read_proc(char *page, char **start, off_t off,
                       int count, int *eof, void *data)
{
        int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
        if (count - len < 2)
                return -EINVAL;
        len += sprintf(page + len, "\n");
        return len;
}

static int
irq_affinity_write_proc(struct file *file, const char __user *buffer,
                        unsigned long count, void *data)
{
        int irq = (long) data, full_count = count, err;
        cpumask_t new_value;

        if (!irq_desc[irq].handler->set_affinity)
                return -EIO;

        err = cpumask_parse(buffer, count, new_value);
        if (err)
                return err;

        /* An empty mask (the special value 0) means: release control
           of the affinity back to the kernel.  */
        cpus_and(new_value, new_value, cpu_online_map);
        if (cpus_empty(new_value)) {
                irq_user_affinity[irq] = 0;
                select_smp_affinity(irq);
        }
        /* Do not allow disabling IRQs completely - it's too easy a
           way to make the system unusable by accident :-) At least
           one online CPU still has to be targeted.  */
        else {
                irq_affinity[irq] = new_value;
                irq_user_affinity[irq] = 1;
                irq_desc[irq].handler->set_affinity(irq, new_value);
        }

        return full_count;
}

static int
prof_cpu_mask_read_proc(char *page, char **start, off_t off,
                        int count, int *eof, void *data)
{
        int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
        if (count - len < 2)
                return -EINVAL;
        len += sprintf(page + len, "\n");
        return len;
}

static int
prof_cpu_mask_write_proc(struct file *file, const char __user *buffer,
                         unsigned long count, void *data)
{
        unsigned long full_count = count;
        cpumask_t new_value, *mask = (cpumask_t *)data;
        int err;

        err = cpumask_parse(buffer, count, new_value);
        if (err)
                return err;

        *mask = new_value;
        return full_count;
}
#endif /* CONFIG_SMP */
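
/*
 * Usage note (illustrative): the handlers above back the files
 * /proc/irq/N/smp_affinity and /proc/irq/prof_cpu_mask, both of
 * which take a hexadecimal cpu mask.  For example, from a shell
 * (the irq number 10 is hypothetical):
 *
 *      echo 3 > /proc/irq/10/smp_affinity      # pin IRQ 10 to CPUs 0-1
 *      echo 0 > /proc/irq/10/smp_affinity      # let the kernel choose again
 *
 * Writing 0 hands affinity control back to select_smp_affinity().
 */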

#define MAX_NAMELEN 10

static void
register_irq_proc(unsigned int irq)
{
        char name[MAX_NAMELEN];

        if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
            irq_dir[irq])
                return;

        memset(name, 0, MAX_NAMELEN);
        sprintf(name, "%d", irq);

        /* create /proc/irq/1234 */
        irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
        if (irq_desc[irq].handler->set_affinity) {
                struct proc_dir_entry *entry;

                /* create /proc/irq/1234/smp_affinity */
                entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

                if (entry) {
                        entry->nlink = 1;
                        entry->data = (void *)(long)irq;
                        entry->read_proc = irq_affinity_read_proc;
                        entry->write_proc = irq_affinity_write_proc;
                }

                smp_affinity_entry[irq] = entry;
        }
#endif
}

/* Note: this relies on cpumask_t fitting in a single unsigned long
   (NR_CPUS <= 64 on alpha), since the prof_cpu_mask read/write
   handlers above treat this object as a cpumask_t.  */
unsigned long prof_cpu_mask = ~0UL;

void
init_irq_proc(void)
{
#ifdef CONFIG_SMP
        struct proc_dir_entry *entry;
#endif
        int i;

        /* create /proc/irq */
        root_irq_dir = proc_mkdir("irq", NULL);

#ifdef CONFIG_SMP
        /* create /proc/irq/prof_cpu_mask */
        entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);

        if (entry) {
                entry->nlink = 1;
                entry->data = (void *)&prof_cpu_mask;
                entry->read_proc = prof_cpu_mask_read_proc;
                entry->write_proc = prof_cpu_mask_write_proc;
        }
#endif

        /*
         * Create entries for all existing IRQs.
         */
        for (i = 0; i < ACTUAL_NR_IRQS; i++) {
                if (irq_desc[i].handler == &no_irq_type)
                        continue;
                register_irq_proc(i);
        }
}

int
request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
            unsigned long irqflags, const char * devname, void *dev_id)
{
        int retval;
        struct irqaction * action;

        if (irq >= ACTUAL_NR_IRQS)
                return -EINVAL;
        if (!handler)
                return -EINVAL;

#if 1
        /*
         * Sanity-check: shared interrupts should REALLY pass in
         * a real dev-ID, otherwise we'll have trouble later trying
         * to figure out which interrupt is which (messes up the
         * interrupt freeing logic etc).
         */
        if ((irqflags & SA_SHIRQ) && !dev_id) {
                printk(KERN_ERR
                       "Bad boy: %s (at %p) called us without a dev_id!\n",
                       devname, __builtin_return_address(0));
        }
#endif

        action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = irqflags;
        cpus_clear(action->mask);
        action->name = devname;
        action->next = NULL;
        action->dev_id = dev_id;

#ifdef CONFIG_SMP
        select_smp_affinity(irq);
#endif

        retval = setup_irq(irq, action);
        if (retval)
                kfree(action);
        return retval;
}

EXPORT_SYMBOL(request_irq);
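
/*
 * Illustrative sketch (not compiled): a minimal shared-interrupt
 * registration.  struct example_dev, example_dev_has_pending() and
 * example_service() are hypothetical; the points that matter are
 * the SA_SHIRQ flag and the non-NULL dev_id that free_irq() later
 * keys on.
 */
#if 0
static irqreturn_t
example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct example_dev *dev = dev_id;

        if (!example_dev_has_pending(dev))
                return IRQ_NONE;        /* not ours on a shared line */

        example_service(dev);
        return IRQ_HANDLED;
}

static int
example_attach(struct example_dev *dev, unsigned int irq)
{
        /* May sleep (GFP_KERNEL allocation), so call from process context. */
        return request_irq(irq, example_interrupt, SA_SHIRQ,
                           "example", dev);
}
#endif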

void
free_irq(unsigned int irq, void *dev_id)
{
        irq_desc_t *desc;
        struct irqaction **p;
        unsigned long flags;

        if (irq >= ACTUAL_NR_IRQS) {
                printk(KERN_CRIT "Trying to free invalid IRQ%d\n", irq);
                return;
        }

        desc = irq_desc + irq;
        spin_lock_irqsave(&desc->lock, flags);
        p = &desc->action;
        for (;;) {
                struct irqaction * action = *p;
                if (action) {
                        struct irqaction **pp = p;
                        p = &action->next;
                        if (action->dev_id != dev_id)
                                continue;

                        /* Found it - now remove it from the list of entries.  */
                        *pp = action->next;
                        if (!desc->action) {
                                desc->status |= IRQ_DISABLED;
                                desc->handler->shutdown(irq);
                        }
                        spin_unlock_irqrestore(&desc->lock, flags);

#ifdef CONFIG_SMP
                        /* Wait to make sure it's not being used on
                           another CPU.  */
                        while (desc->status & IRQ_INPROGRESS)
                                barrier();
#endif
                        kfree(action);
                        return;
                }
                printk(KERN_ERR "Trying to free already-free IRQ%d\n", irq);
                spin_unlock_irqrestore(&desc->lock, flags);
                return;
        }
}

EXPORT_SYMBOL(free_irq);
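
/*
 * Illustrative sketch (not compiled): tearing down the handler
 * registered in the example above.  The dev pointer must match the
 * dev_id passed to request_irq(), or the wrong action (or none at
 * all) will be removed from a shared line.
 */
#if 0
static void
example_detach(struct example_dev *dev, unsigned int irq)
{
        free_irq(irq, dev);     /* same dev_id as at request time */
}
#endif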

int
show_interrupts(struct seq_file *p, void *v)
{
#ifdef CONFIG_SMP
        int j;
#endif
        int i = *(loff_t *) v;
        struct irqaction * action;
        unsigned long flags;

#ifdef CONFIG_SMP
        if (i == 0) {
                seq_puts(p, "           ");
                for (i = 0; i < NR_CPUS; i++)
                        if (cpu_online(i))
                                seq_printf(p, "CPU%d       ", i);
                seq_putc(p, '\n');
        }
#endif

        if (i < ACTUAL_NR_IRQS) {
                spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto unlock;
                seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for (j = 0; j < NR_CPUS; j++)
                        if (cpu_online(j))
                                seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
                seq_printf(p, " %14s", irq_desc[i].handler->typename);
                seq_printf(p, "  %c%s",
                           (action->flags & SA_INTERRUPT) ? '+' : ' ',
                           action->name);

                for (action = action->next; action; action = action->next) {
                        seq_printf(p, ", %c%s",
                                   (action->flags & SA_INTERRUPT) ? '+' : ' ',
                                   action->name);
                }

                seq_putc(p, '\n');
unlock:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == ACTUAL_NR_IRQS) {
#ifdef CONFIG_SMP
                seq_puts(p, "IPI: ");
                for (i = 0; i < NR_CPUS; i++)
                        if (cpu_online(i))
                                seq_printf(p, "%10lu ", cpu_data[i].ipi_count);
                seq_putc(p, '\n');
#endif
                seq_printf(p, "ERR: %10lu\n", irq_err_count);
        }
        return 0;
}


/*
 * handle_irq handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */

#define MAX_ILLEGAL_IRQS 16

void
handle_irq(int irq, struct pt_regs * regs)
{
        /*
         * We ack quickly, we don't want the irq controller
         * thinking we're snobs just because some other CPU has
         * disabled global interrupts (we have already done the
         * INT_ACK cycles, it's too late to try to pretend to the
         * controller that we aren't taking the interrupt).
         */
        int cpu = smp_processor_id();
        irq_desc_t *desc = irq_desc + irq;
        struct irqaction * action;
        unsigned int status;
        static unsigned int illegal_count = 0;

        if ((unsigned) irq >= ACTUAL_NR_IRQS) {
                irq_err_count++;
                if (illegal_count < MAX_ILLEGAL_IRQS) {
                        illegal_count++;
                        printk(KERN_CRIT
                               "device_interrupt: invalid interrupt %d\n",
                               irq);
                }
                return;
        }

        irq_enter();
        kstat_cpu(cpu).irqs[irq]++;
        spin_lock_irq(&desc->lock); /* mask also the higher prio events */
        desc->handler->ack(irq);
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier.
         * WAITING is used by probe to mark irqs that are being tested.
         */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;

        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (!action)
                goto out;

        /*
         * Edge triggered interrupts need to remember pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in handle_irq
         * or in the handler. But the code here only handles the _second_
         * instance of the irq, not the third or fourth. So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                spin_unlock(&desc->lock);
                handle_IRQ_event(irq, regs, action);
                spin_lock(&desc->lock);

                if (!(desc->status & IRQ_PENDING)
                    || (desc->status & IRQ_LEVEL))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;
out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->handler->end(irq);
        spin_unlock(&desc->lock);

        irq_exit();
}

/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
unsigned long
probe_irq_on(void)
{
        int i;
        irq_desc_t *desc;
        unsigned long delay;
        unsigned long val;

        /* Something may have generated an irq long ago and we want to
           flush such a longstanding irq before considering it as spurious. */
        for (i = NR_IRQS-1; i >= 0; i--) {
                desc = irq_desc + i;

                spin_lock_irq(&desc->lock);
                if (!desc->action)
                        desc->handler->startup(i);
                spin_unlock_irq(&desc->lock);
        }

        /* Wait for longstanding interrupts to trigger. */
        for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
                /* about 20ms delay */ barrier();

        /* Enable any unassigned irqs.  We must startup again here
           because if a longstanding irq happened in the previous
           stage, it may have masked itself.  */
        for (i = NR_IRQS-1; i >= 0; i--) {
                desc = irq_desc + i;

                spin_lock_irq(&desc->lock);
                if (!desc->action) {
                        desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
                        if (desc->handler->startup(i))
                                desc->status |= IRQ_PENDING;
                }
                spin_unlock_irq(&desc->lock);
        }

        /*
         * Wait for spurious interrupts to trigger
         */
        for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
                /* about 100ms delay */ barrier();

        /*
         * Now filter out any obviously spurious interrupts
         */
        val = 0;
        for (i = 0; i < NR_IRQS; i++) {
                irq_desc_t *desc = irq_desc + i;
                unsigned int status;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        /* It triggered already - consider it spurious. */
                        if (!(status & IRQ_WAITING)) {
                                desc->status = status & ~IRQ_AUTODETECT;
                                desc->handler->shutdown(i);
                        } else if (i < 32)
                                val |= 1 << i;
                }
                spin_unlock_irq(&desc->lock);
        }

        return val;
}

EXPORT_SYMBOL(probe_irq_on);

/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */
unsigned int
probe_irq_mask(unsigned long val)
{
        int i;
        unsigned int mask;

        mask = 0;
        for (i = 0; i < NR_IRQS; i++) {
                irq_desc_t *desc = irq_desc + i;
                unsigned int status;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        /* We only react to ISA interrupts */
                        if (!(status & IRQ_WAITING)) {
                                if (i < 16)
                                        mask |= 1 << i;
                        }

                        desc->status = status & ~IRQ_AUTODETECT;
                        desc->handler->shutdown(i);
                }
                spin_unlock_irq(&desc->lock);
        }

        return mask & val;
}

/*
 * Get the result of the IRQ probe.  A negative result means that
 * several candidates triggered, and the negated value of the
 * lowest-numbered one is returned.  (Note that this implementation
 * does not consult the mask returned by probe_irq_on(); every irq
 * still marked IRQ_AUTODETECT is examined and shut down.)
 */
int
probe_irq_off(unsigned long val)
{
        int i, irq_found, nr_irqs;

        nr_irqs = 0;
        irq_found = 0;
        for (i = 0; i < NR_IRQS; i++) {
                irq_desc_t *desc = irq_desc + i;
                unsigned int status;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        if (!(status & IRQ_WAITING)) {
                                if (!nr_irqs)
                                        irq_found = i;
                                nr_irqs++;
                        }
                        desc->status = status & ~IRQ_AUTODETECT;
                        desc->handler->shutdown(i);
                }
                spin_unlock_irq(&desc->lock);
        }

        if (nr_irqs > 1)
                irq_found = -irq_found;
        return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);
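
/*
 * Illustrative sketch (not compiled): the classic autoprobe sequence
 * a legacy ISA driver would use with the helpers above.
 * example_trigger_interrupt() is hypothetical; a real driver would
 * program its device to raise an interrupt at that point.
 */
#if 0
static int
example_probe_irq(struct example_dev *dev)
{
        unsigned long mask;
        int irq;

        mask = probe_irq_on();          /* arm all unassigned irqs */
        example_trigger_interrupt(dev); /* make the device interrupt */
        mdelay(10);                     /* give it time to arrive */
        irq = probe_irq_off(mask);      /* <= 0: none or multiple hits */

        return irq > 0 ? irq : -ENODEV;
}
#endif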

#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
        /* is there anything to synchronize with? */
        if (!irq_desc[irq].action)
                return;

        while (irq_desc[irq].status & IRQ_INPROGRESS)
                barrier();
}
#endif