patch-2_6_7-vs1_9_1_12
[linux-2.6.git] / arch / alpha / kernel / irq.c
1 /*
2  *      linux/arch/alpha/kernel/irq.c
3  *
4  *      Copyright (C) 1995 Linus Torvalds
5  *
6  * This file contains the code used by various IRQ handling routines:
7  * asking for different IRQ's should be done through these routines
8  * instead of just grabbing them. Thus setups with different IRQ numbers
9  * shouldn't result in any weird surprises, and installing new handlers
10  * should be easier.
11  */
12
13 #include <linux/config.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/errno.h>
17 #include <linux/kernel_stat.h>
18 #include <linux/signal.h>
19 #include <linux/sched.h>
20 #include <linux/ptrace.h>
21 #include <linux/interrupt.h>
22 #include <linux/slab.h>
23 #include <linux/random.h>
24 #include <linux/init.h>
25 #include <linux/irq.h>
26 #include <linux/proc_fs.h>
27 #include <linux/seq_file.h>
28
29 #include <asm/system.h>
30 #include <asm/io.h>
31 #include <asm/bitops.h>
32 #include <asm/uaccess.h>
33
/*
 * Controller mappings for all interrupt sources:
 * every descriptor starts out pointing at the "none" controller
 * (no_irq_type) with its own unlocked spinlock; platform code
 * installs real controllers later.
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.handler = &no_irq_type,
		.lock = SPIN_LOCK_UNLOCKED
	}
};
43
static void register_irq_proc(unsigned int irq);

/* Bumped on spurious/unexpected interrupts; shown as "ERR:" in /proc/interrupts. */
volatile unsigned long irq_err_count;
47
/*
 * Special irq handlers.
 */

/*
 * Placeholder handler: does nothing and reports the interrupt as
 * not handled by this action (IRQ_NONE).
 */
irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{
	return IRQ_NONE;
}
56
/*
 * Generic no controller code
 */

/* Shared no-op used for shutdown/enable/disable/end of the "none" controller. */
static void no_irq_enable_disable(unsigned int irq) { }
/* Startup stub: returns 0, i.e. "no interrupt pending" (see probe_irq_on). */
static unsigned int no_irq_startup(unsigned int irq) { return 0; }
63
/*
 * Ack for a vector with no registered controller: count it as an
 * error and complain loudly, since nothing should be raising it.
 */
static void
no_irq_ack(unsigned int irq)
{
	irq_err_count++;
	printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq);
}
70
/*
 * Default controller for interrupt sources that have no real
 * hardware controller attached.  Every operation is a no-op
 * except .ack, which logs the unexpected interrupt.
 */
struct hw_interrupt_type no_irq_type = {
	.typename	= "none",
	.startup	= no_irq_startup,
	.shutdown	= no_irq_enable_disable,
	.enable		= no_irq_enable_disable,
	.disable	= no_irq_enable_disable,
	.ack		= no_irq_ack,
	.end		= no_irq_enable_disable,
};
80
81 int
82 handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
83                  struct irqaction *action)
84 {
85         int status = 1; /* Force the "do bottom halves" bit */
86
87         do {
88                 if (!(action->flags & SA_INTERRUPT))
89                         local_irq_enable();
90                 else
91                         local_irq_disable();
92
93                 status |= action->flags;
94                 action->handler(irq, action->dev_id, regs);
95                 action = action->next;
96         } while (action);
97         if (status & SA_SAMPLE_RANDOM)
98                 add_interrupt_randomness(irq);
99         local_irq_disable();
100
101         return status;
102 }
103
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock. 
 */

/*
 * Disable @irq without waiting for any running handler to finish.
 * Disables nest via desc->depth: only the 0 -> 1 transition actually
 * masks the line at the controller.
 */
void inline
disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
123
/*
 * Synchronous version of the above, making sure the IRQ is
 * no longer running on any other IRQ..
 */

/* Disable @irq and wait until no handler for it is still executing. */
void
disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	synchronize_irq(irq);
}
134
/*
 * Undo one level of disable_irq() nesting.  Only the final level
 * (depth 1 -> 0) unmasks the hardware; a lost interrupt that arrived
 * while disabled (IRQ_PENDING set, not yet replayed) is resent.
 * An unbalanced call (depth already 0) is logged and ignored.
 */
void
enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		/* Last disable level: clear IRQ_DISABLED and unmask. */
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		/* Replay an interrupt that arrived while we were masked. */
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler,irq);
		}
		desc->handler->enable(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk(KERN_ERR "enable_irq() unbalanced from %p\n",
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
162
/*
 * Append @new to @irq's action list and start the irq if this is the
 * first handler installed.  Ownership of @new passes to the irq core
 * on success.
 *
 * Returns 0 on success, -ENOSYS if the irq has no real controller,
 * or -EBUSY when an existing handler does not agree to share
 * (both sides must set SA_SHIRQ).
 */
int
setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	irq_desc_t *desc = irq_desc + irq;

	if (desc->handler == &no_irq_type)
		return -ENOSYS;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		/* First handler: reset nesting, clear stale state, start hw. */
		desc->depth = 0;
		desc->status &=
		    ~(IRQ_DISABLED|IRQ_AUTODETECT|IRQ_WAITING|IRQ_INPROGRESS);
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);

	return 0;
}
223
/* /proc/irq directory and its per-irq subdirectories. */
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir[NR_IRQS];

#ifdef CONFIG_SMP
/* Per-irq /proc/irq/N/smp_affinity control entries. */
static struct proc_dir_entry * smp_affinity_entry[NR_IRQS];
/* Nonzero once the user pinned an irq; suppresses automatic placement. */
static char irq_user_affinity[NR_IRQS];
/* CPU mask each irq may be delivered to; defaults to "any CPU". */
static unsigned long irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
231
/*
 * Round-robin @irq onto the next present CPU, unless the controller
 * cannot set affinity or the user has pinned this irq via /proc.
 */
static void
select_smp_affinity(int irq)
{
	static int last_cpu;
	int cpu = last_cpu + 1;

	if (! irq_desc[irq].handler->set_affinity || irq_user_affinity[irq])
		return;

	/* Advance (wrapping at NR_CPUS) to the next CPU in cpu_present_mask. */
	while (((cpu_present_mask >> cpu) & 1) == 0)
		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
	last_cpu = cpu;

	irq_affinity[irq] = 1UL << cpu;
	irq_desc[irq].handler->set_affinity(irq, 1UL << cpu);
}
248
/* Maximum number of hex characters parse_hex_value() will consume. */
#define HEX_DIGITS 16

/*
 * /proc read handler for /proc/irq/N/smp_affinity: prints the irq's
 * affinity mask plus a newline.  data carries the irq number.
 * Fails with -EINVAL if the page cannot also hold "\n\0".
 */
static int
irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
261
262 static unsigned int
263 parse_hex_value (const char __user *buffer,
264                  unsigned long count, unsigned long *ret)
265 {
266         unsigned char hexnum [HEX_DIGITS];
267         unsigned long value;
268         unsigned long i;
269
270         if (!count)
271                 return -EINVAL;
272         if (count > HEX_DIGITS)
273                 count = HEX_DIGITS;
274         if (copy_from_user(hexnum, buffer, count))
275                 return -EFAULT;
276
277         /*
278          * Parse the first 8 characters as a hex string, any non-hex char
279          * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
280          */
281         value = 0;
282
283         for (i = 0; i < count; i++) {
284                 unsigned int c = hexnum[i];
285
286                 switch (c) {
287                         case '0' ... '9': c -= '0'; break;
288                         case 'a' ... 'f': c -= 'a'-10; break;
289                         case 'A' ... 'F': c -= 'A'-10; break;
290                 default:
291                         goto out;
292                 }
293                 value = (value << 4) | c;
294         }
295 out:
296         *ret = value;
297         return 0;
298 }
299
300 static int
301 irq_affinity_write_proc(struct file *file, const char __user *buffer,
302                         unsigned long count, void *data)
303 {
304         int irq = (long) data, full_count = count, err;
305         unsigned long new_value;
306
307         if (!irq_desc[irq].handler->set_affinity)
308                 return -EIO;
309
310         err = parse_hex_value(buffer, count, &new_value);
311
312         /* The special value 0 means release control of the
313            affinity to kernel.  */
314         if (new_value == 0) {
315                 irq_user_affinity[irq] = 0;
316                 select_smp_affinity(irq);
317         }
318         /* Do not allow disabling IRQs completely - it's a too easy
319            way to make the system unusable accidentally :-) At least
320            one online CPU still has to be targeted.  */
321         else if (!(new_value & cpu_present_mask))
322                 return -EINVAL;
323         else {
324                 irq_affinity[irq] = new_value;
325                 irq_user_affinity[irq] = 1;
326                 irq_desc[irq].handler->set_affinity(irq, new_value);
327         }
328
329         return full_count;
330 }
331
/*
 * /proc read handler for /proc/irq/prof_cpu_mask: prints the
 * profiling CPU mask (data points at the mask) plus a newline.
 * Fails with -EINVAL if the page cannot also hold "\n\0".
 */
static int
prof_cpu_mask_read_proc(char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
342
343 static int
344 prof_cpu_mask_write_proc(struct file *file, const char __user *buffer,
345                          unsigned long count, void *data)
346 {
347         unsigned long *mask = (unsigned long *) data, full_count = count, err;
348         unsigned long new_value;
349
350         err = parse_hex_value(buffer, count, &new_value);
351         if (err)
352                 return err;
353
354         *mask = new_value;
355         return full_count;
356 }
357 #endif /* CONFIG_SMP */
358
359 #define MAX_NAMELEN 10
360
361 static void
362 register_irq_proc (unsigned int irq)
363 {
364         char name [MAX_NAMELEN];
365
366         if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
367             irq_dir[irq])
368                 return;
369
370         memset(name, 0, MAX_NAMELEN);
371         sprintf(name, "%d", irq);
372
373         /* create /proc/irq/1234 */
374         irq_dir[irq] = proc_mkdir(name, root_irq_dir);
375
376 #ifdef CONFIG_SMP 
377         if (irq_desc[irq].handler->set_affinity) {
378                 struct proc_dir_entry *entry;
379                 /* create /proc/irq/1234/smp_affinity */
380                 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
381
382                 if (entry) {
383                         entry->nlink = 1;
384                         entry->data = (void *)(long)irq;
385                         entry->read_proc = irq_affinity_read_proc;
386                         entry->write_proc = irq_affinity_write_proc;
387                 }
388
389                 smp_affinity_entry[irq] = entry;
390         }
391 #endif
392 }
393
394 unsigned long prof_cpu_mask = ~0UL;
395
396 void
397 init_irq_proc (void)
398 {
399 #ifdef CONFIG_SMP
400         struct proc_dir_entry *entry;
401 #endif
402         int i;
403
404         /* create /proc/irq */
405         root_irq_dir = proc_mkdir("irq", 0);
406
407 #ifdef CONFIG_SMP 
408         /* create /proc/irq/prof_cpu_mask */
409         entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
410
411         entry->nlink = 1;
412         entry->data = (void *)&prof_cpu_mask;
413         entry->read_proc = prof_cpu_mask_read_proc;
414         entry->write_proc = prof_cpu_mask_write_proc;
415 #endif
416
417         /*
418          * Create entries for all existing IRQs.
419          */
420         for (i = 0; i < ACTUAL_NR_IRQS; i++) {
421                 if (irq_desc[i].handler == &no_irq_type)
422                         continue;
423                 register_irq_proc(i);
424         }
425 }
426
427 int
428 request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
429             unsigned long irqflags, const char * devname, void *dev_id)
430 {
431         int retval;
432         struct irqaction * action;
433
434         if (irq >= ACTUAL_NR_IRQS)
435                 return -EINVAL;
436         if (!handler)
437                 return -EINVAL;
438
439 #if 1
440         /*
441          * Sanity-check: shared interrupts should REALLY pass in
442          * a real dev-ID, otherwise we'll have trouble later trying
443          * to figure out which interrupt is which (messes up the
444          * interrupt freeing logic etc).
445          */
446         if ((irqflags & SA_SHIRQ) && !dev_id) {
447                 printk(KERN_ERR
448                        "Bad boy: %s (at %p) called us without a dev_id!\n",
449                        devname, __builtin_return_address(0));
450         }
451 #endif
452
453         action = (struct irqaction *)
454                         kmalloc(sizeof(struct irqaction), GFP_KERNEL);
455         if (!action)
456                 return -ENOMEM;
457
458         action->handler = handler;
459         action->flags = irqflags;
460         action->mask = 0;
461         action->name = devname;
462         action->next = NULL;
463         action->dev_id = dev_id;
464
465 #ifdef CONFIG_SMP
466         select_smp_affinity(irq);
467 #endif
468
469         retval = setup_irq(irq, action);
470         if (retval)
471                 kfree(action);
472         return retval;
473 }
474
475 EXPORT_SYMBOL(request_irq);
476
/*
 * Remove the handler identified by @dev_id from @irq's action list
 * and free it.  If it was the last handler, the irq is marked
 * disabled and shut down at the controller.  On SMP we spin until
 * the handler has finished running on any other CPU before freeing.
 */
void
free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	if (irq >= ACTUAL_NR_IRQS) {
		printk(KERN_CRIT "Trying to free IRQ%d\n", irq);
		return;
	}

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	/* Walk the singly-linked action list looking for dev_id. */
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found - now remove it from the list of entries.  */
			*pp = action->next;
			if (!desc->action) {
				/* Last handler gone: quiesce the line. */
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);

#ifdef CONFIG_SMP
			/* Wait to make sure it's not being used on
			   another CPU.  */
			while (desc->status & IRQ_INPROGRESS)
				barrier();
#endif
			kfree(action);
			return;
		}
		/* Fell off the list: nobody registered with this dev_id. */
		printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		return;
	}
}
522
523 EXPORT_SYMBOL(free_irq);
524
/*
 * seq_file show routine for /proc/interrupts.  v indexes one row:
 * rows 0..ACTUAL_NR_IRQS-1 print per-irq counts (per-CPU on SMP),
 * the controller name and the action names ('+' marks SA_INTERRUPT
 * fast handlers); row ACTUAL_NR_IRQS prints the IPI counts (SMP)
 * and the spurious-interrupt total.
 */
int
show_interrupts(struct seq_file *p, void *v)
{
#ifdef CONFIG_SMP
	int j;
#endif
	int i = *(loff_t *) v;
	struct irqaction * action;
	unsigned long flags;

#ifdef CONFIG_SMP
	if (i == 0) {
		/* Header row: one column label per online CPU. */
		seq_puts(p, "           ");
		for (i = 0; i < NR_CPUS; i++)
			if (cpu_online(i))
				seq_printf(p, "CPU%d       ", i);
		seq_putc(p, '\n');
	}
#endif

	if (i < ACTUAL_NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;	/* irq has no handlers: print nothing */
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].handler->typename);
		seq_printf(p, "  %c%s",
			(action->flags & SA_INTERRUPT)?'+':' ',
			action->name);

		/* Remaining shared handlers on the same line. */
		for (action=action->next; action; action = action->next) {
			seq_printf(p, ", %c%s",
				  (action->flags & SA_INTERRUPT)?'+':' ',
				   action->name);
		}

		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == ACTUAL_NR_IRQS) {
#ifdef CONFIG_SMP
		seq_puts(p, "IPI: ");
		for (i = 0; i < NR_CPUS; i++)
			if (cpu_online(i))
				seq_printf(p, "%10lu ", cpu_data[i].ipi_count);
		seq_putc(p, '\n');
#endif
		seq_printf(p, "ERR: %10lu\n", irq_err_count);
	}
	return 0;
}
584
585
586 /*
587  * handle_irq handles all normal device IRQ's (the special
588  * SMP cross-CPU interrupts have their own specific
589  * handlers).
590  */
591
592 #define MAX_ILLEGAL_IRQS 16
593
594 void
595 handle_irq(int irq, struct pt_regs * regs)
596 {       
597         /* 
598          * We ack quickly, we don't want the irq controller
599          * thinking we're snobs just because some other CPU has
600          * disabled global interrupts (we have already done the
601          * INT_ACK cycles, it's too late to try to pretend to the
602          * controller that we aren't taking the interrupt).
603          *
604          * 0 return value means that this irq is already being
605          * handled by some other CPU. (or is disabled)
606          */
607         int cpu = smp_processor_id();
608         irq_desc_t *desc = irq_desc + irq;
609         struct irqaction * action;
610         unsigned int status;
611         static unsigned int illegal_count=0;
612         
613         if ((unsigned) irq > ACTUAL_NR_IRQS && illegal_count < MAX_ILLEGAL_IRQS ) {
614                 irq_err_count++;
615                 illegal_count++;
616                 printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n",
617                        irq);
618                 return;
619         }
620
621         irq_enter();
622         kstat_cpu(cpu).irqs[irq]++;
623         spin_lock_irq(&desc->lock); /* mask also the higher prio events */
624         desc->handler->ack(irq);
625         /*
626          * REPLAY is when Linux resends an IRQ that was dropped earlier.
627          * WAITING is used by probe to mark irqs that are being tested.
628          */
629         status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
630         status |= IRQ_PENDING; /* we _want_ to handle it */
631
632         /*
633          * If the IRQ is disabled for whatever reason, we cannot
634          * use the action we have.
635          */
636         action = NULL;
637         if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
638                 action = desc->action;
639                 status &= ~IRQ_PENDING; /* we commit to handling */
640                 status |= IRQ_INPROGRESS; /* we are handling it */
641         }
642         desc->status = status;
643
644         /*
645          * If there is no IRQ handler or it was disabled, exit early.
646          * Since we set PENDING, if another processor is handling
647          * a different instance of this same irq, the other processor
648          * will take care of it.
649          */
650         if (!action)
651                 goto out;
652
653         /*
654          * Edge triggered interrupts need to remember pending events.
655          * This applies to any hw interrupts that allow a second
656          * instance of the same irq to arrive while we are in handle_irq
657          * or in the handler. But the code here only handles the _second_
658          * instance of the irq, not the third or fourth. So it is mostly
659          * useful for irq hardware that does not mask cleanly in an
660          * SMP environment.
661          */
662         for (;;) {
663                 spin_unlock(&desc->lock);
664                 handle_IRQ_event(irq, regs, action);
665                 spin_lock(&desc->lock);
666                 
667                 if (!(desc->status & IRQ_PENDING)
668                     || (desc->status & IRQ_LEVEL))
669                         break;
670                 desc->status &= ~IRQ_PENDING;
671         }
672         desc->status &= ~IRQ_INPROGRESS;
673 out:
674         /*
675          * The ->end() handler has to deal with interrupts which got
676          * disabled while the handler was running.
677          */
678         desc->handler->end(irq);
679         spin_unlock(&desc->lock);
680
681         irq_exit();
682 }
683
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */

/*
 * Start autodetection: arm every unassigned irq, wait for spurious
 * interrupts to fire and disqualify themselves, then return a bitmask
 * of the irqs (< 32 only) still armed and waiting.  Pair with
 * probe_irq_off()/probe_irq_mask().
 */
unsigned long
probe_irq_on(void)
{
	int i;
	irq_desc_t *desc;
	unsigned long delay;
	unsigned long val;

	/* Something may have generated an irq long ago and we want to
	   flush such a longstanding irq before considering it as spurious. */
	for (i = NR_IRQS-1; i >= 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!irq_desc[i].action)
			irq_desc[i].handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ barrier();

	/* enable any unassigned irqs (we must startup again here because
	   if a longstanding irq happened in the previous stage, it may have
	   masked itself) first, enable any unassigned irqs. */
	for (i = NR_IRQS-1; i >= 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			/* Arm for detection; WAITING is cleared by any arrival. */
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i=0; i<NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}
761
762 EXPORT_SYMBOL(probe_irq_on);
763
/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/*
 * End a probe started by probe_irq_on(): collect the ISA irqs
 * (< 16) that fired during the probe, tear down autodetect state
 * on every probed irq, and return the triggered set ANDed with
 * the caller's candidate mask @val.
 */
unsigned int
probe_irq_mask(unsigned long val)
{
	int i;
	unsigned int mask;

	mask = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* We only react to ISA interrupts */
			if (!(status & IRQ_WAITING)) {
				if (i < 16)
					mask |= 1 << i;
			}

			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}

	return mask & val;
}
797
/*
 * Get the result of the IRQ probe.. A negative result means that
 * we have several candidates (but we return the lowest-numbered
 * one).
 */

/*
 * End a probe started by probe_irq_on(): tear down autodetect state
 * and return the lowest irq that fired, negated if more than one
 * fired, or 0 if none did.  @val is unused here (kept for the
 * standard probe_irq_off interface).
 */
int
probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	for (i=0; i<NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* WAITING cleared means this irq actually fired. */
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}
834
835 EXPORT_SYMBOL(probe_irq_off);
836
837 #ifdef CONFIG_SMP
/*
 * Busy-wait until no handler for @irq is still executing on another
 * CPU (IRQ_INPROGRESS clear).  Returns immediately if the irq has
 * no handlers installed at all.
 */
void synchronize_irq(unsigned int irq)
{
	/* is there anything to synchronize with? */
	if (!irq_desc[irq].action)
		return;

	while (irq_desc[irq].status & IRQ_INPROGRESS)
		barrier();
}
847 #endif