/*
 *	linux/arch/i386/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

/*
 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>

#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/desc.h>
#include <asm/irq.h>

/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs are assumed to be messaging to Pentium local-APICs.)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.handler = &no_irq_type,
		.lock = SPIN_LOCK_UNLOCKED
	}
};

static void register_irq_proc (unsigned int irq);

/*
 * per-CPU IRQ handling stacks
 */
#ifdef CONFIG_IRQSTACKS
union irq_ctx *hardirq_ctx[NR_CPUS];
union irq_ctx *softirq_ctx[NR_CPUS];
#endif

/*
 * Special irq handlers.
 */

irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{ return IRQ_NONE; }

/*
 * Generic no controller code
 */

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves, it doesn't deserve
 * a generic callback I think.
 */
#ifdef CONFIG_X86
	printk("unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 */
	ack_APIC_irq();
#endif
#endif
}

/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none	disable_none
#define end_none	enable_none

struct hw_interrupt_type no_irq_type = {
	"none",
	startup_none,
	shutdown_none,
	enable_none,
	disable_none,
	ack_none,
	end_none
};

atomic_t irq_err_count;
#if defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
atomic_t irq_mis_count;
#endif

/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for (j=0; j<NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].handler->typename);
		seq_printf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS) {
		seq_printf(p, "NMI: ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", nmi_count(j));
		seq_putc(p, '\n');
#ifdef CONFIG_X86_LOCAL_APIC
		seq_printf(p, "LOC: ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", irq_stat[j].apic_timer_irqs);
		seq_putc(p, '\n');
#endif
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
		seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
	}
	return 0;
}
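
/*
 * Wait for any in-flight handler of @irq running on another CPU to
 * finish: spin (cpu_relax()) while the descriptor still has
 * IRQ_INPROGRESS set.
 */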
#ifdef CONFIG_SMP
inline void synchronize_irq(unsigned int irq)
{
	while (irq_desc[irq].status & IRQ_INPROGRESS)
		cpu_relax();
}
#endif

/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
asmlinkage int handle_IRQ_event(unsigned int irq,
		struct pt_regs *regs, struct irqaction *action)
{
	int status = 1;	/* Force the "do bottom halves" bit */
	int ret, retval = 0;

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();
	return retval;
}

static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	struct irqaction *action;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared! (screaming interrupt?)\n", irq);
		printk(KERN_ERR "irq %d: Please try booting with acpi=off and report a bug\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");
	action = desc->action;
	do {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			(unsigned long)action->handler);
		printk("\n");
		action = action->next;
	} while (action);
}

static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}

static int noirqdebug;

int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk(KERN_INFO "IRQ lockup detection disabled\n");
	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
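
/*
 * irqfixup: 0 = off, 1 = "irqfixup" (when an IRQ goes unhandled, poll
 * the other shared-capable handlers via misrouted_irq()), 2 = "irqpoll"
 * (additionally poll on every timer interrupt, irq 0) -- see
 * note_interrupt() below.
 */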
static int irqfixup;

static int __init irqfixup_setup(char *str)
{
	irqfixup = 1;
	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
	printk(KERN_WARNING "This may impact system performance.\n");
	return 1;
}

__setup("irqfixup", irqfixup_setup);

static int __init irqpoll_setup(char *str)
{
	irqfixup = 2;
	printk(KERN_WARNING "Misrouted IRQ fixup and polling support enabled.\n");
	printk(KERN_WARNING "This may significantly impact system performance.\n");
	return 1;
}

__setup("irqpoll", irqpoll_setup);

/*
 *	Recovery handler for misrouted interrupts
 */

static asmlinkage int misrouted_irq(int irq, struct pt_regs *regs)
{
	int i;
	irq_desc_t *desc;
	int ok = 0;
	int work = 0;	/* Did we do work for a real IRQ */

	for (i = 1; i < NR_IRQS; i++) {
		struct irqaction *action;

		if (i == irq)	/* Already tried */
			continue;
		desc = &irq_desc[i];
		spin_lock(&desc->lock);
		action = desc->action;
		/* Already running on another processor */
		if (desc->status & IRQ_INPROGRESS) {
			/*
			 * Already running: If it is shared get the other
			 * CPU to go looking for our mystery interrupt too
			 */
			if (desc->action && (desc->action->flags & SA_SHIRQ))
				desc->status |= IRQ_PENDING;
			spin_unlock(&desc->lock);
			continue;
		}
		/* Honour the normal IRQ locking */
		desc->status |= IRQ_INPROGRESS;
		spin_unlock(&desc->lock);
		while (action) {
			/* Only shared IRQ handlers are safe to call */
			if (action->flags & SA_SHIRQ) {
				if (action->handler(i, action->dev_id, regs) == IRQ_HANDLED)
					ok = 1;
			}
			action = action->next;
		}
		local_irq_disable();
		/* Now clean up the flags */
		spin_lock(&desc->lock);
		action = desc->action;

		/*
		 * While we were looking for a fixup someone queued a real
		 * IRQ clashing with our walk
		 */
		while ((desc->status & IRQ_PENDING) && action) {
			/* Perform real IRQ processing for the IRQ we deferred */
			work = 1;
			spin_unlock(&desc->lock);
			handle_IRQ_event(i, regs, action);
			spin_lock(&desc->lock);
			desc->status &= ~IRQ_PENDING;
		}
		desc->status &= ~IRQ_INPROGRESS;
		/*
		 * If we did actual work for the real IRQ line we must
		 * let the IRQ controller clean up too
		 */
		if (work)
			desc->handler->end(i);
		spin_unlock(&desc->lock);
	}
	/* So the caller can adjust the irq error counts */
	return ok;
}

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled then
 * assume that the IRQ is stuck in some manner.  Drop a diagnostic and try to
 * turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly-functioning
 *  device sharing an IRQ with the failing one)
 *
 * Called under desc->lock
 */
static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret, struct pt_regs *regs)
{
	if (action_ret != IRQ_HANDLED) {
		desc->irqs_unhandled++;
		if (action_ret != IRQ_NONE)
			report_bad_irq(irq, desc, action_ret);
	}
	if (unlikely(irqfixup)) {	/* Don't punish working computers */
		if ((irqfixup == 2 && irq == 0) || action_ret == IRQ_NONE) {
			int ok;
			u32 *isp;
			union irq_ctx * curctx;
			union irq_ctx * irqctx;

#ifdef CONFIG_IRQSTACKS
			curctx = (union irq_ctx *) current_thread_info();
			irqctx = hardirq_ctx[smp_processor_id()];
#else
			curctx = irqctx = (union irq_ctx *)0;
#endif

			spin_unlock(&desc->lock);

			/*
			 * this is where we switch to the IRQ stack. However, if we are already using
			 * the IRQ stack (because we interrupted a hardirq handler) we can't do that
			 * and just have to keep using the current stack (which is the irq stack already
			 * after all)
			 */
			if (curctx == irqctx)
				ok = misrouted_irq(irq, regs);
			else {
				/* build the stack frame on the IRQ stack */
				isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
				irqctx->tinfo.task = curctx->tinfo.task;
				irqctx->tinfo.previous_esp = current_stack_pointer();

				*--isp = (u32) regs;
				*--isp = (u32) irq;

				asm volatile(
				"	xchgl	%%ebx,%%esp	\n"
				"	call	misrouted_irq	\n"
				"	xchgl	%%ebx,%%esp	\n"
				: "=a"(ok)
				: "b"(isp)
				: "memory", "cc", "edx", "ecx"
				);
			}
			spin_lock(&desc->lock);
			if (curctx != irqctx)
				irqctx->tinfo.task = NULL;
			if (action_ret == IRQ_NONE)
				desc->irqs_unhandled -= ok;
		}
	}

	desc->irq_count++;
	if (desc->irq_count < 100000)
		return;

	desc->irq_count = 0;
	if (desc->irqs_unhandled > 99900) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	desc->irqs_unhandled = 0;
}

/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
inline void disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler,irq);
		}
		desc->handler->enable(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq(%u) unbalanced from %p\n", irq,
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
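
/*
 * Typical usage of the pair above -- an illustrative sketch only, not
 * code from this file (my_dev, my_dev->irq and reprogram_hardware()
 * are hypothetical):
 *
 *	disable_irq(my_dev->irq);	// waits for running handlers
 *	reprogram_hardware(my_dev);	// device is quiescent here
 *	enable_irq(my_dev->irq);
 *
 * Disables nest: after two disable_irq() calls the line stays masked
 * until two matching enable_irq() calls are made.
 */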

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(struct pt_regs regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	int irq = regs.orig_eax & 0xff; /* high bits used in ret_from_ code */
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction * action;
	unsigned int status;

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 1KB free? */
	{
		long esp;

		__asm__ __volatile__("andl %%esp,%0" :
					"=r" (esp) : "0" (THREAD_SIZE - 1));
		if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
			printk("do_IRQ: stack overflow: %ld\n",
				esp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif
	kstat_this_cpu.irqs[irq]++;
	spin_lock(&desc->lock);
	desc->handler->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;
		u32 *isp;
		union irq_ctx * curctx;
		union irq_ctx * irqctx;
#ifdef CONFIG_IRQSTACKS
		curctx = (union irq_ctx *) current_thread_info();
		irqctx = hardirq_ctx[smp_processor_id()];
#else
		curctx = irqctx = (union irq_ctx *)0;
#endif
		spin_unlock(&desc->lock);

		/*
		 * this is where we switch to the IRQ stack. However, if we are already using
		 * the IRQ stack (because we interrupted a hardirq handler) we can't do that
		 * and just have to keep using the current stack (which is the irq stack already
		 * after all)
		 */
		if (curctx == irqctx)
			action_ret = handle_IRQ_event(irq, &regs, action);
		else {
			/* build the stack frame on the IRQ stack */
			isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
			irqctx->tinfo.task = curctx->tinfo.task;
			irqctx->tinfo.previous_esp = current_stack_pointer();

			*--isp = (u32) action;
			*--isp = (u32) &regs;
			*--isp = (u32) irq;

			asm volatile(
				"	xchgl	%%ebx,%%esp	\n"
				"	call	handle_IRQ_event \n"
				"	xchgl	%%ebx,%%esp	\n"
				: "=a"(action_ret)
				: "b"(isp)
				: "memory", "cc", "edx", "ecx"
			);
		}
		spin_lock(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret, &regs);
		if (curctx != irqctx)
			irqctx->tinfo.task = NULL;
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->handler->end(irq);
	spin_unlock(&desc->lock);

	irq_exit();

	return 1;
}
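
/*
 * can_request_irq - check whether an IRQ line could be requested.
 * Returns nonzero if @irq is a valid line that is either unused, or
 * whose installed handler and @irqflags both agree to share (SA_SHIRQ).
 */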
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	struct irqaction *action;

	if (irq >= NR_IRQS)
		return 0;
	action = irq_desc[irq].action;
	if (action) {
		if (irqflags & action->flags & SA_SHIRQ)
			action = NULL;
	}
	return !action;
}

/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irqflags: Interrupt type flags
 *	@devname: An ASCII name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non-NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *
 *	SA_INTERRUPT		Disable local interrupts while processing
 *
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 *
 */
int request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
		void *dev_id)
{
	int retval;
	struct irqaction * action;

#if 1
	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if (irqflags & SA_SHIRQ) {
		if (!dev_id)
			printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]);
	}
#endif

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);
	return retval;
}

EXPORT_SYMBOL(request_irq);
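
/*
 * Illustrative driver-side sketch (not code from this file; my_handler,
 * my_dev, MY_IRQ and device_raised_interrupt() are hypothetical):
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		struct my_device *dev = dev_id;
 *
 *		if (!device_raised_interrupt(dev))
 *			return IRQ_NONE;	// not ours (shared line)
 *		// ... clear and service the interrupt ...
 *		return IRQ_HANDLED;
 *	}
 *
 *	if (request_irq(MY_IRQ, my_handler, SA_SHIRQ, "mydev", my_dev))
 *		goto fail;			// line busy or invalid
 *	...
 *	free_irq(MY_IRQ, my_dev);		// dev_id selects our handler
 */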

/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	if (irq >= NR_IRQS)
		return;

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);

			/* Wait to make sure it's not being used on another CPU */
			synchronize_irq(irq);
			kfree(action);
			return;
		}
		printk("Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		return;
	}
}

EXPORT_SYMBOL(free_irq);

/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */

static DECLARE_MUTEX(probe_sem);

/**
 *	probe_irq_on	- begin an interrupt autodetect
 *
 *	Commence probing for an interrupt. The interrupts are scanned
 *	and a mask of potential interrupt lines is returned.
 *
 */
unsigned long probe_irq_on(void)
{
	unsigned int i;
	irq_desc_t *desc;
	unsigned long val;
	unsigned long delay;

	down(&probe_sem);
	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!irq_desc[i].action)
			irq_desc[i].handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ barrier();

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}

EXPORT_SYMBOL(probe_irq_on);

/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/**
 *	probe_irq_mask - scan a bitmap of interrupt lines
 *	@val:	mask of interrupts to consider
 *
 *	Scan the ISA bus interrupt lines and return a bitmap of
 *	active interrupts. The interrupt probe logic state is then
 *	returned to its previous value.
 *
 *	Note: we need to scan all the irq's even though we will
 *	only return ISA irq numbers - just so that we reset them
 *	all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
	int i;
	unsigned int mask;

	mask = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (i < 16 && !(status & IRQ_WAITING))
				mask |= 1 << i;

			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	return mask & val;
}

/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 *	probe_irq_off	- end an interrupt autodetect
 *	@val: mask of potential interrupts (unused)
 *
 *	Scans the unused interrupt lines and returns the line which
 *	appears to have triggered the interrupt. If no interrupt was
 *	found then zero is returned. If more than one interrupt is
 *	found then minus the first candidate is returned to indicate
 *	there is doubt.
 *
 *	The interrupt probe logic state is returned to its previous
 *	value.
 *
 *	BUGS: When used in a module (which arguably shouldn't happen)
 *	nothing prevents two IRQ probe callers from overlapping. The
 *	results of this are non-optimal.
 */
int probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);
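
/*
 * Illustrative probing sequence (a sketch, not code from this file;
 * trigger_device_interrupt() and dev are hypothetical):
 *
 *	unsigned long mask = probe_irq_on();
 *	trigger_device_interrupt(dev);	// make the card raise its IRQ
 *	irq = probe_irq_off(mask);	// >0: found, 0: none, <0: ambiguous
 *
 * probe_sem serialises the on/off pair against concurrent probes.
 */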

/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = irq_desc + irq;

	if (desc->handler == &no_irq_type)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);

	register_irq_proc(irq);
	return 0;
}

static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];

#ifdef CONFIG_SMP

static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];

cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };

static int irq_affinity_read_proc(char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

int no_irq_affinity;

static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
					unsigned long count, void *data)
{
	int irq = (long)data, full_count = count, err;
	cpumask_t new_value, tmp;

	if (!irq_desc[irq].handler->set_affinity || no_irq_affinity)
		return -EIO;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp))
		return -EINVAL;

	irq_affinity[irq] = new_value;
	irq_desc[irq].handler->set_affinity(irq,
					cpumask_of_cpu(first_cpu(new_value)));

	return full_count;
}
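
/*
 * The smp_affinity file takes a hexadecimal CPU mask. For example
 * (from a shell, illustrative only):
 *
 *	echo 3 > /proc/irq/10/smp_affinity
 *
 * records CPUs 0 and 1 as allowed for IRQ 10; note that the code
 * above then points the hardware at the first CPU of the mask.
 */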

#endif

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
			irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	{
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}

		smp_affinity_entry[irq] = entry;
	}
#endif
}

void init_irq_proc (void)
{
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	create_prof_cpu_mask(root_irq_dir);
	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++)
		register_irq_proc(i);
}

#ifdef CONFIG_IRQSTACKS
/*
 * These should really be __section__(".bss.page_aligned") as well, but
 * gcc 3.0 and earlier don't handle that correctly.
 */
static char softirq_stack[NR_CPUS * THREAD_SIZE]  __attribute__((__aligned__(THREAD_SIZE)));
static char hardirq_stack[NR_CPUS * THREAD_SIZE]  __attribute__((__aligned__(THREAD_SIZE)));

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

	irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= SOFTIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk("CPU %u irqstacks, hard=%p soft=%p\n",
		cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
}

extern asmlinkage void __do_softirq(void);

asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = softirq_ctx[smp_processor_id()];
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_esp = current_stack_pointer();

		/* build the stack frame on the softirq stack */
		isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
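
		/*
		 * Run __do_softirq() on the per-cpu softirq stack:
		 * xchgl swaps %esp with the new stack pointer held in
		 * %ebx (saving the old %esp in %ebx), and the final
		 * movl restores the original stack pointer from %ebx.
		 */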
		asm volatile(
			"	xchgl	%%ebx,%%esp	\n"
			"	call	__do_softirq	\n"
			"	movl	%%ebx,%%esp	\n"
			: "=b"(isp)
			: "0"(isp)
			: "memory", "cc", "edx", "ecx", "eax"
		);
	}

	local_irq_restore(flags);
}

EXPORT_SYMBOL(do_softirq);
#endif