linux-2.6.git / arch/ppc64/kernel/irq.c
/*
 *  arch/ppc/kernel/irq.c
 *
 *  Derived from arch/i386/kernel/irq.c
 *    Copyright (C) 1992 Linus Torvalds
 *  Adapted from arch/i386 by Gary Thomas
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Cort Dougan
 *  Adapted for Power Macintosh by Paul Mackerras
 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/kallsyms.h>
#include <linux/profile.h>

#include <asm/uaccess.h>
#include <asm/bitops.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/iSeries/LparData.h>
#include <asm/machdep.h>
#include <asm/paca.h>

#ifdef CONFIG_SMP
extern void iSeries_smp_message_recv(struct pt_regs *);
#endif

static void register_irq_proc(unsigned int irq);

irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.lock = SPIN_LOCK_UNLOCKED
	}
};

int __irq_offset_value;
int ppc_spurious_interrupts;
unsigned long lpevent_count;

int
setup_irq(unsigned int irq, struct irqaction *new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = get_irq_desc(irq);

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded without actually
		 * installing a new handler, but that is hardly a problem:
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
		if (desc->handler && desc->handler->startup)
			desc->handler->startup(irq);
		unmask_irq(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);

	register_irq_proc(irq);
	return 0;
}

#ifdef CONFIG_SMP

inline void synchronize_irq(unsigned int irq)
{
	while (get_irq_desc(irq)->status & IRQ_INPROGRESS)
		cpu_relax();
}

EXPORT_SYMBOL(synchronize_irq);

#endif /* CONFIG_SMP */

int request_irq(unsigned int irq,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags, const char *devname, void *dev_id)
{
	struct irqaction *action;
	int retval;

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

	action = (struct irqaction *)
		kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action) {
		printk(KERN_ERR "kmalloc() failed for irq %d !\n", irq);
		return -ENOMEM;
	}

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->dev_id = dev_id;
	action->next = NULL;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);

	return retval;
}

EXPORT_SYMBOL(request_irq);

void free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc = get_irq_desc(irq);
	struct irqaction **p;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	p = &desc->action;
	for (;;) {
		struct irqaction *action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				mask_irq(irq);
			}
			spin_unlock_irqrestore(&desc->lock, flags);

			/* Wait to make sure it's not being used on another CPU */
			synchronize_irq(irq);
			kfree(action);
			return;
		}
		printk("Trying to free already-free IRQ %d\n", irq);
		spin_unlock_irqrestore(&desc->lock, flags);
		break;
	}
	return;
}

EXPORT_SYMBOL(free_irq);
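
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * typical driver-side use of request_irq()/free_irq() above. The IRQ
 * number, handler, name and dev_id cookie are hypothetical; SA_SHIRQ
 * makes the line shareable and SA_SAMPLE_RANDOM feeds the entropy pool,
 * as handled in setup_irq() and handle_irq_event().
 */
#if 0	/* example only */
static int example_cookie;

static irqreturn_t example_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	/* Acknowledge the (hypothetical) device; do minimal work here. */
	return IRQ_HANDLED;
}

static int example_init(void)
{
	int err;

	err = request_irq(9, example_handler, SA_SHIRQ | SA_SAMPLE_RANDOM,
			  "example", &example_cookie);
	if (err)
		return err;	/* -EBUSY if a non-shared handler owns IRQ 9 */
	/* ... device runs; dev_id identifies us on the shared line ... */
	free_irq(9, &example_cookie);
	return 0;
}
#endif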

/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables nest: each
 *	disable_irq_nosync() call needs a matching enable_irq().
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */

inline void disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = get_irq_desc(irq);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		if (!(desc->status & IRQ_PER_CPU))
			desc->status |= IRQ_DISABLED;
		mask_irq(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables nest: for two
 *	disables you need two enables. This
 *	function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */

void disable_irq(unsigned int irq)
{
	irq_desc_t *desc = get_irq_desc(irq);
	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}

EXPORT_SYMBOL(disable_irq);

/**
 *	enable_irq - enable interrupt handling on an irq
 *	@irq: Interrupt to enable
 *
 *	Re-enables the processing of interrupts on this IRQ line
 *	providing no disable_irq calls are now in effect.
 *
 *	This function may be called from IRQ context.
 */

void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = get_irq_desc(irq);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler, irq);
		}
		unmask_irq(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq(%u) unbalanced from %p\n", irq,
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

EXPORT_SYMBOL(enable_irq);
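
/*
 * Illustrative sketch (editor's addition): the disable/enable pair nests,
 * so code that must not race with the handler can bracket itself with
 * disable_irq()/enable_irq(). The irq number and the reprogramming step
 * are hypothetical.
 */
#if 0	/* example only */
static void example_reprogram_device(void)
{
	disable_irq(9);		/* waits for a running handler to finish */
	/* ... safely reprogram hypothetical device registers ... */
	enable_irq(9);		/* depth drops back to 0, line unmasked */
}
#endif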

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *)v, j;
	struct irqaction *action;
	irq_desc_t *desc;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for (j = 0; j < NR_CPUS; j++) {
			if (cpu_online(j))
				seq_printf(p, "CPU%d       ", j);
		}
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		desc = get_irq_desc(i);
		spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action || !action->handler)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for (j = 0; j < NR_CPUS; j++) {
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
		}
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if (desc->handler)
			seq_printf(p, " %s ", desc->handler->typename);
		else
			seq_printf(p, "  None      ");
		seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge  ");
		seq_printf(p, "    %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&desc->lock, flags);
	} else if (i == NR_IRQS)
		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
	return 0;
}
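
/*
 * For orientation (editor's addition): on a two-CPU system the routine
 * above renders /proc/interrupts roughly as follows; the irq numbers,
 * counts and device names are invented for illustration.
 *
 *            CPU0       CPU1
 *  15:      12345       6789   XICS Level     eth0
 *  18:        204          0   XICS Level     ide0, ide1
 * BAD:          5
 */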

int handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
{
	int status = 0;
	int ret, retval = 0;

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();
	return retval;
}

static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	struct irqaction *action;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared!\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");
	action = desc->action;
	do {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			(unsigned long)action->handler);
		printk("\n");
		action = action->next;
	} while (action);
}

static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}

static int noirqdebug;

static int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk("IRQ lockup detection disabled\n");
	return 1;
}

__setup("noirqdebug", noirqdebug_setup);

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled then
 * assume that the IRQ is stuck in some manner.  Drop a diagnostic and try to
 * turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly-functioning
 *  device sharing an IRQ with the failing one)
 *
 * Called under desc->lock
 */
static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	if (action_ret != IRQ_HANDLED) {
		desc->irqs_unhandled++;
		if (action_ret != IRQ_NONE)
			report_bad_irq(irq, desc, action_ret);
	}

	desc->irq_count++;
	if (desc->irq_count < 100000)
		return;

	desc->irq_count = 0;
	if (desc->irqs_unhandled > 99900) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	desc->irqs_unhandled = 0;
}
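
/*
 * Illustrative sketch (editor's addition): a handler that never recognises
 * its device, i.e. always returns IRQ_NONE, is exactly what note_interrupt()
 * above catches. Once 100,000 interrupts have arrived and more than 99,900
 * of them went unhandled, the line is reported and disabled. The handler
 * below is hypothetical.
 */
#if 0	/* example only */
static irqreturn_t buggy_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	/* Forgets to check/clear its device: on a screaming line this
	 * drives desc->irqs_unhandled past the 99,900 threshold. */
	return IRQ_NONE;
}
#endif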

/*
 * Eventually, this should take an array of interrupts and an array size
 * so it can dispatch multiple interrupts.
 */
void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
{
	int status;
	struct irqaction *action;
	int cpu = smp_processor_id();
	irq_desc_t *desc = get_irq_desc(irq);
	irqreturn_t action_ret;
#ifdef CONFIG_IRQSTACKS
	struct thread_info *curtp, *irqtp;
#endif

	kstat_cpu(cpu).irqs[irq]++;

	if (desc->status & IRQ_PER_CPU) {
		/* no locking required for CPU-local interrupts: */
		ack_irq(irq);
		action_ret = handle_irq_event(irq, regs, desc->action);
		desc->handler->end(irq);
		return;
	}

	spin_lock(&desc->lock);
	ack_irq(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier.
	 * WAITING is used by probe to mark irqs that are being tested.
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		if (!action || !action->handler) {
			ppc_spurious_interrupts++;
			printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
			/* We can't call disable_irq here, it would deadlock */
			if (!desc->depth)
				desc->depth = 1;
			desc->status |= IRQ_DISABLED;
			/* This is not a real spurious interrupt: we
			 * still have to EOI it, so jump to out.
			 */
			mask_irq(irq);
			goto out;
		}
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		spin_unlock(&desc->lock);

#ifdef CONFIG_IRQSTACKS
		/* Switch to the irq stack to handle this */
		curtp = current_thread_info();
		irqtp = hardirq_ctx[smp_processor_id()];
		if (curtp != irqtp) {
			irqtp->task = curtp->task;
			irqtp->flags = 0;
			action_ret = call_handle_irq_event(irq, regs, action, irqtp);
			irqtp->task = NULL;
			if (irqtp->flags)
				set_bits(irqtp->flags, &curtp->flags);
		} else
#endif
			action_ret = handle_irq_event(irq, regs, action);

		spin_lock(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
out:
	desc->status &= ~IRQ_INPROGRESS;
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	if (desc->handler) {
		if (desc->handler->end)
			desc->handler->end(irq);
		else if (desc->handler->enable)
			desc->handler->enable(irq);
	}
	spin_unlock(&desc->lock);
}

#ifdef CONFIG_PPC_ISERIES
void do_IRQ(struct pt_regs *regs)
{
	struct paca_struct *lpaca;
	struct ItLpQueue *lpq;

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 2KB free? */
	{
		long sp;

		sp = __get_SP() & (THREAD_SIZE-1);

		if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
			printk("do_IRQ: stack overflow: %ld\n",
				sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	lpaca = get_paca();
#ifdef CONFIG_SMP
	if (lpaca->lppaca.xIntDword.xFields.xIpiCnt) {
		lpaca->lppaca.xIntDword.xFields.xIpiCnt = 0;
		iSeries_smp_message_recv(regs);
	}
#endif /* CONFIG_SMP */
	lpq = lpaca->lpqueue_ptr;
	if (lpq && ItLpQueue_isLpIntPending(lpq))
		lpevent_count += ItLpQueue_process(lpq, regs);

	irq_exit();

	if (lpaca->lppaca.xIntDword.xFields.xDecrInt) {
		lpaca->lppaca.xIntDword.xFields.xDecrInt = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
}

#else	/* CONFIG_PPC_ISERIES */

void do_IRQ(struct pt_regs *regs)
{
	int irq;

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 2KB free? */
	{
		long sp;

		sp = __get_SP() & (THREAD_SIZE-1);

		if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
			printk("do_IRQ: stack overflow: %ld\n",
				sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	irq = ppc_md.get_irq(regs);

	if (irq >= 0)
		ppc_irq_dispatch_handler(regs, irq);
	else
		/* That's not SMP safe ... but who cares? */
		ppc_spurious_interrupts++;

	irq_exit();
}
#endif	/* CONFIG_PPC_ISERIES */

unsigned long probe_irq_on(void)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off(unsigned long irqs)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_off);

unsigned int probe_irq_mask(unsigned long irqs)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_mask);

void __init init_IRQ(void)
{
	static int once = 0;

	if (once)
		return;

	once++;

	ppc_md.init_IRQ();
	irq_ctx_init();
}

static struct proc_dir_entry *root_irq_dir;
static struct proc_dir_entry *irq_dir[NR_IRQS];
static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];

/* Protected by get_irq_desc(irq)->lock. */
#ifdef CONFIG_IRQ_ALL_CPUS
cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
#else  /* CONFIG_IRQ_ALL_CPUS */
cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_NONE };
#endif /* CONFIG_IRQ_ALL_CPUS */

static int irq_affinity_read_proc(char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
					unsigned long count, void *data)
{
	unsigned int irq = (long)data;
	irq_desc_t *desc = get_irq_desc(irq);
	int ret;
	cpumask_t new_value, tmp;

	if (!desc->handler->set_affinity)
		return -EIO;

	ret = cpumask_parse(buffer, count, new_value);
	if (ret != 0)
		return ret;

	/*
	 * We check for CPU_MASK_ALL in xics to send irqs to all cpus.
	 * In some cases CPU_MASK_ALL is smaller than the cpumask (eg
	 * NR_CPUS == 32 and cpumask is a long), so we mask it here to
	 * be consistent.
	 */
	cpus_and(new_value, new_value, CPU_MASK_ALL);

	/*
	 * Grab lock here so cpu_online_map can't change, and also
	 * protect irq_affinity[].
	 */
	spin_lock(&desc->lock);

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp)) {
		ret = -EINVAL;
		goto out;
	}

	irq_affinity[irq] = new_value;
	desc->handler->set_affinity(irq, new_value);
	ret = count;

out:
	spin_unlock(&desc->lock);
	return ret;
}
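
/*
 * Usage sketch (editor's addition): the proc handlers above expose a hex
 * cpumask at /proc/irq/<n>/smp_affinity. A minimal userspace program to
 * pin a hypothetical IRQ 19 to CPUs 0-1 might look like this; error
 * handling is reduced to the essentials.
 */
#if 0	/* userspace example only */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/irq/19/smp_affinity", "w");

	if (!f)
		return 1;
	fprintf(f, "3\n");		/* bitmask: CPU0 | CPU1 */
	return fclose(f) ? 1 : 0;	/* write fails if no targeted CPU is online */
}
#endif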

#define MAX_NAMELEN 10

static void register_irq_proc(unsigned int irq)
{
	struct proc_dir_entry *entry;
	char name[MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == NULL) || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

	/* create /proc/irq/1234/smp_affinity */
	entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

	if (entry) {
		entry->nlink = 1;
		entry->data = (void *)(long)irq;
		entry->read_proc = irq_affinity_read_proc;
		entry->write_proc = irq_affinity_write_proc;
	}

	smp_affinity_entry[irq] = entry;
}

void init_irq_proc(void)
{
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);

	/* create /proc/irq/prof_cpu_mask */
	create_prof_cpu_mask(root_irq_dir);

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq(i) {
		if (get_irq_desc(i)->handler == NULL)
			continue;
		register_irq_proc(i);
	}
}

irqreturn_t no_action(int irq, void *dev, struct pt_regs *regs)
{
	return IRQ_NONE;
}

#ifndef CONFIG_PPC_ISERIES
/*
 * Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
 */

#define UNDEFINED_IRQ 0xffffffff
unsigned int virt_irq_to_real_map[NR_IRQS];

/*
 * Don't use virtual irqs 0, 1, 2 for devices.
 * The pcnet32 driver considers interrupt numbers < 2 to be invalid,
 * and 2 is the XICS IPI interrupt.
 * We limit virtual irqs to 17 less than NR_IRQS so that when we
 * offset them by 16 (to reserve the first 16 for ISA interrupts)
 * we don't end up with an interrupt number >= NR_IRQS.
 */
#define MIN_VIRT_IRQ	3
#define MAX_VIRT_IRQ	(NR_IRQS - NUM_ISA_INTERRUPTS - 1)
#define NR_VIRT_IRQS	(MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1)

void
virt_irq_init(void)
{
	int i;
	for (i = 0; i < NR_IRQS; i++)
		virt_irq_to_real_map[i] = UNDEFINED_IRQ;
}

/* Create a mapping for a real_irq if it doesn't already exist.
 * Return the virtual irq as a convenience.
 */
int virt_irq_create_mapping(unsigned int real_irq)
{
	unsigned int virq, first_virq;
	static int warned;

	if (naca->interrupt_controller == IC_OPEN_PIC)
		return real_irq;	/* no mapping for openpic (for now) */

	/* don't map interrupts < MIN_VIRT_IRQ */
	if (real_irq < MIN_VIRT_IRQ) {
		virt_irq_to_real_map[real_irq] = real_irq;
		return real_irq;
	}

	/* map to a number between MIN_VIRT_IRQ and MAX_VIRT_IRQ */
	virq = real_irq;
	if (virq > MAX_VIRT_IRQ)
		virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;

	/* search for this number or a free slot */
	first_virq = virq;
	while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
		if (virt_irq_to_real_map[virq] == real_irq)
			return virq;
		if (++virq > MAX_VIRT_IRQ)
			virq = MIN_VIRT_IRQ;
		if (virq == first_virq)
			goto nospace;	/* oops, no free slots */
	}

	virt_irq_to_real_map[virq] = real_irq;
	return virq;

 nospace:
	if (!warned) {
		printk(KERN_CRIT "Interrupt table is full\n");
		printk(KERN_CRIT "Increase NR_IRQS (currently %d) "
		       "in your kernel sources and rebuild.\n", NR_IRQS);
		warned = 1;
	}
	return NO_IRQ;
}
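
/*
 * Illustrative sketch (editor's addition): how platform code might consume
 * the mapping above when it discovers an interrupt source. The hardware
 * source number is hypothetical; the NO_IRQ check matters because the
 * table can fill up.
 */
#if 0	/* example only */
static int example_map_source(unsigned int hw_irq)
{
	int virq = virt_irq_create_mapping(hw_irq);

	if (virq == NO_IRQ)
		return -ENOSPC;	/* table full: see the nospace path above */
	/* virq (plus any ISA offset the platform applies) is what drivers
	 * then pass to request_irq(). */
	return virq;
}
#endif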

/*
 * In most cases we will get a hit on the very first slot checked in
 * virt_irq_to_real_map.  Only when there are a large number of
 * IRQs will this be expensive.
 */
unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
{
	unsigned int virq;
	unsigned int first_virq;

	virq = real_irq;

	if (virq > MAX_VIRT_IRQ)
		virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;

	first_virq = virq;

	do {
		if (virt_irq_to_real_map[virq] == real_irq)
			return virq;

		virq++;

		/* wrap: MAX_VIRT_IRQ itself is a valid slot, hence '>' */
		if (virq > MAX_VIRT_IRQ)
			virq = 0;

	} while (first_virq != virq);

	return NO_IRQ;
}

#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS];
struct thread_info *hardirq_ctx[NR_CPUS];

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = SOFTIRQ_OFFSET;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curtp, *irqtp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curtp = current_thread_info();
		irqtp = softirq_ctx[smp_processor_id()];
		irqtp->task = curtp->task;
		call_do_softirq(irqtp);
		irqtp->task = NULL;
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);

#endif /* CONFIG_IRQSTACKS */
972