1 /* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
2  * irq.c: UltraSparc IRQ handling/init/registry.
3  *
4  * Copyright (C) 1997  David S. Miller  (davem@caip.rutgers.edu)
5  * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
6  * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
7  */
8
9 #include <linux/config.h>
10 #include <linux/module.h>
11 #include <linux/sched.h>
12 #include <linux/ptrace.h>
13 #include <linux/errno.h>
14 #include <linux/kernel_stat.h>
15 #include <linux/signal.h>
16 #include <linux/mm.h>
17 #include <linux/interrupt.h>
18 #include <linux/slab.h>
19 #include <linux/random.h>
20 #include <linux/init.h>
21 #include <linux/delay.h>
22 #include <linux/proc_fs.h>
23 #include <linux/seq_file.h>
24
25 #include <asm/ptrace.h>
26 #include <asm/processor.h>
27 #include <asm/atomic.h>
28 #include <asm/system.h>
29 #include <asm/irq.h>
30 #include <asm/sbus.h>
31 #include <asm/iommu.h>
32 #include <asm/upa.h>
33 #include <asm/oplib.h>
34 #include <asm/timer.h>
35 #include <asm/smp.h>
36 #include <asm/hardirq.h>
37 #include <asm/starfire.h>
38 #include <asm/uaccess.h>
39 #include <asm/cache.h>
40 #include <asm/cpudata.h>
41
42 #ifdef CONFIG_SMP
43 static void distribute_irqs(void);
44 #endif
45
46 /* UPA nodes send an interrupt packet to the UltraSparc with the low
47  * 5 (7 on Starfire) bits of the first data register holding the IRQ
48  * identifier being delivered.  We must translate this into a non-vector
49  * IRQ so we can set the softint on this cpu.
50  *
51  * To make processing these packets efficient and race free we use
52  * an array of irq buckets below.  The interrupt vector handler in
53  * entry.S feeds incoming packets into per-cpu pil-indexed lists.
54  * The IVEC handler does not need to act atomically; the PIL dispatch
55  * code uses CAS to get an atomic snapshot of the list and clear it
56  * at the same time.
57  */
58
59 struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
60
61 /* This has to be in the main kernel image, it cannot be
62  * turned into per-cpu data.  The reason is that the main
63  * kernel image is locked into the TLB and this structure
64  * is accessed from the vectored interrupt trap handler.  If
65  * access to this structure takes a TLB miss it could cause
66  * the 5-level sparc v9 trap stack to overflow.
67  */
68 struct irq_work_struct {
69         unsigned int    irq_worklists[16];
70 };
71 struct irq_work_struct __irq_work[NR_CPUS];
72 #define irq_work(__cpu, __pil)  &(__irq_work[(__cpu)].irq_worklists[(__pil)])
73
74 #ifdef CONFIG_PCI
75 /* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
76  * It is used for PCI only to synchronize DMA transfers with IRQ delivery
77  * for devices behind busses other than APB on Sabre systems.
78  *
79  * Currently these physical addresses are just config space accesses
80  * to the command register for that device.
81  */
82 unsigned long pci_dma_wsync;
83 unsigned long dma_sync_reg_table[256];
84 unsigned char dma_sync_reg_table_entry = 0;
85 #endif
86
87 /* This is based upon code in the 32-bit Sparc kernel written mostly by
88  * David Redman (djhr@tadpole.co.uk).
89  */
90 #define MAX_STATIC_ALLOC        4
91 static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
92 static int static_irq_count;
93
94 /* This is exported so that fast IRQ handlers can get at it... -DaveM */
95 struct irqaction *irq_action[NR_IRQS+1] = {
96           NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
97           NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
98 };
99
100 /* This only synchronizes entities which modify IRQ handler
101  * state and some selected user-level spots that want to
102  * read things in the table.  IRQ handler processing orders
103  * its accesses such that no locking is needed.
104  */
105 static spinlock_t irq_action_lock = SPIN_LOCK_UNLOCKED;
106
107 static void register_irq_proc (unsigned int irq);
108
109 /*
110  * The upper 16 bits (2 bytes) of irqaction->flags hold the ino.
111  * irqaction->mask holds the smp affinity information.
112  */
113 #define put_ino_in_irqaction(action, irq) \
114         action->flags &= 0xffffffffffffUL; \
115         if (__bucket(irq) == &pil0_dummy_bucket) \
116                 action->flags |= 0xdeadUL << 48;  \
117         else \
118                 action->flags |= __irq_ino(irq) << 48;
119 #define get_ino_in_irqaction(action)    (action->flags >> 48)
120
121 #define put_smpaff_in_irqaction(action, smpaff) (action)->mask = (smpaff)
122 #define get_smpaff_in_irqaction(action)         ((action)->mask)
123
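/* Seq-file show routine used for /proc/interrupts: print the per-cpu
 * interrupt counts and the handler names registered at each PIL.
 */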
124 int show_interrupts(struct seq_file *p, void *v)
125 {
126         unsigned long flags;
127         int i = *(loff_t *) v;
128         struct irqaction *action;
129 #ifdef CONFIG_SMP
130         int j;
131 #endif
132
133         spin_lock_irqsave(&irq_action_lock, flags);
134         if (i <= NR_IRQS) {
135                 if (!(action = *(i + irq_action)))
136                         goto out_unlock;
137                 seq_printf(p, "%3d: ", i);
138 #ifndef CONFIG_SMP
139                 seq_printf(p, "%10u ", kstat_irqs(i));
140 #else
141                 for (j = 0; j < NR_CPUS; j++) {
142                         if (!cpu_online(j))
143                                 continue;
144                         seq_printf(p, "%10u ",
145                                    kstat_cpu(j).irqs[i]);
146                 }
147 #endif
148                 seq_printf(p, " %s:%lx", action->name,
149                            get_ino_in_irqaction(action));
150                 for (action = action->next; action; action = action->next) {
151                         seq_printf(p, ", %s:%lx", action->name,
152                                    get_ino_in_irqaction(action));
153                 }
154                 seq_putc(p, '\n');
155         }
156 out_unlock:
157         spin_unlock_irqrestore(&irq_action_lock, flags);
158
159         return 0;
160 }
161
162 /* Now these are always passed a true fully specified sun4u INO. */
163 void enable_irq(unsigned int irq)
164 {
165         struct ino_bucket *bucket = __bucket(irq);
166         unsigned long imap;
167         unsigned long tid;
168
169         imap = bucket->imap;
170         if (imap == 0UL)
171                 return;
172
173         preempt_disable();
174
175         if (tlb_type == cheetah || tlb_type == cheetah_plus) {
176                 unsigned long ver;
177
178                 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
179                 if ((ver >> 32) == 0x003e0016) {
180                         /* We set it to our JBUS ID. */
181                         __asm__ __volatile__("ldxa [%%g0] %1, %0"
182                                              : "=r" (tid)
183                                              : "i" (ASI_JBUS_CONFIG));
184                         tid = ((tid & (0x1fUL<<17)) << 9);
185                         tid &= IMAP_TID_JBUS;
186                 } else {
187                         /* We set it to our Safari AID. */
188                         __asm__ __volatile__("ldxa [%%g0] %1, %0"
189                                              : "=r" (tid)
190                                              : "i" (ASI_SAFARI_CONFIG));
191                         tid = ((tid & (0x3ffUL<<17)) << 9);
192                         tid &= IMAP_AID_SAFARI;
193                 }
194         } else if (this_is_starfire == 0) {
195                 /* We set it to our UPA MID. */
196                 __asm__ __volatile__("ldxa [%%g0] %1, %0"
197                                      : "=r" (tid)
198                                      : "i" (ASI_UPA_CONFIG));
199                 tid = ((tid & UPA_CONFIG_MID) << 9);
200                 tid &= IMAP_TID_UPA;
201         } else {
202                 tid = (starfire_translate(imap, smp_processor_id()) << 26);
203                 tid &= IMAP_TID_UPA;
204         }
205
206         /* NOTE NOTE NOTE: IGN and INO are read-only.  IGN is a product
207          * of this SYSIO's preconfigured IGN in the SYSIO Control
208          * Register; the hardware just mirrors that value here.
209          * However, for Graphics and UPA Slave devices the full
210          * IMAP_INR field can be set by the programmer here.
211          *
212          * Things like FFB can now be handled via the new IRQ mechanism.
213          */
214         upa_writel(tid | IMAP_VALID, imap);
215
216         preempt_enable();
217 }
218
219 /* This now gets passed true ino's as well. */
220 void disable_irq(unsigned int irq)
221 {
222         struct ino_bucket *bucket = __bucket(irq);
223         unsigned long imap;
224
225         imap = bucket->imap;
226         if (imap != 0UL) {
227                 u32 tmp;
228
229                 /* NOTE: We do not want to futz with the IRQ clear registers
230                  *       and move the state to IDLE; the SCSI code calls
231                  *       disable_irq() to assure atomicity in its queuecommand
232                  *       paths, and doing so would make us lose interrupts.
233                  */
234                 tmp = upa_readl(imap);
235                 tmp &= ~IMAP_VALID;
236                 upa_writel(tmp, imap);
237         }
238 }
239
240 /* The timer is the one "weird" interrupt which is generated by
241  * the CPU %tick register and not by some normal vectored interrupt
242  * source.  To handle this special case, we use this dummy INO bucket.
243  */
244 static struct ino_bucket pil0_dummy_bucket = {
245         0,      /* irq_chain */
246         0,      /* pil */
247         0,      /* pending */
248         0,      /* flags */
249         0,      /* __unused */
250         NULL,   /* irq_info */
251         0UL,    /* iclr */
252         0UL,    /* imap */
253 };
254
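/* Translate a (pil, ino fixup, iclr, imap) tuple into an IRQ cookie.
 * PIL 0 is the timer special case and maps to the dummy bucket; all
 * other callers get the matching ivector_table bucket initialized
 * with their ICLR/IMAP register addresses.
 */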
255 unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
256 {
257         struct ino_bucket *bucket;
258         int ino;
259
260         if (pil == 0) {
261                 if (iclr != 0UL || imap != 0UL) {
262                         prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
263                                     iclr, imap);
264                         prom_halt();
265                 }
266                 return __irq(&pil0_dummy_bucket);
267         }
268
269         /* RULE: Both must be specified in all other cases. */
270         if (iclr == 0UL || imap == 0UL) {
271                 prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
272                             pil, inofixup, iclr, imap);
273                 prom_halt();
274         }
275         
276         ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
277         if (ino > NUM_IVECS) {
278                 prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
279                             ino, pil, inofixup, iclr, imap);
280                 prom_halt();
281         }
282
283         /* Ok, looks good, set it up.  Don't touch the irq_chain or
284          * the pending flag.
285          */
286         bucket = &ivector_table[ino];
287         if ((bucket->flags & IBF_ACTIVE) ||
288             (bucket->irq_info != NULL)) {
289                 /* This is a gross fatal error if it happens here. */
290                 prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
291                 prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
292                             ino, pil, inofixup, iclr, imap);
293                 prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
294                             bucket->pil, bucket->iclr, bucket->imap);
295                 prom_printf("IRQ: Cannot continue, halting...\n");
296                 prom_halt();
297         }
298         bucket->imap  = imap;
299         bucket->iclr  = iclr;
300         bucket->pil   = pil;
301         bucket->flags = 0;
302
303         bucket->irq_info = NULL;
304
305         return __irq(bucket);
306 }
307
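/* Link a pending bucket onto this cpu's per-PIL work list.  Interrupt
 * delivery is masked via %pstate around the update so the interrupt
 * vector handler cannot race with us; the previous %pstate value is
 * restored afterwards.
 */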
308 static void atomic_bucket_insert(struct ino_bucket *bucket)
309 {
310         unsigned long pstate;
311         unsigned int *ent;
312
313         __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
314         __asm__ __volatile__("wrpr %0, %1, %%pstate"
315                              : : "r" (pstate), "i" (PSTATE_IE));
316         ent = irq_work(smp_processor_id(), bucket->pil);
317         bucket->irq_chain = *ent;
318         *ent = __irq(bucket);
319         __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
320 }
321
322 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
323                 unsigned long irqflags, const char *name, void *dev_id)
324 {
325         struct irqaction *action, *tmp = NULL;
326         struct ino_bucket *bucket = __bucket(irq);
327         unsigned long flags;
328         int pending = 0;
329
330         if ((bucket != &pil0_dummy_bucket) &&
331             (bucket < &ivector_table[0] ||
332              bucket >= &ivector_table[NUM_IVECS])) {
333                 unsigned int *caller;
334
335                 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
336                 printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
337                        "from %p, irq %08x.\n", caller, irq);
338                 return -EINVAL;
339         }       
340         if (!handler)
341             return -EINVAL;
342
343         if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
344                 /*
345                  * This function might sleep, we want to call it first,
346                  * outside of the atomic block. In SA_STATIC_ALLOC case,
347                  * random driver's kmalloc will fail, but it is safe.
348                  * If already initialized, random driver will not reinit.
349                  * Yes, this might clear the entropy pool if the wrong
350                  * driver is loaded without actually installing a new
351                  * handler, but that is hardly a problem; only the
352                  * sysadmin is able to do this.
353                  */
354                 rand_initialize_irq(irq);
355         }
356
357         spin_lock_irqsave(&irq_action_lock, flags);
358
359         action = *(bucket->pil + irq_action);
360         if (action) {
361                 if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
362                         for (tmp = action; tmp->next; tmp = tmp->next)
363                                 ;
364                 else {
365                         spin_unlock_irqrestore(&irq_action_lock, flags);
366                         return -EBUSY;
367                 }
368                 action = NULL;          /* Or else! */
369         }
370
371         /* If this is flagged as statically allocated then we use our
372          * private struct which is never freed.
373          */
374         if (irqflags & SA_STATIC_ALLOC) {
375             if (static_irq_count < MAX_STATIC_ALLOC)
376                 action = &static_irqaction[static_irq_count++];
377             else
378                 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
379                        "using kmalloc\n", irq, name);
380         }       
381         if (action == NULL)
382             action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
383                                                  GFP_ATOMIC);
384         
385         if (!action) { 
386                 spin_unlock_irqrestore(&irq_action_lock, flags);
387                 return -ENOMEM;
388         }
389
390         if (bucket == &pil0_dummy_bucket) {
391                 bucket->irq_info = action;
392                 bucket->flags |= IBF_ACTIVE;
393         } else {
394                 if ((bucket->flags & IBF_ACTIVE) != 0) {
395                         void *orig = bucket->irq_info;
396                         void **vector = NULL;
397
398                         if ((bucket->flags & IBF_PCI) == 0) {
399                                 printk("IRQ: Trying to share non-PCI bucket.\n");
400                                 goto free_and_ebusy;
401                         }
402                         if ((bucket->flags & IBF_MULTI) == 0) {
403                                 vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC);
404                                 if (vector == NULL)
405                                         goto free_and_enomem;
406
407                                 /* We might have slept. */
408                                 if ((bucket->flags & IBF_MULTI) != 0) {
409                                         int ent;
410
411                                         kfree(vector);
412                                         vector = (void **)bucket->irq_info;
413                                         for(ent = 0; ent < 4; ent++) {
414                                                 if (vector[ent] == NULL) {
415                                                         vector[ent] = action;
416                                                         break;
417                                                 }
418                                         }
419                                         if (ent == 4)
420                                                 goto free_and_ebusy;
421                                 } else {
422                                         vector[0] = orig;
423                                         vector[1] = action;
424                                         vector[2] = NULL;
425                                         vector[3] = NULL;
426                                         bucket->irq_info = vector;
427                                         bucket->flags |= IBF_MULTI;
428                                 }
429                         } else {
430                                 int ent;
431
432                                 vector = (void **)orig;
433                                 for (ent = 0; ent < 4; ent++) {
434                                         if (vector[ent] == NULL) {
435                                                 vector[ent] = action;
436                                                 break;
437                                         }
438                                 }
439                                 if (ent == 4)
440                                         goto free_and_ebusy;
441                         }
442                 } else {
443                         bucket->irq_info = action;
444                         bucket->flags |= IBF_ACTIVE;
445                 }
446                 pending = bucket->pending;
447                 if (pending)
448                         bucket->pending = 0;
449         }
450
451         action->handler = handler;
452         action->flags = irqflags;
453         action->name = name;
454         action->next = NULL;
455         action->dev_id = dev_id;
456         put_ino_in_irqaction(action, irq);
457         put_smpaff_in_irqaction(action, CPU_MASK_NONE);
458
459         if (tmp)
460                 tmp->next = action;
461         else
462                 *(bucket->pil + irq_action) = action;
463
464         enable_irq(irq);
465
466         /* We ate the IVEC already; this makes sure it does not get lost. */
467         if (pending) {
468                 atomic_bucket_insert(bucket);
469                 set_softint(1 << bucket->pil);
470         }
471         spin_unlock_irqrestore(&irq_action_lock, flags);
472         if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC)))
473                 register_irq_proc(__irq_ino(irq));
474
475 #ifdef CONFIG_SMP
476         distribute_irqs();
477 #endif
478         return 0;
479
480 free_and_ebusy:
481         kfree(action);
482         spin_unlock_irqrestore(&irq_action_lock, flags);
483         return -EBUSY;
484
485 free_and_enomem:
486         kfree(action);
487         spin_unlock_irqrestore(&irq_action_lock, flags);
488         return -ENOMEM;
489 }
490
491 EXPORT_SYMBOL(request_irq);
492
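/* Unregister the handler matching dev_id.  For shared PCI buckets the
 * handler vector is shrunk back to a single entry when only one user
 * remains, and the IMAP is disabled once no active bucket references
 * it any more.
 */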
493 void free_irq(unsigned int irq, void *dev_id)
494 {
495         struct irqaction *action;
496         struct irqaction *tmp = NULL;
497         unsigned long flags;
498         struct ino_bucket *bucket = __bucket(irq), *bp;
499
500         if ((bucket != &pil0_dummy_bucket) &&
501             (bucket < &ivector_table[0] ||
502              bucket >= &ivector_table[NUM_IVECS])) {
503                 unsigned int *caller;
504
505                 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
506                 printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
507                        "from %p, irq %08x.\n", caller, irq);
508                 return;
509         }
510         
511         spin_lock_irqsave(&irq_action_lock, flags);
512
513         action = *(bucket->pil + irq_action);
514         if (!action || !action->handler) {
515                 printk("Freeing free IRQ %d\n", bucket->pil);
516                 spin_unlock_irqrestore(&irq_action_lock, flags);
517                 return;
518         }
518         if (dev_id) {
519                 for ( ; action; action = action->next) {
520                         if (action->dev_id == dev_id)
521                                 break;
522                         tmp = action;
523                 }
524                 if (!action) {
525                         printk("Trying to free free shared IRQ %d\n", bucket->pil);
526                         spin_unlock_irqrestore(&irq_action_lock, flags);
527                         return;
528                 }
529         } else if (action->flags & SA_SHIRQ) {
530                 printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
531                 spin_unlock_irqrestore(&irq_action_lock, flags);
532                 return;
533         }
534
535         if (action->flags & SA_STATIC_ALLOC) {
536                 printk("Attempt to free statically allocated IRQ %d (%s)\n",
537                        bucket->pil, action->name);
538                 spin_unlock_irqrestore(&irq_action_lock, flags);
539                 return;
540         }
541
542         if (action && tmp)
543                 tmp->next = action->next;
544         else
545                 *(bucket->pil + irq_action) = action->next;
546
547         spin_unlock_irqrestore(&irq_action_lock, flags);
548
549         synchronize_irq(irq);
550
551         spin_lock_irqsave(&irq_action_lock, flags);
552
553         if (bucket != &pil0_dummy_bucket) {
554                 unsigned long imap = bucket->imap;
555                 void **vector, *orig;
556                 int ent;
557
558                 orig = bucket->irq_info;
559                 vector = (void **)orig;
560
561                 if ((bucket->flags & IBF_MULTI) != 0) {
562                         int other = 0;
563                         void *orphan = NULL;
564                         for (ent = 0; ent < 4; ent++) {
565                                 if (vector[ent] == action)
566                                         vector[ent] = NULL;
567                                 else if (vector[ent] != NULL) {
568                                         orphan = vector[ent];
569                                         other++;
570                                 }
571                         }
572
573                         /* Only free when no other shared irq
574                          * uses this bucket.
575                          */
576                         if (other) {
577                                 if (other == 1) {
578                                         /* Convert back to non-shared bucket. */
579                                         bucket->irq_info = orphan;
580                                         bucket->flags &= ~(IBF_MULTI);
581                                         kfree(vector);
582                                 }
583                                 goto out;
584                         }
585                 } else {
586                         bucket->irq_info = NULL;
587                 }
588
589                 /* This unique interrupt source is now inactive. */
590                 bucket->flags &= ~IBF_ACTIVE;
591
592                 /* See if any other buckets share this bucket's IMAP
593                  * and are still active.
594                  */
595                 for (ent = 0; ent < NUM_IVECS; ent++) {
596                         bp = &ivector_table[ent];
597                         if (bp != bucket        &&
598                             bp->imap == imap    &&
599                             (bp->flags & IBF_ACTIVE) != 0)
600                                 break;
601                 }
602
603                 /* Only disable when no other sub-irq levels of
604                  * the same IMAP are active.
605                  */
606                 if (ent == NUM_IVECS)
607                         disable_irq(irq);
608         }
609
610 out:
611         kfree(action);
612         spin_unlock_irqrestore(&irq_action_lock, flags);
613 }
614
615 EXPORT_SYMBOL(free_irq);
616
617 #ifdef CONFIG_SMP
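/* Spin until any handler currently running for this bucket has
 * finished (see the IBF_INPROGRESS manipulation in handler_irq()).
 */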
618 void synchronize_irq(unsigned int irq)
619 {
620         struct ino_bucket *bucket = __bucket(irq);
621
622 #if 0
623         /* The following is how I wish I could implement this.
624          * Unfortunately the ICLR registers are read-only, you can
625          * only write ICLR_foo values to them.  To get the current
626          * IRQ status you would need to get at the IRQ diag registers
627          * in the PCI/SBUS controller and the layout of those vary
628          * from one controller to the next, sigh... -DaveM
629          */
630         unsigned long iclr = bucket->iclr;
631
632         while (1) {
633                 u32 tmp = upa_readl(iclr);
634                 
635                 if (tmp == ICLR_TRANSMIT ||
636                     tmp == ICLR_PENDING) {
637                         cpu_relax();
638                         continue;
639                 }
640                 break;
641         }
642 #else
643         /* So we have to do this with a INPROGRESS bit just like x86.  */
644         while (bucket->flags & IBF_INPROGRESS)
645                 cpu_relax();
646 #endif
647 }
648 #endif /* CONFIG_SMP */
649
650 void catch_disabled_ivec(struct pt_regs *regs)
651 {
652         int cpu = smp_processor_id();
653         struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));
654
655         /* We can actually see this on Ultra/PCI PCI cards, which are bridges
656          * to other devices.  Here a single IMAP can enable potentially multiple
657          * unique interrupt sources (each of which has its own unique ICLR register).
658          *
659          * So what we do is just record that the IVEC arrived; when the IRQ is
660          * registered for real, the request_irq() code will check the pending bit
661          * and signal a local CPU interrupt for it.
662          */
663 #if 0
664         printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
665                bucket - &ivector_table[0], regs->tpc);
666 #endif
667         *irq_work(cpu, 0) = 0;
668         bucket->pending = 1;
669 }
670
671 /* Tune this... */
672 #define FORWARD_VOLUME          12
673
674 #ifdef CONFIG_SMP
675
676 static inline void redirect_intr(int cpu, struct ino_bucket *bp)
677 {
678         /* Ok, here is what is going on:
679          * 1) Retargeting IRQs on Starfire is very
680          *    expensive so just forget about it on them.
681          * 2) Moving around very high priority interrupts
682          *    is a losing game.
683          * 3) If the current cpu is idle, interrupts are
684          *    useful work, so keep them here.  But do not
685          *    pass them to our neighbour unless it is very idle.
686          * 4) If the sysadmin explicitly asks for directed intrs,
687          *    Just Do It.
688          */
689         struct irqaction *ap = bp->irq_info;
690         cpumask_t cpu_mask;
691         unsigned int buddy, ticks;
692
693         cpu_mask = get_smpaff_in_irqaction(ap);
694         cpus_and(cpu_mask, cpu_mask, cpu_online_map);
695         if (cpus_empty(cpu_mask))
696                 cpu_mask = cpu_online_map;
697
698         if (this_is_starfire != 0 ||
699             bp->pil >= 10 || current->pid == 0)
700                 goto out;
701
702         /* 'cpu' is the MID (ie. UPAID), calculate the MID
703          * of our buddy.
704          */
705         buddy = cpu + 1;
706         if (buddy >= NR_CPUS)
707                 buddy = 0;
708
709         ticks = 0;
710         while (!cpu_isset(buddy, cpu_mask)) {
711                 if (++buddy >= NR_CPUS)
712                         buddy = 0;
713                 if (++ticks > NR_CPUS) {
714                         put_smpaff_in_irqaction(ap, CPU_MASK_NONE);
715                         goto out;
716                 }
717         }
718
719         if (buddy == cpu)
720                 goto out;
721
722         /* Voo-doo programming. */
723         if (cpu_data(buddy).idle_volume < FORWARD_VOLUME)
724                 goto out;
725
726         /* This just so happens to be correct on Cheetah
727          * at the moment.
728          */
729         buddy <<= 26;
730
731         /* Push it to our buddy. */
732         upa_writel(buddy | IMAP_VALID, bp->imap);
733
734 out:
735         return;
736 }
737
738 #endif
739
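/* Main interrupt dispatch, called at PIL 'irq': atomically grab this
 * cpu's pending bucket list for that level, run the registered
 * handler(s) for each bucket, and write ICLR_IDLE so the source can
 * interrupt again.
 */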
740 void handler_irq(int irq, struct pt_regs *regs)
741 {
742         struct ino_bucket *bp, *nbp;
743         int cpu = smp_processor_id();
744
745 #ifndef CONFIG_SMP
746         /*
747          * Check for TICK_INT on level 14 softint.
748          */
749         {
750                 unsigned long clr_mask = 1 << irq;
751                 unsigned long tick_mask = tick_ops->softint_mask;
752
753                 if ((irq == 14) && (get_softint() & tick_mask)) {
754                         irq = 0;
755                         clr_mask = tick_mask;
756                 }
757                 clear_softint(clr_mask);
758         }
759 #else
760         int should_forward = 1;
761
762         clear_softint(1 << irq);
763 #endif
764
765         irq_enter();
766         kstat_this_cpu.irqs[irq]++;
767
768         /* Sliiiick... */
769 #ifndef CONFIG_SMP
770         bp = ((irq != 0) ?
771               __bucket(xchg32(irq_work(cpu, irq), 0)) :
772               &pil0_dummy_bucket);
773 #else
774         bp = __bucket(xchg32(irq_work(cpu, irq), 0));
775 #endif
776         for ( ; bp != NULL; bp = nbp) {
777                 unsigned char flags = bp->flags;
778                 unsigned char random = 0;
779
780                 nbp = __bucket(bp->irq_chain);
781                 bp->irq_chain = 0;
782
783                 bp->flags |= IBF_INPROGRESS;
784
785                 if ((flags & IBF_ACTIVE) != 0) {
786 #ifdef CONFIG_PCI
787                         if ((flags & IBF_DMA_SYNC) != 0) {
788                                 upa_readl(dma_sync_reg_table[bp->synctab_ent]);
789                                 upa_readq(pci_dma_wsync);
790                         }
791 #endif
792                         if ((flags & IBF_MULTI) == 0) {
793                                 struct irqaction *ap = bp->irq_info;
794                                 ap->handler(__irq(bp), ap->dev_id, regs);
795                                 random |= ap->flags & SA_SAMPLE_RANDOM;
796                         } else {
797                                 void **vector = (void **)bp->irq_info;
798                                 int ent;
799                                 for (ent = 0; ent < 4; ent++) {
800                                         struct irqaction *ap = vector[ent];
801                                         if (ap != NULL) {
802                                                 ap->handler(__irq(bp), ap->dev_id, regs);
803                                                 random |= ap->flags & SA_SAMPLE_RANDOM;
804                                         }
805                                 }
806                         }
807                         /* Only the dummy bucket lacks IMAP/ICLR. */
808                         if (bp->pil != 0) {
809 #ifdef CONFIG_SMP
810                                 if (should_forward) {
811                                         redirect_intr(cpu, bp);
812                                         should_forward = 0;
813                                 }
814 #endif
815                                 upa_writel(ICLR_IDLE, bp->iclr);
816                                 /* Test and add entropy */
817                                 if (random)
818                                         add_interrupt_randomness(irq);
819                         }
820                 } else
821                         bp->pending = 1;
822
823                 bp->flags &= ~IBF_INPROGRESS;
824         }
825         irq_exit();
826 }
827
828 #ifdef CONFIG_BLK_DEV_FD
829 extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);
830
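/* Special low-overhead floppy interrupt path: clear the per-cpu work
 * entry by hand, call the floppy handler directly and then idle the
 * bucket's ICLR register.
 */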
831 void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
832 {
833         struct irqaction *action = *(irq + irq_action);
834         struct ino_bucket *bucket;
835         int cpu = smp_processor_id();
836
837         irq_enter();
838         kstat_this_cpu.irqs[irq]++;
839
840         *(irq_work(cpu, irq)) = 0;
841         bucket = get_ino_in_irqaction(action) + ivector_table;
842
843         bucket->flags |= IBF_INPROGRESS;
844
845         floppy_interrupt(irq, dev_cookie, regs);
846         upa_writel(ICLR_IDLE, bucket->iclr);
847
848         bucket->flags &= ~IBF_INPROGRESS;
849
850         irq_exit();
851 }
852 #endif
853
854 /* The following assumes that the branch lies before the place we
855  * are branching to.  This is the case for a trap vector...
856  * You have been warned.
857  */
858 #define SPARC_BRANCH(dest_addr, inst_addr) \
859           (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))
860
861 #define SPARC_NOP (0x01000000)
862
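/* Patch the TL0 trap table entry for this interrupt level with a
 * branch to 'handler' followed by a nop, then flush the I-cache line
 * so the freshly written instructions are visible.
 */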
863 static void install_fast_irq(unsigned int cpu_irq,
864                              irqreturn_t (*handler)(int, void *, struct pt_regs *))
865 {
866         extern unsigned long sparc64_ttable_tl0;
867         unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
868         unsigned int *insns;
869
870         ttent += 0x820;
871         ttent += (cpu_irq - 1) << 5;
872         insns = (unsigned int *) ttent;
873         insns[0] = SPARC_BRANCH(((unsigned long) handler),
874                                 ((unsigned long)&insns[0]));
875         insns[1] = SPARC_NOP;
876         __asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
877 }
878
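/* Like request_irq(), but the handler is wired straight into the trap
 * table via install_fast_irq().  No sharing, no SA_SAMPLE_RANDOM and
 * no smp affinity handling on this path.
 */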
879 int request_fast_irq(unsigned int irq,
880                      irqreturn_t (*handler)(int, void *, struct pt_regs *),
881                      unsigned long irqflags, const char *name, void *dev_id)
882 {
883         struct irqaction *action;
884         struct ino_bucket *bucket = __bucket(irq);
885         unsigned long flags;
886
887         /* No pil0 dummy buckets allowed here. */
888         if (bucket < &ivector_table[0] ||
889             bucket >= &ivector_table[NUM_IVECS]) {
890                 unsigned int *caller;
891
892                 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
893                 printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
894                        "from %p, irq %08x.\n", caller, irq);
895                 return -EINVAL;
896         }       
897         
898         if (!handler)
899                 return -EINVAL;
900
901         if ((bucket->pil == 0) || (bucket->pil == 14)) {
902                 printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
903                 return -EBUSY;
904         }
905
906         spin_lock_irqsave(&irq_action_lock, flags);
907
908         action = *(bucket->pil + irq_action);
909         if (action) {
910                 if (action->flags & SA_SHIRQ)
911                         panic("Trying to register fast irq when already shared.\n");
912                 if (irqflags & SA_SHIRQ)
913                         panic("Trying to register fast irq as shared.\n");
914                 printk("request_fast_irq: Trying to register yet already owned.\n");
915                 spin_unlock_irqrestore(&irq_action_lock, flags);
916                 return -EBUSY;
917         }
918
919         /*
920          * We do not check for SA_SAMPLE_RANDOM in this path. Neither do we
921          * support smp intr affinity in this path.
922          */
923         if (irqflags & SA_STATIC_ALLOC) {
924                 if (static_irq_count < MAX_STATIC_ALLOC)
925                         action = &static_irqaction[static_irq_count++];
926                 else
927                         printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
928                                "using kmalloc\n", bucket->pil, name);
929         }
930         if (action == NULL)
931                 action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
932                                                      GFP_ATOMIC);
933         if (!action) {
934                 spin_unlock_irqrestore(&irq_action_lock, flags);
935                 return -ENOMEM;
936         }
937         install_fast_irq(bucket->pil, handler);
938
939         bucket->irq_info = action;
940         bucket->flags |= IBF_ACTIVE;
941
942         action->handler = handler;
943         action->flags = irqflags;
944         action->dev_id = NULL;
945         action->name = name;
946         action->next = NULL;
947         put_ino_in_irqaction(action, irq);
948         put_smpaff_in_irqaction(action, CPU_MASK_NONE);
949
950         *(bucket->pil + irq_action) = action;
951         enable_irq(irq);
952
953         spin_unlock_irqrestore(&irq_action_lock, flags);
954
955 #ifdef CONFIG_SMP
956         distribute_irqs();
957 #endif
958         return 0;
959 }
960
961 /* We really don't need these at all on the Sparc.  We only have
962  * stubs here because they are exported to modules.
963  */
964 unsigned long probe_irq_on(void)
965 {
966         return 0;
967 }
968
969 EXPORT_SYMBOL(probe_irq_on);
970
971 int probe_irq_off(unsigned long mask)
972 {
973         return 0;
974 }
975
976 EXPORT_SYMBOL(probe_irq_off);
977
978 #ifdef CONFIG_SMP
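/* Re-point the interrupt described by 'p' at the next online cpu at
 * or after goal_cpu, encoding the target id the same way enable_irq()
 * does, and return the cpu the next interrupt should be given to.
 */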
979 static int retarget_one_irq(struct irqaction *p, int goal_cpu)
980 {
981         struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
982         unsigned long imap = bucket->imap;
983         unsigned int tid;
984
985         while (!cpu_online(goal_cpu)) {
986                 if (++goal_cpu >= NR_CPUS)
987                         goal_cpu = 0;
988         }
989
990         if (tlb_type == cheetah || tlb_type == cheetah_plus) {
991                 tid = goal_cpu << 26;
992                 tid &= IMAP_AID_SAFARI;
993         } else if (this_is_starfire == 0) {
994                 tid = goal_cpu << 26;
995                 tid &= IMAP_TID_UPA;
996         } else {
997                 tid = (starfire_translate(imap, goal_cpu) << 26);
998                 tid &= IMAP_TID_UPA;
999         }
1000         upa_writel(tid | IMAP_VALID, imap);
1001
1002         do {
1003                 if (++goal_cpu >= NR_CPUS)
1004                         goal_cpu = 0;
1005         } while (!cpu_online(goal_cpu));
1006
1007         return goal_cpu;
1008 }
1009
1010 /* Called from request_irq. */
1011 static void distribute_irqs(void)
1012 {
1013         unsigned long flags;
1014         int cpu, level;
1015
1016         spin_lock_irqsave(&irq_action_lock, flags);
1017         cpu = 0;
1018
1019         /*
1020          * Skip the timer at [0], and very rare error/power intrs at [15].
1021          * Also skip level [12]; it causes problems on Ex000 systems.
1022          */
1023         for (level = 1; level < NR_IRQS; level++) {
1024                 struct irqaction *p = irq_action[level];
1025                 if (level == 12) continue;
1026                 while(p) {
1027                         cpu = retarget_one_irq(p, cpu);
1028                         p = p->next;
1029                 }
1030         }
1031         spin_unlock_irqrestore(&irq_action_lock, flags);
1032 }
1033 #endif
1034
1035
1036 struct sun5_timer *prom_timers;
1037 static u64 prom_limit0, prom_limit1;
1038
1039 static void map_prom_timers(void)
1040 {
1041         unsigned int addr[3];
1042         int tnode, err;
1043
1044         /* PROM timer node hangs out in the top level of device siblings... */
1045         tnode = prom_finddevice("/counter-timer");
1046
1047         /* Assume that if the node is not present, the PROM uses a
1048          * different tick mechanism which we need not care about.
1049          */
1050         if (tnode == 0 || tnode == -1) {
1051                 prom_timers = (struct sun5_timer *) 0;
1052                 return;
1053         }
1054
1055         /* If the PROM is really using this, it must have mapped it itself. */
1056         err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
1057         if (err == -1) {
1058                 prom_printf("PROM does not have timer mapped, trying to continue.\n");
1059                 prom_timers = (struct sun5_timer *) 0;
1060                 return;
1061         }
1062         prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
1063 }
1064
1065 static void kill_prom_timer(void)
1066 {
1067         if (!prom_timers)
1068                 return;
1069
1070         /* Save them away for later. */
1071         prom_limit0 = prom_timers->limit0;
1072         prom_limit1 = prom_timers->limit1;
1073
1074         /* Just as on sun4c/sun4m, the PROM uses a timer which ticks at IRQ 14.
1075          * We turn both limit registers off here just to be paranoid.
1076          */
1077         prom_timers->limit0 = 0;
1078         prom_timers->limit1 = 0;
1079
1080         /* Wheee, eat the interrupt packet too... */
1081         __asm__ __volatile__(
1082 "       mov     0x40, %%g2\n"
1083 "       ldxa    [%%g0] %0, %%g1\n"
1084 "       ldxa    [%%g2] %1, %%g1\n"
1085 "       stxa    %%g0, [%%g0] %0\n"
1086 "       membar  #Sync\n"
1087         : /* no outputs */
1088         : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
1089         : "g1", "g2");
1090 }
1091
1092 void enable_prom_timer(void)
1093 {
1094         if (!prom_timers)
1095                 return;
1096
1097         /* Set it to whatever was there before. */
1098         prom_timers->limit1 = prom_limit1;
1099         prom_timers->count1 = 0;
1100         prom_timers->limit0 = prom_limit0;
1101         prom_timers->count0 = 0;
1102 }
1103
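/* Zero this cpu's irq work lists and load their address into the %g6
 * interrupt-global register (selected while PSTATE_IG is set) so the
 * vectored interrupt trap handler can find them.
 */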
1104 void init_irqwork_curcpu(void)
1105 {
1106         register struct irq_work_struct *workp asm("o2");
1107         unsigned long tmp;
1108         int cpu = hard_smp_processor_id();
1109
1110         memset(__irq_work + cpu, 0, sizeof(*workp));
1111
1112         /* Make sure we are called with PSTATE_IE disabled.  */
1113         __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
1114                              : "=r" (tmp));
1115         if (tmp & PSTATE_IE) {
1116                 prom_printf("BUG: init_irqwork_curcpu() called with "
1117                             "PSTATE_IE enabled, bailing.\n");
1118                 __asm__ __volatile__("mov       %%i7, %0\n\t"
1119                                      : "=r" (tmp));
1120                 prom_printf("BUG: Called from %lx\n", tmp);
1121                 prom_halt();
1122         }
1123
1124         /* Set interrupt globals.  */
1125         workp = &__irq_work[cpu];
1126         __asm__ __volatile__(
1127         "rdpr   %%pstate, %0\n\t"
1128         "wrpr   %0, %1, %%pstate\n\t"
1129         "mov    %2, %%g6\n\t"
1130         "wrpr   %0, 0x0, %%pstate\n\t"
1131         : "=&r" (tmp)
1132         : "i" (PSTATE_IG), "r" (workp));
1133 }
1134
1135 /* Only invoked on boot processor. */
1136 void __init init_IRQ(void)
1137 {
1138         map_prom_timers();
1139         kill_prom_timer();
1140         memset(&ivector_table[0], 0, sizeof(ivector_table));
1141
1142         /* We need to clear any IRQs pending in the soft interrupt
1143          * registers, a spurious one could be left around from the
1144          * PROM timer which we just disabled.
1145          */
1146         clear_softint(get_softint());
1147
1148         /* Now that ivector table is initialized, it is safe
1149          * to receive IRQ vector traps.  We will normally take
1150          * one or two right now, in case some device PROM used
1151          * to boot us wants to speak to us.  We just ignore them.
1152          */
1153         __asm__ __volatile__("rdpr      %%pstate, %%g1\n\t"
1154                              "or        %%g1, %0, %%g1\n\t"
1155                              "wrpr      %%g1, 0x0, %%pstate"
1156                              : /* No outputs */
1157                              : "i" (PSTATE_IE)
1158                              : "g1");
1159 }
1160
1161 static struct proc_dir_entry * root_irq_dir;
1162 static struct proc_dir_entry * irq_dir [NUM_IVECS];
1163
1164 #ifdef CONFIG_SMP
1165
1166 static int irq_affinity_read_proc (char *page, char **start, off_t off,
1167                         int count, int *eof, void *data)
1168 {
1169         struct ino_bucket *bp = ivector_table + (long)data;
1170         struct irqaction *ap = bp->irq_info;
1171         cpumask_t mask;
1172         int len;
1173
1174         mask = get_smpaff_in_irqaction(ap);
1175         if (cpus_empty(mask))
1176                 mask = cpu_online_map;
1177
1178         len = cpumask_scnprintf(page, count, mask);
1179         if (count - len < 2)
1180                 return -EINVAL;
1181         len += sprintf(page + len, "\n");
1182         return len;
1183 }
1184
1185 static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
1186 {
1187         struct ino_bucket *bp = ivector_table + irq;
1188
1189         /* Users specify affinity in terms of hw cpu ids.
1190          * As soon as we do this, handler_irq() might see and take action.
1191          */
1192         put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff);
1193
1194         /* Migration is simply done by the next cpu to service this
1195          * interrupt.
1196          */
1197 }
1198
1199 static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
1200                                         unsigned long count, void *data)
1201 {
1202         int irq = (long) data, full_count = count, err;
1203         cpumask_t new_value;
1204
1205         err = cpumask_parse(buffer, count, new_value);
1206         if (err)
1207                 return err;
1208
1207         /*
1208          * Do not allow disabling IRQs completely - it's a too easy
1209          * way to make the system unusable accidentally :-) At least
1210          * one online CPU still has to be targeted.
1211          */
1212         cpus_and(new_value, new_value, cpu_online_map);
1213         if (cpus_empty(new_value))
1214                 return -EINVAL;
1215
1216         set_intr_affinity(irq, new_value);
1217
1218         return full_count;
1219 }
1220
1221 #endif
1222
1223 #define MAX_NAMELEN 10
1224
1225 static void register_irq_proc (unsigned int irq)
1226 {
1227         char name [MAX_NAMELEN];
1228
1229         if (!root_irq_dir || irq_dir[irq])
1230                 return;
1231
1232         memset(name, 0, MAX_NAMELEN);
1233         sprintf(name, "%x", irq);
1234
1235         /* create /proc/irq/1234 */
1236         irq_dir[irq] = proc_mkdir(name, root_irq_dir);
1237
1238 #ifdef CONFIG_SMP
1239         /* XXX SMP affinity not supported on starfire yet. */
1240         if (this_is_starfire == 0) {
1241                 struct proc_dir_entry *entry;
1242
1243                 /* create /proc/irq/1234/smp_affinity */
1244                 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
1245
1246                 if (entry) {
1247                         entry->nlink = 1;
1248                         entry->data = (void *)(long)irq;
1249                         entry->read_proc = irq_affinity_read_proc;
1250                         entry->write_proc = irq_affinity_write_proc;
1251                 }
1252         }
1253 #endif
1254 }
1255
1256 void init_irq_proc (void)
1257 {
1258         /* create /proc/irq */
1259         root_irq_dir = proc_mkdir("irq", NULL);
1260 }
1261