arch/sparc64/kernel/irq.c (from linux-2.6.6, ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2)
1 /* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
2  * irq.c: UltraSparc IRQ handling/init/registry.
3  *
4  * Copyright (C) 1997  David S. Miller  (davem@caip.rutgers.edu)
5  * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
6  * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
7  */
8
9 #include <linux/config.h>
10 #include <linux/module.h>
11 #include <linux/sched.h>
12 #include <linux/ptrace.h>
13 #include <linux/errno.h>
14 #include <linux/kernel_stat.h>
15 #include <linux/signal.h>
16 #include <linux/mm.h>
17 #include <linux/interrupt.h>
18 #include <linux/slab.h>
19 #include <linux/random.h>
20 #include <linux/init.h>
21 #include <linux/delay.h>
22 #include <linux/proc_fs.h>
23 #include <linux/seq_file.h>
24
25 #include <asm/ptrace.h>
26 #include <asm/processor.h>
27 #include <asm/atomic.h>
28 #include <asm/system.h>
29 #include <asm/irq.h>
30 #include <asm/sbus.h>
31 #include <asm/iommu.h>
32 #include <asm/upa.h>
33 #include <asm/oplib.h>
34 #include <asm/timer.h>
35 #include <asm/smp.h>
36 #include <asm/hardirq.h>
37 #include <asm/starfire.h>
38 #include <asm/uaccess.h>
39 #include <asm/cache.h>
40 #include <asm/cpudata.h>
41
42 #ifdef CONFIG_SMP
43 static void distribute_irqs(void);
44 #endif
45
/* UPA nodes send an interrupt packet to the UltraSparc with the low 5 bits
 * (7 on Starfire) of the first data register holding the IRQ identifier
 * being delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
50  *
51  * To make processing these packets efficient and race free we use
52  * an array of irq buckets below.  The interrupt vector handler in
53  * entry.S feeds incoming packets into per-cpu pil-indexed lists.
54  * The IVEC handler does not need to act atomically, the PIL dispatch
55  * code uses CAS to get an atomic snapshot of the list and clear it
56  * at the same time.
57  */
58
59 struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
60
61 /* This has to be in the main kernel image, it cannot be
62  * turned into per-cpu data.  The reason is that the main
63  * kernel image is locked into the TLB and this structure
64  * is accessed from the vectored interrupt trap handler.  If
65  * access to this structure takes a TLB miss it could cause
66  * the 5-level sparc v9 trap stack to overflow.
67  */
68 struct irq_work_struct {
69         unsigned int    irq_worklists[16];
70 };
71 struct irq_work_struct __irq_work[NR_CPUS];
72 #define irq_work(__cpu, __pil)  &(__irq_work[(__cpu)].irq_worklists[(__pil)])
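
/* How these per-cpu worklists get used (a sketch based on the code in this
 * file and the IVEC handler in entry.S): the vectored-interrupt trap code
 * prepends an ino_bucket onto __irq_work[cpu].irq_worklists[pil] and posts
 * the matching softint; handler_irq() below then grabs the whole chain in
 * one atomic exchange and walks it:
 *
 *	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
 *	for ( ; bp != NULL; bp = nbp) {
 *		nbp = __bucket(bp->irq_chain);
 *		... dispatch bp ...
 *	}
 */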
73
74 #ifdef CONFIG_PCI
75 /* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
76  * It is used for PCI only to synchronize DMA transfers with IRQ delivery
77  * for devices behind busses other than APB on Sabre systems.
78  *
79  * Currently these physical addresses are just config space accesses
80  * to the command register for that device.
81  */
82 unsigned long pci_dma_wsync;
83 unsigned long dma_sync_reg_table[256];
84 unsigned char dma_sync_reg_table_entry = 0;
85 #endif
86
87 /* This is based upon code in the 32-bit Sparc kernel written mostly by
88  * David Redman (djhr@tadpole.co.uk).
89  */
90 #define MAX_STATIC_ALLOC        4
91 static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
92 static int static_irq_count;
93
94 /* This is exported so that fast IRQ handlers can get at it... -DaveM */
95 struct irqaction *irq_action[NR_IRQS+1] = {
96           NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
97           NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
98 };
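
/* The table above is indexed by PIL (0 through NR_IRQS); each entry is the
 * head of a possibly shared chain of irqaction structures, as seen in the
 * "*(bucket->pil + irq_action)" lookups throughout this file.
 */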
99
100 /* This only synchronizes entities which modify IRQ handler
101  * state and some selected user-level spots that want to
102  * read things in the table.  IRQ handler processing orders
 * its accesses such that no locking is needed.
104  */
105 static spinlock_t irq_action_lock = SPIN_LOCK_UNLOCKED;
106
107 static void register_irq_proc (unsigned int irq);
108
109 /*
 * The upper 16 bits of irqaction->flags hold the ino.
111  * irqaction->mask holds the smp affinity information.
112  */
113 #define put_ino_in_irqaction(action, irq) \
114         action->flags &= 0xffffffffffffUL; \
115         if (__bucket(irq) == &pil0_dummy_bucket) \
116                 action->flags |= 0xdeadUL << 48;  \
117         else \
118                 action->flags |= __irq_ino(irq) << 48;
119 #define get_ino_in_irqaction(action)    (action->flags >> 48)
120
121 #if NR_CPUS > 64
122 #error irqaction embedded smp affinity does not work with > 64 cpus, FIXME
123 #endif
124
125 #define put_smpaff_in_irqaction(action, smpaff) (action)->mask = (smpaff)
126 #define get_smpaff_in_irqaction(action)         ((action)->mask)
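
/* Resulting irqaction field layout (from the macros above):
 *
 *	flags bits 63..48: ino of the bucket (0xdead for the PIL0 dummy)
 *	flags bits 47..0 : the usual SA_* flag bits
 *	mask             : bitmask of hw cpu ids this IRQ may be directed to
 */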
127
128 int show_interrupts(struct seq_file *p, void *v)
129 {
130         unsigned long flags;
131         int i = *(loff_t *) v;
132         struct irqaction *action;
133 #ifdef CONFIG_SMP
134         int j;
135 #endif
136
137         spin_lock_irqsave(&irq_action_lock, flags);
138         if (i <= NR_IRQS) {
139                 if (!(action = *(i + irq_action)))
140                         goto out_unlock;
141                 seq_printf(p, "%3d: ", i);
142 #ifndef CONFIG_SMP
143                 seq_printf(p, "%10u ", kstat_irqs(i));
144 #else
145                 for (j = 0; j < NR_CPUS; j++) {
146                         if (!cpu_online(j))
147                                 continue;
148                         seq_printf(p, "%10u ",
149                                    kstat_cpu(j).irqs[i]);
150                 }
151 #endif
152                 seq_printf(p, " %s:%lx", action->name,
153                            get_ino_in_irqaction(action));
154                 for (action = action->next; action; action = action->next) {
155                         seq_printf(p, ", %s:%lx", action->name,
156                                    get_ino_in_irqaction(action));
157                 }
158                 seq_putc(p, '\n');
159         }
160 out_unlock:
161         spin_unlock_irqrestore(&irq_action_lock, flags);
162
163         return 0;
164 }
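
/* An example of the /proc/interrupts line produced above (the numbers and
 * names are illustrative only): PIL, one count per online cpu, then
 * "name:ino" for every action sharing that level, e.g.
 *
 *	  4:     123456      654321  esp:24, lance:28
 */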
165
166 /* Now these are always passed a true fully specified sun4u INO. */
167 void enable_irq(unsigned int irq)
168 {
169         struct ino_bucket *bucket = __bucket(irq);
170         unsigned long imap;
171         unsigned long tid;
172
173         imap = bucket->imap;
174         if (imap == 0UL)
175                 return;
176
177         if (tlb_type == cheetah || tlb_type == cheetah_plus) {
178                 unsigned long ver;
179
180                 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
181                 if ((ver >> 32) == 0x003e0016) {
182                         /* We set it to our JBUS ID. */
183                         __asm__ __volatile__("ldxa [%%g0] %1, %0"
184                                              : "=r" (tid)
185                                              : "i" (ASI_JBUS_CONFIG));
186                         tid = ((tid & (0x1fUL<<17)) << 9);
187                         tid &= IMAP_TID_JBUS;
188                 } else {
189                         /* We set it to our Safari AID. */
190                         __asm__ __volatile__("ldxa [%%g0] %1, %0"
191                                              : "=r" (tid)
192                                              : "i" (ASI_SAFARI_CONFIG));
193                         tid = ((tid & (0x3ffUL<<17)) << 9);
194                         tid &= IMAP_AID_SAFARI;
195                 }
196         } else if (this_is_starfire == 0) {
197                 /* We set it to our UPA MID. */
198                 __asm__ __volatile__("ldxa [%%g0] %1, %0"
199                                      : "=r" (tid)
200                                      : "i" (ASI_UPA_CONFIG));
201                 tid = ((tid & UPA_CONFIG_MID) << 9);
202                 tid &= IMAP_TID_UPA;
203         } else {
204                 tid = (starfire_translate(imap, smp_processor_id()) << 26);
205                 tid &= IMAP_TID_UPA;
206         }
207
        /* NOTE NOTE NOTE: IGN and INO are read-only.  IGN is a product
         * of this SYSIO's preconfigured IGN in the SYSIO Control
         * Register; the hardware just mirrors that value here.
         * However, for Graphics and UPA Slave devices the full
         * IMAP_INR field can be set by the programmer here.
213          *
214          * Things like FFB can now be handled via the new IRQ mechanism.
215          */
216         upa_writel(tid | IMAP_VALID, imap);
217 }
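
/* A summary of the target-id selection above: Cheetah-class cpus supply
 * either their JBUS id (implementation 0x16, apparently UltraSPARC-IIIi) or
 * their Safari agent id; older non-Starfire cpus use the UPA MID; Starfire
 * needs starfire_translate().  Whatever the source, the value ends up in
 * the TID field of the IMAP register together with IMAP_VALID.
 */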
218
219 /* This now gets passed true ino's as well. */
220 void disable_irq(unsigned int irq)
221 {
222         struct ino_bucket *bucket = __bucket(irq);
223         unsigned long imap;
224
225         imap = bucket->imap;
226         if (imap != 0UL) {
227                 u32 tmp;
228
                /* NOTE: We do not want to futz with the IRQ clear registers
                 *       and move the state to IDLE; the SCSI code calls
                 *       disable_irq() to assure atomicity in its queue
                 *       command path, and doing that would lose interrupts.
                 */
234                 tmp = upa_readl(imap);
235                 tmp &= ~IMAP_VALID;
236                 upa_writel(tmp, imap);
237         }
238 }
239
240 /* The timer is the one "weird" interrupt which is generated by
241  * the CPU %tick register and not by some normal vectored interrupt
242  * source.  To handle this special case, we use this dummy INO bucket.
243  */
244 static struct ino_bucket pil0_dummy_bucket = {
245         0,      /* irq_chain */
246         0,      /* pil */
247         0,      /* pending */
248         0,      /* flags */
249         0,      /* __unused */
250         NULL,   /* irq_info */
251         0UL,    /* iclr */
252         0UL,    /* imap */
253 };
254
255 unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
256 {
257         struct ino_bucket *bucket;
258         int ino;
259
260         if (pil == 0) {
261                 if (iclr != 0UL || imap != 0UL) {
262                         prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
263                                     iclr, imap);
264                         prom_halt();
265                 }
266                 return __irq(&pil0_dummy_bucket);
267         }
268
269         /* RULE: Both must be specified in all other cases. */
270         if (iclr == 0UL || imap == 0UL) {
271                 prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
272                             pil, inofixup, iclr, imap);
273                 prom_halt();
274         }
275         
276         ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
        if (ino >= NUM_IVECS) {
278                 prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
279                             ino, pil, inofixup, iclr, imap);
280                 prom_halt();
281         }
282
283         /* Ok, looks good, set it up.  Don't touch the irq_chain or
284          * the pending flag.
285          */
286         bucket = &ivector_table[ino];
287         if ((bucket->flags & IBF_ACTIVE) ||
288             (bucket->irq_info != NULL)) {
289                 /* This is a gross fatal error if it happens here. */
290                 prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
291                 prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
292                             ino, pil, inofixup, iclr, imap);
293                 prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
294                             bucket->pil, bucket->iclr, bucket->imap);
295                 prom_printf("IRQ: Cannot continue, halting...\n");
296                 prom_halt();
297         }
298         bucket->imap  = imap;
299         bucket->iclr  = iclr;
300         bucket->pil   = pil;
301         bucket->flags = 0;
302
303         bucket->irq_info = NULL;
304
305         return __irq(bucket);
306 }
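
/* Illustrative use of build_irq(), with hypothetical names not taken from
 * this file: a bus driver figures out the physical addresses of a device's
 * ICLR and IMAP registers and converts them into the opaque IRQ cookie
 * that the request_irq()/free_irq() calls below expect:
 *
 *	irq = build_irq(pil, 0, iclr_paddr, imap_paddr);
 *	err = request_irq(irq, my_handler, SA_SHIRQ, "mydev", dev);
 */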
307
308 static void atomic_bucket_insert(struct ino_bucket *bucket)
309 {
310         unsigned long pstate;
311         unsigned int *ent;
312
313         __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
314         __asm__ __volatile__("wrpr %0, %1, %%pstate"
315                              : : "r" (pstate), "i" (PSTATE_IE));
316         ent = irq_work(smp_processor_id(), bucket->pil);
317         bucket->irq_chain = *ent;
318         *ent = __irq(bucket);
319         __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
320 }
321
322 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
323                 unsigned long irqflags, const char *name, void *dev_id)
324 {
325         struct irqaction *action, *tmp = NULL;
326         struct ino_bucket *bucket = __bucket(irq);
327         unsigned long flags;
328         int pending = 0;
329
330         if ((bucket != &pil0_dummy_bucket) &&
331             (bucket < &ivector_table[0] ||
332              bucket >= &ivector_table[NUM_IVECS])) {
333                 unsigned int *caller;
334
335                 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
336                 printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
337                        "from %p, irq %08x.\n", caller, irq);
338                 return -EINVAL;
339         }       
340         if (!handler)
341             return -EINVAL;
342
343         if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
                /*
                 * This function might sleep, so we want to call it first,
                 * outside of the atomic block.  In the SA_STATIC_ALLOC case
                 * the random driver's kmalloc will fail, but that is safe.
                 * If already initialized, the random driver will not reinit.
                 * Yes, this might clear the entropy pool if the wrong driver
                 * is attempted to be loaded without actually installing a
                 * new handler, but is that really a problem?  Only the
                 * sysadmin is able to do this.
                 */
354                 rand_initialize_irq(irq);
355         }
356
357         spin_lock_irqsave(&irq_action_lock, flags);
358
359         action = *(bucket->pil + irq_action);
360         if (action) {
361                 if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
362                         for (tmp = action; tmp->next; tmp = tmp->next)
363                                 ;
364                 else {
365                         spin_unlock_irqrestore(&irq_action_lock, flags);
366                         return -EBUSY;
367                 }
368                 action = NULL;          /* Or else! */
369         }
370
371         /* If this is flagged as statically allocated then we use our
372          * private struct which is never freed.
373          */
374         if (irqflags & SA_STATIC_ALLOC) {
375             if (static_irq_count < MAX_STATIC_ALLOC)
376                 action = &static_irqaction[static_irq_count++];
377             else
378                 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
379                        "using kmalloc\n", irq, name);
380         }       
381         if (action == NULL)
382             action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
383                                                  GFP_ATOMIC);
384         
385         if (!action) { 
386                 spin_unlock_irqrestore(&irq_action_lock, flags);
387                 return -ENOMEM;
388         }
389
390         if (bucket == &pil0_dummy_bucket) {
391                 bucket->irq_info = action;
392                 bucket->flags |= IBF_ACTIVE;
393         } else {
394                 if ((bucket->flags & IBF_ACTIVE) != 0) {
395                         void *orig = bucket->irq_info;
396                         void **vector = NULL;
397
398                         if ((bucket->flags & IBF_PCI) == 0) {
399                                 printk("IRQ: Trying to share non-PCI bucket.\n");
400                                 goto free_and_ebusy;
401                         }
402                         if ((bucket->flags & IBF_MULTI) == 0) {
403                                 vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC);
404                                 if (vector == NULL)
405                                         goto free_and_enomem;
406
407                                 /* We might have slept. */
408                                 if ((bucket->flags & IBF_MULTI) != 0) {
409                                         int ent;
410
411                                         kfree(vector);
412                                         vector = (void **)bucket->irq_info;
413                                         for(ent = 0; ent < 4; ent++) {
414                                                 if (vector[ent] == NULL) {
415                                                         vector[ent] = action;
416                                                         break;
417                                                 }
418                                         }
419                                         if (ent == 4)
420                                                 goto free_and_ebusy;
421                                 } else {
422                                         vector[0] = orig;
423                                         vector[1] = action;
424                                         vector[2] = NULL;
425                                         vector[3] = NULL;
426                                         bucket->irq_info = vector;
427                                         bucket->flags |= IBF_MULTI;
428                                 }
429                         } else {
430                                 int ent;
431
432                                 vector = (void **)orig;
433                                 for (ent = 0; ent < 4; ent++) {
434                                         if (vector[ent] == NULL) {
435                                                 vector[ent] = action;
436                                                 break;
437                                         }
438                                 }
439                                 if (ent == 4)
440                                         goto free_and_ebusy;
441                         }
442                 } else {
443                         bucket->irq_info = action;
444                         bucket->flags |= IBF_ACTIVE;
445                 }
446                 pending = bucket->pending;
447                 if (pending)
448                         bucket->pending = 0;
449         }
450
451         action->handler = handler;
452         action->flags = irqflags;
453         action->name = name;
454         action->next = NULL;
455         action->dev_id = dev_id;
456         put_ino_in_irqaction(action, irq);
457         put_smpaff_in_irqaction(action, 0);
458
459         if (tmp)
460                 tmp->next = action;
461         else
462                 *(bucket->pil + irq_action) = action;
463
464         enable_irq(irq);
465
466         /* We ate the IVEC already, this makes sure it does not get lost. */
467         if (pending) {
468                 atomic_bucket_insert(bucket);
469                 set_softint(1 << bucket->pil);
470         }
471         spin_unlock_irqrestore(&irq_action_lock, flags);
472         if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC)))
473                 register_irq_proc(__irq_ino(irq));
474
475 #ifdef CONFIG_SMP
476         distribute_irqs();
477 #endif
478         return 0;
479
480 free_and_ebusy:
481         kfree(action);
482         spin_unlock_irqrestore(&irq_action_lock, flags);
483         return -EBUSY;
484
485 free_and_enomem:
486         kfree(action);
487         spin_unlock_irqrestore(&irq_action_lock, flags);
488         return -ENOMEM;
489 }
490
491 EXPORT_SYMBOL(request_irq);
492
493 void free_irq(unsigned int irq, void *dev_id)
494 {
495         struct irqaction *action;
496         struct irqaction *tmp = NULL;
497         unsigned long flags;
498         struct ino_bucket *bucket = __bucket(irq), *bp;
499
500         if ((bucket != &pil0_dummy_bucket) &&
501             (bucket < &ivector_table[0] ||
502              bucket >= &ivector_table[NUM_IVECS])) {
503                 unsigned int *caller;
504
505                 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
506                 printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
507                        "from %p, irq %08x.\n", caller, irq);
508                 return;
509         }
510         
511         spin_lock_irqsave(&irq_action_lock, flags);
512
513         action = *(bucket->pil + irq_action);
        if (!action || !action->handler) {
                printk("Freeing free IRQ %d\n", bucket->pil);
                spin_unlock_irqrestore(&irq_action_lock, flags);
                return;
        }
518         if (dev_id) {
519                 for ( ; action; action = action->next) {
520                         if (action->dev_id == dev_id)
521                                 break;
522                         tmp = action;
523                 }
524                 if (!action) {
525                         printk("Trying to free free shared IRQ %d\n", bucket->pil);
526                         spin_unlock_irqrestore(&irq_action_lock, flags);
527                         return;
528                 }
529         } else if (action->flags & SA_SHIRQ) {
530                 printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
531                 spin_unlock_irqrestore(&irq_action_lock, flags);
532                 return;
533         }
534
535         if (action->flags & SA_STATIC_ALLOC) {
536                 printk("Attempt to free statically allocated IRQ %d (%s)\n",
537                        bucket->pil, action->name);
538                 spin_unlock_irqrestore(&irq_action_lock, flags);
539                 return;
540         }
541
542         if (action && tmp)
543                 tmp->next = action->next;
544         else
545                 *(bucket->pil + irq_action) = action->next;
546
547         spin_unlock_irqrestore(&irq_action_lock, flags);
548
549         synchronize_irq(irq);
550
551         spin_lock_irqsave(&irq_action_lock, flags);
552
553         if (bucket != &pil0_dummy_bucket) {
554                 unsigned long imap = bucket->imap;
555                 void **vector, *orig;
556                 int ent;
557
558                 orig = bucket->irq_info;
559                 vector = (void **)orig;
560
561                 if ((bucket->flags & IBF_MULTI) != 0) {
562                         int other = 0;
563                         void *orphan = NULL;
564                         for (ent = 0; ent < 4; ent++) {
565                                 if (vector[ent] == action)
566                                         vector[ent] = NULL;
567                                 else if (vector[ent] != NULL) {
568                                         orphan = vector[ent];
569                                         other++;
570                                 }
571                         }
572
573                         /* Only free when no other shared irq
574                          * uses this bucket.
575                          */
576                         if (other) {
577                                 if (other == 1) {
578                                         /* Convert back to non-shared bucket. */
579                                         bucket->irq_info = orphan;
580                                         bucket->flags &= ~(IBF_MULTI);
581                                         kfree(vector);
582                                 }
583                                 goto out;
584                         }
585                 } else {
586                         bucket->irq_info = NULL;
587                 }
588
589                 /* This unique interrupt source is now inactive. */
590                 bucket->flags &= ~IBF_ACTIVE;
591
592                 /* See if any other buckets share this bucket's IMAP
593                  * and are still active.
594                  */
595                 for (ent = 0; ent < NUM_IVECS; ent++) {
596                         bp = &ivector_table[ent];
597                         if (bp != bucket        &&
598                             bp->imap == imap    &&
599                             (bp->flags & IBF_ACTIVE) != 0)
600                                 break;
601                 }
602
603                 /* Only disable when no other sub-irq levels of
604                  * the same IMAP are active.
605                  */
606                 if (ent == NUM_IVECS)
607                         disable_irq(irq);
608         }
609
610 out:
611         kfree(action);
612         spin_unlock_irqrestore(&irq_action_lock, flags);
613 }
614
615 EXPORT_SYMBOL(free_irq);
616
617 #ifdef CONFIG_SMP
618 void synchronize_irq(unsigned int irq)
619 {
620         struct ino_bucket *bucket = __bucket(irq);
621
622 #if 0
623         /* The following is how I wish I could implement this.
624          * Unfortunately the ICLR registers are read-only, you can
625          * only write ICLR_foo values to them.  To get the current
626          * IRQ status you would need to get at the IRQ diag registers
627          * in the PCI/SBUS controller and the layout of those vary
628          * from one controller to the next, sigh... -DaveM
629          */
630         unsigned long iclr = bucket->iclr;
631
632         while (1) {
633                 u32 tmp = upa_readl(iclr);
634                 
635                 if (tmp == ICLR_TRANSMIT ||
636                     tmp == ICLR_PENDING) {
637                         cpu_relax();
638                         continue;
639                 }
640                 break;
641         }
642 #else
643         /* So we have to do this with a INPROGRESS bit just like x86.  */
644         while (bucket->flags & IBF_INPROGRESS)
645                 cpu_relax();
646 #endif
647 }
648 #endif /* CONFIG_SMP */
649
650 void catch_disabled_ivec(struct pt_regs *regs)
651 {
652         int cpu = smp_processor_id();
653         struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));
654
        /* We can actually see this on Ultra/PCI PCI cards, which are bridges
         * to other devices.  Here a single IMAP enables potentially multiple
         * unique interrupt sources (each of which does have a unique ICLR
         * register).
         *
         * So what we do is just record that the IVEC arrived; when the IRQ
         * is registered for real, the request_irq() code will check the
         * pending flag and signal a local CPU interrupt for it.
         */
663 #if 0
664         printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
665                bucket - &ivector_table[0], regs->tpc);
666 #endif
667         *irq_work(cpu, 0) = 0;
668         bucket->pending = 1;
669 }
670
671 /* Tune this... */
672 #define FORWARD_VOLUME          12
673
674 #ifdef CONFIG_SMP
675
676 static inline void redirect_intr(int cpu, struct ino_bucket *bp)
677 {
678         /* Ok, here is what is going on:
679          * 1) Retargeting IRQs on Starfire is very
680          *    expensive so just forget about it on them.
681          * 2) Moving around very high priority interrupts
682          *    is a losing game.
683          * 3) If the current cpu is idle, interrupts are
684          *    useful work, so keep them here.  But do not
685          *    pass to our neighbour if he is not very idle.
686          * 4) If sysadmin explicitly asks for directed intrs,
687          *    Just Do It.
688          */
689         struct irqaction *ap = bp->irq_info;
690         cpumask_t cpu_mask = get_smpaff_in_irqaction(ap);
691         unsigned int buddy, ticks;
692
693         cpus_and(cpu_mask, cpu_mask, cpu_online_map);
694         if (cpus_empty(cpu_mask))
695                 cpu_mask = cpu_online_map;
696
697         if (this_is_starfire != 0 ||
698             bp->pil >= 10 || current->pid == 0)
699                 goto out;
700
701         /* 'cpu' is the MID (ie. UPAID), calculate the MID
702          * of our buddy.
703          */
704         buddy = cpu + 1;
705         if (buddy >= NR_CPUS)
706                 buddy = 0;
707
708         ticks = 0;
709         while (!cpu_isset(buddy, cpu_mask)) {
710                 if (++buddy >= NR_CPUS)
711                         buddy = 0;
712                 if (++ticks > NR_CPUS) {
713                         put_smpaff_in_irqaction(ap, 0);
714                         goto out;
715                 }
716         }
717
718         if (buddy == cpu)
719                 goto out;
720
721         /* Voo-doo programming. */
722         if (cpu_data(buddy).idle_volume < FORWARD_VOLUME)
723                 goto out;
724
725         /* This just so happens to be correct on Cheetah
726          * at the moment.
727          */
728         buddy <<= 26;
729
730         /* Push it to our buddy. */
731         upa_writel(buddy | IMAP_VALID, bp->imap);
732
733 out:
734         return;
735 }
736
737 #endif
738
739 void handler_irq(int irq, struct pt_regs *regs)
740 {
741         struct ino_bucket *bp, *nbp;
742         int cpu = smp_processor_id();
743
744 #ifndef CONFIG_SMP
745         /*
746          * Check for TICK_INT on level 14 softint.
747          */
748         {
749                 unsigned long clr_mask = 1 << irq;
750                 unsigned long tick_mask = tick_ops->softint_mask;
751
752                 if ((irq == 14) && (get_softint() & tick_mask)) {
753                         irq = 0;
754                         clr_mask = tick_mask;
755                 }
756                 clear_softint(clr_mask);
757         }
758 #else
759         int should_forward = 1;
760
761         clear_softint(1 << irq);
762 #endif
763
764         irq_enter();
765         kstat_this_cpu.irqs[irq]++;
766
767         /* Sliiiick... */
768 #ifndef CONFIG_SMP
769         bp = ((irq != 0) ?
770               __bucket(xchg32(irq_work(cpu, irq), 0)) :
771               &pil0_dummy_bucket);
772 #else
773         bp = __bucket(xchg32(irq_work(cpu, irq), 0));
774 #endif
775         for ( ; bp != NULL; bp = nbp) {
776                 unsigned char flags = bp->flags;
777                 unsigned char random = 0;
778
779                 nbp = __bucket(bp->irq_chain);
780                 bp->irq_chain = 0;
781
782                 bp->flags |= IBF_INPROGRESS;
783
784                 if ((flags & IBF_ACTIVE) != 0) {
785 #ifdef CONFIG_PCI
786                         if ((flags & IBF_DMA_SYNC) != 0) {
787                                 upa_readl(dma_sync_reg_table[bp->synctab_ent]);
788                                 upa_readq(pci_dma_wsync);
789                         }
790 #endif
791                         if ((flags & IBF_MULTI) == 0) {
792                                 struct irqaction *ap = bp->irq_info;
793                                 ap->handler(__irq(bp), ap->dev_id, regs);
794                                 random |= ap->flags & SA_SAMPLE_RANDOM;
795                         } else {
796                                 void **vector = (void **)bp->irq_info;
797                                 int ent;
798                                 for (ent = 0; ent < 4; ent++) {
799                                         struct irqaction *ap = vector[ent];
800                                         if (ap != NULL) {
801                                                 ap->handler(__irq(bp), ap->dev_id, regs);
802                                                 random |= ap->flags & SA_SAMPLE_RANDOM;
803                                         }
804                                 }
805                         }
806                         /* Only the dummy bucket lacks IMAP/ICLR. */
807                         if (bp->pil != 0) {
808 #ifdef CONFIG_SMP
809                                 if (should_forward) {
810                                         redirect_intr(cpu, bp);
811                                         should_forward = 0;
812                                 }
813 #endif
814                                 upa_writel(ICLR_IDLE, bp->iclr);
815                                 /* Test and add entropy */
816                                 if (random)
817                                         add_interrupt_randomness(irq);
818                         }
819                 } else
820                         bp->pending = 1;
821
822                 bp->flags &= ~IBF_INPROGRESS;
823         }
824         irq_exit();
825 }
826
827 #ifdef CONFIG_BLK_DEV_FD
828 extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);
829
830 void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
831 {
832         struct irqaction *action = *(irq + irq_action);
833         struct ino_bucket *bucket;
834         int cpu = smp_processor_id();
835
836         irq_enter();
837         kstat_this_cpu.irqs[irq]++;
838
839         *(irq_work(cpu, irq)) = 0;
840         bucket = get_ino_in_irqaction(action) + ivector_table;
841
842         bucket->flags |= IBF_INPROGRESS;
843
844         floppy_interrupt(irq, dev_cookie, regs);
845         upa_writel(ICLR_IDLE, bucket->iclr);
846
847         bucket->flags &= ~IBF_INPROGRESS;
848
849         irq_exit();
850 }
851 #endif
852
853 /* The following assumes that the branch lies before the place we
854  * are branching to.  This is the case for a trap vector...
855  * You have been warned.
856  */
857 #define SPARC_BRANCH(dest_addr, inst_addr) \
858           (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))
859
860 #define SPARC_NOP (0x01000000)
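
/* A worked example of the branch encoding above (illustrative): if the
 * handler sits 0x100 bytes past the trap-table slot, then
 * dest_addr - inst_addr = 0x100, the word displacement is 0x100 >> 2 = 0x40,
 * and the emitted instruction is 0x10800040, i.e. "ba <handler>".
 * SPARC_NOP is "sethi %hi(0), %g0", the canonical sparc nop.
 */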
861
862 static void install_fast_irq(unsigned int cpu_irq,
863                              irqreturn_t (*handler)(int, void *, struct pt_regs *))
864 {
865         extern unsigned long sparc64_ttable_tl0;
866         unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
867         unsigned int *insns;
868
869         ttent += 0x820;
870         ttent += (cpu_irq - 1) << 5;
871         insns = (unsigned int *) ttent;
872         insns[0] = SPARC_BRANCH(((unsigned long) handler),
873                                 ((unsigned long)&insns[0]));
874         insns[1] = SPARC_NOP;
875         __asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
876 }
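
/* The offsets used above: trap type 0x41 (interrupt level 1) lives at
 * 0x41 * 32 = 0x820 within the TL0 trap table, and every trap table entry
 * is 32 bytes, so interrupt level N is patched at 0x820 + ((N - 1) << 5).
 * The branch and nop replace the first two instructions of that entry.
 */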
877
878 int request_fast_irq(unsigned int irq,
879                      irqreturn_t (*handler)(int, void *, struct pt_regs *),
880                      unsigned long irqflags, const char *name, void *dev_id)
881 {
882         struct irqaction *action;
883         struct ino_bucket *bucket = __bucket(irq);
884         unsigned long flags;
885
886         /* No pil0 dummy buckets allowed here. */
887         if (bucket < &ivector_table[0] ||
888             bucket >= &ivector_table[NUM_IVECS]) {
889                 unsigned int *caller;
890
891                 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
892                 printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
893                        "from %p, irq %08x.\n", caller, irq);
894                 return -EINVAL;
895         }       
896         
897         if (!handler)
898                 return -EINVAL;
899
900         if ((bucket->pil == 0) || (bucket->pil == 14)) {
901                 printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
902                 return -EBUSY;
903         }
904
905         spin_lock_irqsave(&irq_action_lock, flags);
906
907         action = *(bucket->pil + irq_action);
908         if (action) {
909                 if (action->flags & SA_SHIRQ)
910                         panic("Trying to register fast irq when already shared.\n");
911                 if (irqflags & SA_SHIRQ)
912                         panic("Trying to register fast irq as shared.\n");
913                 printk("request_fast_irq: Trying to register yet already owned.\n");
914                 spin_unlock_irqrestore(&irq_action_lock, flags);
915                 return -EBUSY;
916         }
917
918         /*
919          * We do not check for SA_SAMPLE_RANDOM in this path. Neither do we
920          * support smp intr affinity in this path.
921          */
922         if (irqflags & SA_STATIC_ALLOC) {
923                 if (static_irq_count < MAX_STATIC_ALLOC)
924                         action = &static_irqaction[static_irq_count++];
925                 else
926                         printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
927                                "using kmalloc\n", bucket->pil, name);
928         }
929         if (action == NULL)
930                 action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
931                                                      GFP_ATOMIC);
932         if (!action) {
933                 spin_unlock_irqrestore(&irq_action_lock, flags);
934                 return -ENOMEM;
935         }
936         install_fast_irq(bucket->pil, handler);
937
938         bucket->irq_info = action;
939         bucket->flags |= IBF_ACTIVE;
940
941         action->handler = handler;
942         action->flags = irqflags;
943         action->dev_id = NULL;
944         action->name = name;
945         action->next = NULL;
946         put_ino_in_irqaction(action, irq);
947         put_smpaff_in_irqaction(action, 0);
948
949         *(bucket->pil + irq_action) = action;
950         enable_irq(irq);
951
952         spin_unlock_irqrestore(&irq_action_lock, flags);
953
954 #ifdef CONFIG_SMP
955         distribute_irqs();
956 #endif
957         return 0;
958 }
959
960 /* We really don't need these at all on the Sparc.  We only have
961  * stubs here because they are exported to modules.
962  */
963 unsigned long probe_irq_on(void)
964 {
965         return 0;
966 }
967
968 EXPORT_SYMBOL(probe_irq_on);
969
970 int probe_irq_off(unsigned long mask)
971 {
972         return 0;
973 }
974
975 EXPORT_SYMBOL(probe_irq_off);
976
977 #ifdef CONFIG_SMP
978 static int retarget_one_irq(struct irqaction *p, int goal_cpu)
979 {
980         struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
981         unsigned long imap = bucket->imap;
982         unsigned int tid;
983
984         while (!cpu_online(goal_cpu)) {
985                 if (++goal_cpu >= NR_CPUS)
986                         goal_cpu = 0;
987         }
988
989         if (tlb_type == cheetah || tlb_type == cheetah_plus) {
990                 tid = goal_cpu << 26;
991                 tid &= IMAP_AID_SAFARI;
992         } else if (this_is_starfire == 0) {
993                 tid = goal_cpu << 26;
994                 tid &= IMAP_TID_UPA;
995         } else {
996                 tid = (starfire_translate(imap, goal_cpu) << 26);
997                 tid &= IMAP_TID_UPA;
998         }
999         upa_writel(tid | IMAP_VALID, imap);

        goal_cpu++;
        while (!cpu_online(goal_cpu)) {
1002                 if (++goal_cpu >= NR_CPUS)
1003                         goal_cpu = 0;
1004         }
1005
1006         return goal_cpu;
1007 }
1008
1009 /* Called from request_irq. */
1010 static void distribute_irqs(void)
1011 {
1012         unsigned long flags;
1013         int cpu, level;
1014
1015         spin_lock_irqsave(&irq_action_lock, flags);
1016         cpu = 0;
1017
1018         /*
1019          * Skip the timer at [0], and very rare error/power intrs at [15].
1020          * Also level [12], it causes problems on Ex000 systems.
1021          */
1022         for (level = 1; level < NR_IRQS; level++) {
1023                 struct irqaction *p = irq_action[level];
1024                 if (level == 12) continue;
1025                 while(p) {
1026                         cpu = retarget_one_irq(p, cpu);
1027                         p = p->next;
1028                 }
1029         }
1030         spin_unlock_irqrestore(&irq_action_lock, flags);
1031 }
1032 #endif
1033
1034
1035 struct sun5_timer *prom_timers;
1036 static u64 prom_limit0, prom_limit1;
1037
1038 static void map_prom_timers(void)
1039 {
1040         unsigned int addr[3];
1041         int tnode, err;
1042
1043         /* PROM timer node hangs out in the top level of device siblings... */
1044         tnode = prom_finddevice("/counter-timer");
1045
        /* Assume that if the node is not present, the PROM uses a different
         * tick mechanism which we need not care about.
         */
1049         if (tnode == 0 || tnode == -1) {
1050                 prom_timers = (struct sun5_timer *) 0;
1051                 return;
1052         }
1053
        /* If the PROM is really using this, it must already have mapped it. */
1055         err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
1056         if (err == -1) {
1057                 prom_printf("PROM does not have timer mapped, trying to continue.\n");
1058                 prom_timers = (struct sun5_timer *) 0;
1059                 return;
1060         }
1061         prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
1062 }
1063
1064 static void kill_prom_timer(void)
1065 {
1066         if (!prom_timers)
1067                 return;
1068
1069         /* Save them away for later. */
1070         prom_limit0 = prom_timers->limit0;
1071         prom_limit1 = prom_timers->limit1;
1072
        /* Just as on sun4c/sun4m, the PROM uses a timer which ticks at IRQ 14.
         * We turn both limit registers off here just to be paranoid.
         */
1076         prom_timers->limit0 = 0;
1077         prom_timers->limit1 = 0;
1078
1079         /* Wheee, eat the interrupt packet too... */
1080         __asm__ __volatile__(
1081 "       mov     0x40, %%g2\n"
1082 "       ldxa    [%%g0] %0, %%g1\n"
1083 "       ldxa    [%%g2] %1, %%g1\n"
1084 "       stxa    %%g0, [%%g0] %0\n"
1085 "       membar  #Sync\n"
1086         : /* no outputs */
1087         : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
1088         : "g1", "g2");
1089 }
1090
1091 void enable_prom_timer(void)
1092 {
1093         if (!prom_timers)
1094                 return;
1095
1096         /* Set it to whatever was there before. */
1097         prom_timers->limit1 = prom_limit1;
1098         prom_timers->count1 = 0;
1099         prom_timers->limit0 = prom_limit0;
1100         prom_timers->count0 = 0;
1101 }
1102
1103 void init_irqwork_curcpu(void)
1104 {
1105         register struct irq_work_struct *workp asm("o2");
1106         unsigned long tmp;
1107         int cpu = hard_smp_processor_id();
1108
1109         memset(__irq_work + cpu, 0, sizeof(*workp));
1110
1111         /* Make sure we are called with PSTATE_IE disabled.  */
1112         __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
1113                              : "=r" (tmp));
1114         if (tmp & PSTATE_IE) {
1115                 prom_printf("BUG: init_irqwork_curcpu() called with "
1116                             "PSTATE_IE enabled, bailing.\n");
1117                 __asm__ __volatile__("mov       %%i7, %0\n\t"
1118                                      : "=r" (tmp));
1119                 prom_printf("BUG: Called from %lx\n", tmp);
1120                 prom_halt();
1121         }
1122
1123         /* Set interrupt globals.  */
1124         workp = &__irq_work[cpu];
1125         __asm__ __volatile__(
1126         "rdpr   %%pstate, %0\n\t"
1127         "wrpr   %0, %1, %%pstate\n\t"
1128         "mov    %2, %%g6\n\t"
1129         "wrpr   %0, 0x0, %%pstate\n\t"
1130         : "=&r" (tmp)
1131         : "i" (PSTATE_IG), "r" (workp));
1132 }
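
/* The point of the asm block above: while running on the interrupt ("IG")
 * global register set, %g6 is loaded with this cpu's __irq_work pointer, so
 * the vectored-interrupt trap handler can reach its worklists directly,
 * without a memory lookup that could take a TLB miss at trap time.
 */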
1133
1134 /* Only invoked on boot processor. */
1135 void __init init_IRQ(void)
1136 {
1137         map_prom_timers();
1138         kill_prom_timer();
1139         memset(&ivector_table[0], 0, sizeof(ivector_table));
1140
        /* We need to clear any IRQs pending in the soft interrupt
1142          * registers, a spurious one could be left around from the
1143          * PROM timer which we just disabled.
1144          */
1145         clear_softint(get_softint());
1146
1147         /* Now that ivector table is initialized, it is safe
1148          * to receive IRQ vector traps.  We will normally take
1149          * one or two right now, in case some device PROM used
1150          * to boot us wants to speak to us.  We just ignore them.
1151          */
1152         __asm__ __volatile__("rdpr      %%pstate, %%g1\n\t"
1153                              "or        %%g1, %0, %%g1\n\t"
1154                              "wrpr      %%g1, 0x0, %%pstate"
1155                              : /* No outputs */
1156                              : "i" (PSTATE_IE)
1157                              : "g1");
1158 }
1159
1160 static struct proc_dir_entry * root_irq_dir;
1161 static struct proc_dir_entry * irq_dir [NUM_IVECS];
1162
1163 #ifdef CONFIG_SMP
1164
1165 #define HEX_DIGITS 16
1166
1167 static unsigned int parse_hex_value (const char *buffer,
1168                 unsigned long count, unsigned long *ret)
1169 {
1170         unsigned char hexnum [HEX_DIGITS];
1171         unsigned long value;
1172         int i;
1173
1174         if (!count)
1175                 return -EINVAL;
1176         if (count > HEX_DIGITS)
1177                 count = HEX_DIGITS;
1178         if (copy_from_user(hexnum, buffer, count))
1179                 return -EFAULT;
1180
1181         /*
         * Parse up to HEX_DIGITS characters as a hex string; any non-hex char
         * is end-of-string.  '00e1', 'e1', '00E1', and 'E1' are all the same.
1184          */
1185         value = 0;
1186
1187         for (i = 0; i < count; i++) {
1188                 unsigned int c = hexnum[i];
1189
1190                 switch (c) {
1191                         case '0' ... '9': c -= '0'; break;
1192                         case 'a' ... 'f': c -= 'a'-10; break;
1193                         case 'A' ... 'F': c -= 'A'-10; break;
1194                 default:
1195                         goto out;
1196                 }
1197                 value = (value << 4) | c;
1198         }
1199 out:
1200         *ret = value;
1201         return 0;
1202 }
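
/* Example (values illustrative): a write of "e1\n" to the smp_affinity proc
 * file below arrives here with count clamped to HEX_DIGITS; the parse stops
 * at the newline and *ret becomes 0xe1.
 */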
1203
1204 static int irq_affinity_read_proc (char *page, char **start, off_t off,
1205                         int count, int *eof, void *data)
1206 {
1207         struct ino_bucket *bp = ivector_table + (long)data;
1208         struct irqaction *ap = bp->irq_info;
1209         cpumask_t mask = get_smpaff_in_irqaction(ap);
1210         int len;
1211
1212         if (cpus_empty(mask))
1213                 mask = cpu_online_map;
1214
1215         len = cpumask_scnprintf(page, count, mask);
1216         if (count - len < 2)
1217                 return -EINVAL;
1218         len += sprintf(page + len, "\n");
1219         return len;
1220 }
1221
1222 static inline void set_intr_affinity(int irq, unsigned long hw_aff)
1223 {
1224         struct ino_bucket *bp = ivector_table + irq;
1225
1226         /* Users specify affinity in terms of hw cpu ids.
1227          * As soon as we do this, handler_irq() might see and take action.
1228          */
1229         put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff);
1230
1231         /* Migration is simply done by the next cpu to service this
1232          * interrupt.
1233          */
1234 }
1235
1236 static int irq_affinity_write_proc (struct file *file, const char *buffer,
1237                                         unsigned long count, void *data)
1238 {
1239         int irq = (long) data, full_count = count, err;
1240         unsigned long new_value, i;
1241
1242         err = parse_hex_value(buffer, count, &new_value);
1243
1244         /*
1245          * Do not allow disabling IRQs completely - it's a too easy
1246          * way to make the system unusable accidentally :-) At least
1247          * one online CPU still has to be targeted.
1248          */
1249         for (i = 0; i < NR_CPUS; i++) {
1250                 if ((new_value & (1UL << i)) != 0 &&
1251                     !cpu_online(i))
1252                         new_value &= ~(1UL << i);
1253         }
1254
1255         if (!new_value)
1256                 return -EINVAL;
1257
1258         set_intr_affinity(irq, new_value);
1259
1260         return full_count;
1261 }
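
/* Illustrative usage from userspace (the ino "1d" is hypothetical): to allow
 * an interrupt only on hw cpus 0 and 5, write the hex mask with those bits
 * set:
 *
 *	echo 21 > /proc/irq/1d/smp_affinity
 *
 * Bits naming offline cpus are silently cleared; an all-zero mask is
 * rejected with -EINVAL.
 */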
1262
1263 #endif
1264
1265 #define MAX_NAMELEN 10
1266
1267 static void register_irq_proc (unsigned int irq)
1268 {
1269         char name [MAX_NAMELEN];
1270
1271         if (!root_irq_dir || irq_dir[irq])
1272                 return;
1273
1274         memset(name, 0, MAX_NAMELEN);
1275         sprintf(name, "%x", irq);
1276
1277         /* create /proc/irq/1234 */
1278         irq_dir[irq] = proc_mkdir(name, root_irq_dir);
1279
1280 #ifdef CONFIG_SMP
1281         /* XXX SMP affinity not supported on starfire yet. */
1282         if (this_is_starfire == 0) {
1283                 struct proc_dir_entry *entry;
1284
1285                 /* create /proc/irq/1234/smp_affinity */
1286                 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
1287
1288                 if (entry) {
1289                         entry->nlink = 1;
1290                         entry->data = (void *)(long)irq;
1291                         entry->read_proc = irq_affinity_read_proc;
1292                         entry->write_proc = irq_affinity_write_proc;
1293                 }
1294         }
1295 #endif
1296 }
1297
1298 void init_irq_proc (void)
1299 {
1300         /* create /proc/irq */
1301         root_irq_dir = proc_mkdir("irq", 0);
1302 }
1303