/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/hardirq.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#ifdef CONFIG_SMP
static void distribute_irqs(void);
#endif
/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * in the same operation.
 */
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
struct irq_work_struct {
	unsigned int	irq_worklists[16];
};
struct irq_work_struct __irq_work[NR_CPUS];
#define irq_work(__cpu, __pil)	&(__irq_work[(__cpu)].irq_worklists[(__pil)])
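/* Illustrative note (added comment, not in the original source): the
 * vectored trap handler in entry.S pushes each incoming bucket onto
 * __irq_work[cpu].irq_worklists[pil], and handler_irq() later drains
 * one PIL's list with a single atomic exchange, e.g.:
 *
 *	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
 */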
#ifdef CONFIG_PCI
/* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
 * It is used for PCI only to synchronize DMA transfers with IRQ delivery
 * for devices behind busses other than APB on Sabre systems.
 *
 * Currently these physical addresses are just config space accesses
 * to the command register for that device.
 */
unsigned long pci_dma_wsync;
unsigned long dma_sync_reg_table[256];
unsigned char dma_sync_reg_table_entry = 0;
#endif
/* This is based upon code in the 32-bit Sparc kernel written mostly by
 * David Redman (djhr@tadpole.co.uk).
 */
#define MAX_STATIC_ALLOC	4
static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
static int static_irq_count;
/* This is exported so that fast IRQ handlers can get at it... -DaveM */
struct irqaction *irq_action[NR_IRQS+1] = {
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
};
/* This only synchronizes entities which modify IRQ handler
 * state and some selected user-level spots that want to
 * read things in the table.  IRQ handler processing orders
 * its accesses such that no locking is needed.
 */
static spinlock_t irq_action_lock = SPIN_LOCK_UNLOCKED;
static void register_irq_proc (unsigned int irq);
/*
 * Upper 2b of irqaction->flags holds the ino.
 * irqaction->mask holds the smp affinity information.
 */
#define put_ino_in_irqaction(action, irq) \
	action->flags &= 0xffffffffffffUL; \
	if (__bucket(irq) == &pil0_dummy_bucket) \
		action->flags |= 0xdeadUL << 48; \
	else \
		action->flags |= __irq_ino(irq) << 48;
#define get_ino_in_irqaction(action)	(action->flags >> 48)

#define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
#define get_smpaff_in_irqaction(action)		((action)->mask)
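/* Layout sketch (added comment, not in the original source): the low
 * 48 bits of ->flags carry the SA_* irqflags, the high 16 bits cache
 * the INO; get_ino_in_irqaction() is just a shift because nothing
 * else lives above bit 47.  The 0xdead value marks the PIL0 dummy
 * bucket, which has no real INO behind it.
 */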
int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags;
	int i = *(loff_t *) v;
	struct irqaction *action;
#ifdef CONFIG_SMP
	int j;
#endif

	spin_lock_irqsave(&irq_action_lock, flags);
	if (i <= NR_IRQS) {
		if (!(action = *(i + irq_action)))
			goto out_unlock;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++) {
			if (!cpu_online(j))
				continue;
			seq_printf(p, "%10u ",
				   kstat_cpu(j).irqs[i]);
		}
#endif
		seq_printf(p, " %s:%lx", action->name,
			   get_ino_in_irqaction(action));
		for (action = action->next; action; action = action->next) {
			seq_printf(p, ", %s:%lx", action->name,
				   get_ino_in_irqaction(action));
		}
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);

	return 0;
}
/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;
	unsigned long tid;

	imap = bucket->imap;
	if (imap == 0UL)
		return;

	preempt_disable();

	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32) == 0x003e0016) {
			/* We set it to our JBUS ID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_JBUS_CONFIG));
			tid = ((tid & (0x1fUL<<17)) << 9);
			tid &= IMAP_TID_JBUS;
		} else {
			/* We set it to our Safari AID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_SAFARI_CONFIG));
			tid = ((tid & (0x3ffUL<<17)) << 9);
			tid &= IMAP_AID_SAFARI;
		}
	} else if (this_is_starfire == 0) {
		/* We set it to our UPA MID. */
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (tid)
				     : "i" (ASI_UPA_CONFIG));
		tid = ((tid & UPA_CONFIG_MID) << 9);
		tid &= IMAP_TID_UPA;
	} else {
		tid = (starfire_translate(imap, smp_processor_id()) << 26);
		tid &= IMAP_TID_UPA;
	}

	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
	 * of this SYSIO's preconfigured IGN in the SYSIO Control
	 * Register, the hardware just mirrors that value here.
	 * However for Graphics and UPA Slave devices the full
	 * IMAP_INR field can be set by the programmer here.
	 *
	 * Things like FFB can now be handled via the new IRQ mechanism.
	 */
	upa_writel(tid | IMAP_VALID, imap);

	preempt_enable();
}
/* This now gets passed true ino's as well. */
void disable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;

	imap = bucket->imap;
	if (imap != 0UL) {
		u32 tmp;

		/* NOTE: We do not want to futz with the IRQ clear registers
		 *       and move the state to IDLE, the SCSI code does call
		 *       disable_irq() to assure atomicity in the queue cmd
		 *       SCSI adapter driver code.  Thus we'd lose interrupts.
		 */
		tmp = upa_readl(imap);
		tmp &= ~IMAP_VALID;
		upa_writel(tmp, imap);
	}
}
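/* Added note (not in the original source): enable_irq() retargets the
 * source by rewriting the IMAP TID field and setting IMAP_VALID,
 * while disable_irq() merely clears IMAP_VALID so the source stops
 * posting vectors; the ICLR state machine is deliberately left alone
 * (see the SCSI comment above).
 */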
/* The timer is the one "weird" interrupt which is generated by
 * the CPU %tick register and not by some normal vectored interrupt
 * source.  To handle this special case, we use this dummy INO bucket.
 */
static struct ino_bucket pil0_dummy_bucket = {
	/* All fields remain zero/NULL: no irq_chain, PIL 0, not
	 * pending, no flags, no irq_info and no IMAP/ICLR registers.
	 */
	0,
};
unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	int ino;

	if (pil == 0) {
		if (iclr != 0UL || imap != 0UL) {
			prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
				    iclr, imap);
			prom_halt();
		}
		return __irq(&pil0_dummy_bucket);
	}

	/* RULE: Both must be specified in all other cases. */
	if (iclr == 0UL || imap == 0UL) {
		prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
			    pil, inofixup, iclr, imap);
		prom_halt();
	}

	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	if (ino >= NUM_IVECS) {
		prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_halt();
	}

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket = &ivector_table[ino];
	if ((bucket->flags & IBF_ACTIVE) ||
	    (bucket->irq_info != NULL)) {
		/* This is a gross fatal error if it happens here. */
		prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
		prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
			    bucket->pil, bucket->iclr, bucket->imap);
		prom_printf("IRQ: Cannot continue, halting...\n");
		prom_halt();
	}
	bucket->imap  = imap;
	bucket->iclr  = iclr;
	bucket->pil   = pil;
	bucket->flags = 0;

	bucket->irq_info = NULL;

	return __irq(bucket);
}
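/* Usage sketch (added comment, not in the original source): a bus
 * driver that knows the physical addresses of a device's ICLR/IMAP
 * registers builds the IRQ cookie once and then registers a handler
 * against it; the names below are hypothetical:
 *
 *	unsigned int cookie = build_irq(pil, 0, iclr_pa, imap_pa);
 *
 *	if (request_irq(cookie, mydev_intr, SA_SHIRQ, "mydev", mydev))
 *		printk(KERN_ERR "mydev: IRQ registration failed\n");
 */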
static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id(), bucket->pil);
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}
int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action, *tmp = NULL;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}
	if (!handler)
		return -EINVAL;

	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block. In SA_STATIC_ALLOC case,
		 * random driver's kmalloc will fail, but it is safe.
		 * If already initialized, random driver will not reinit.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(bucket->pil + irq_action);
	if (action) {
		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		else {
			spin_unlock_irqrestore(&irq_action_lock, flags);
			return -EBUSY;
		}
		action = NULL;		/* Or else! */
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", irq, name);
	}
	if (action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_ATOMIC);
	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}

	if (bucket == &pil0_dummy_bucket) {
		bucket->irq_info = action;
		bucket->flags |= IBF_ACTIVE;
	} else {
		if ((bucket->flags & IBF_ACTIVE) != 0) {
			void *orig = bucket->irq_info;
			void **vector = NULL;

			if ((bucket->flags & IBF_PCI) == 0) {
				printk("IRQ: Trying to share non-PCI bucket.\n");
				goto free_and_ebusy;
			}
			if ((bucket->flags & IBF_MULTI) == 0) {
				vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC);
				if (vector == NULL)
					goto free_and_enomem;

				/* We might have slept. */
				if ((bucket->flags & IBF_MULTI) != 0) {
					int ent;

					kfree(vector);
					vector = (void **)bucket->irq_info;
					for(ent = 0; ent < 4; ent++) {
						if (vector[ent] == NULL) {
							vector[ent] = action;
							break;
						}
					}
					if (ent == 4)
						goto free_and_ebusy;
				} else {
					vector[0] = orig;
					vector[1] = action;
					vector[2] = NULL;
					vector[3] = NULL;
					bucket->irq_info = vector;
					bucket->flags |= IBF_MULTI;
				}
			} else {
				int ent;

				vector = (void **)orig;
				for (ent = 0; ent < 4; ent++) {
					if (vector[ent] == NULL) {
						vector[ent] = action;
						break;
					}
				}
				if (ent == 4)
					goto free_and_ebusy;
			}
		} else {
			bucket->irq_info = action;
			bucket->flags |= IBF_ACTIVE;
		}
		pending = bucket->pending;
		if (pending)
			bucket->pending = 0;
	}

	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	if (tmp)
		tmp->next = action;
	else
		*(bucket->pil + irq_action) = action;

	enable_irq(irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if (pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << bucket->pil);
	}
	spin_unlock_irqrestore(&irq_action_lock, flags);
	if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC)))
		register_irq_proc(__irq_ino(irq));

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;

free_and_ebusy:
	kfree(action);
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return -EBUSY;

free_and_enomem:
	kfree(action);
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return -ENOMEM;
}

EXPORT_SYMBOL(request_irq);
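/* Added note (not in the original source): when SA_SHIRQ is used the
 * same cookie may be registered several times, and the dev_id passed
 * here is what free_irq() later uses to pick the right handler off
 * the shared chain, so it must be unique and non-NULL for shared
 * lines.
 */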
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct irqaction *tmp = NULL;
	unsigned long flags;
	struct ino_bucket *bucket = __bucket(irq), *bp;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(bucket->pil + irq_action);
	if (!action->handler) {
		printk("Freeing free IRQ %d\n", bucket->pil);
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return;
	}
	if (dev_id) {
		for ( ; action; action = action->next) {
			if (action->dev_id == dev_id)
				break;
			tmp = action;
		}
		if (!action) {
			printk("Trying to free free shared IRQ %d\n", bucket->pil);
			spin_unlock_irqrestore(&irq_action_lock, flags);
			return;
		}
	} else if (action->flags & SA_SHIRQ) {
		printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return;
	}

	if (action->flags & SA_STATIC_ALLOC) {
		printk("Attempt to free statically allocated IRQ %d (%s)\n",
		       bucket->pil, action->name);
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return;
	}

	if (tmp)
		tmp->next = action->next;
	else
		*(bucket->pil + irq_action) = action->next;

	spin_unlock_irqrestore(&irq_action_lock, flags);

	synchronize_irq(irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	if (bucket != &pil0_dummy_bucket) {
		unsigned long imap = bucket->imap;
		void **vector, *orig;
		int ent;

		orig = bucket->irq_info;
		vector = (void **)orig;

		if ((bucket->flags & IBF_MULTI) != 0) {
			int other = 0;
			void *orphan = NULL;

			for (ent = 0; ent < 4; ent++) {
				if (vector[ent] == action)
					vector[ent] = NULL;
				else if (vector[ent] != NULL) {
					orphan = vector[ent];
					other++;
				}
			}

			/* Only free when no other shared irq
			 * uses this bucket.
			 */
			if (other == 1) {
				/* Convert back to non-shared bucket. */
				bucket->irq_info = orphan;
				bucket->flags &= ~(IBF_MULTI);
				kfree(vector);
			}
		} else {
			bucket->irq_info = NULL;
		}

		/* This unique interrupt source is now inactive. */
		bucket->flags &= ~IBF_ACTIVE;

		/* See if any other buckets share this bucket's IMAP
		 * and are still active.
		 */
		for (ent = 0; ent < NUM_IVECS; ent++) {
			bp = &ivector_table[ent];
			if (bp != bucket	&&
			    bp->imap == imap	&&
			    (bp->flags & IBF_ACTIVE) != 0)
				break;
		}

		/* Only disable when no other sub-irq levels of
		 * the same IMAP are active.
		 */
		if (ent == NUM_IVECS)
			disable_irq(irq);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);

	kfree(action);
}

EXPORT_SYMBOL(free_irq);
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);

#if 0
	/* The following is how I wish I could implement this.
	 * Unfortunately the ICLR registers are read-only, you can
	 * only write ICLR_foo values to them.  To get the current
	 * IRQ status you would need to get at the IRQ diag registers
	 * in the PCI/SBUS controller and the layout of those vary
	 * from one controller to the next, sigh... -DaveM
	 */
	unsigned long iclr = bucket->iclr;

	while (1) {
		u32 tmp = upa_readl(iclr);

		if (tmp == ICLR_TRANSMIT ||
		    tmp == ICLR_PENDING) {
			cpu_relax();
			continue;
		}
		break;
	}
#else
	/* So we have to do this with an INPROGRESS bit just like x86. */
	while (bucket->flags & IBF_INPROGRESS)
		cpu_relax();
#endif
}
#endif /* CONFIG_SMP */
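/* Added note (not in the original source): free_irq() relies on this
 * by unlinking the action, dropping irq_action_lock and then calling
 * synchronize_irq(); since handler_irq() brackets each invocation
 * with IBF_INPROGRESS, spinning until the bit clears ensures any
 * in-flight handler has finished before the action is freed.
 */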
void catch_disabled_ivec(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));

	/* We can actually see this on Ultra/PCI PCI cards, which are bridges
	 * to other devices.  Here a single IMAP enabled potentially multiple
	 * unique interrupt sources (which each do have a unique ICLR register).
	 *
	 * So what we do is just register that the IVEC arrived, when registered
	 * for real the request_irq() code will check the bit and signal
	 * a local CPU interrupt for it.
	 */
#if 0
	printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
	       bucket - &ivector_table[0], regs->tpc);
#endif
	*irq_work(cpu, 0) = 0;
	bucket->pending = 1;
}
#define FORWARD_VOLUME		12
#ifdef CONFIG_SMP

static inline void redirect_intr(int cpu, struct ino_bucket *bp)
{
	/* Ok, here is what is going on:
	 * 1) Retargeting IRQs on Starfire is very
	 *    expensive so just forget about it on them.
	 * 2) Moving around very high priority interrupts
	 *    is a losing game.
	 * 3) If the current cpu is idle, interrupts are
	 *    useful work, so keep them here.  But do not
	 *    pass to our neighbour if he is not very idle.
	 * 4) If sysadmin explicitly asks for directed intrs,
	 *    Just Do It.
	 */
	struct irqaction *ap = bp->irq_info;
	cpumask_t cpu_mask;
	unsigned int buddy, ticks;

	cpu_mask = get_smpaff_in_irqaction(ap);
	cpus_and(cpu_mask, cpu_mask, cpu_online_map);
	if (cpus_empty(cpu_mask))
		cpu_mask = cpu_online_map;

	if (this_is_starfire != 0 ||
	    bp->pil >= 10 || current->pid == 0)
		goto out;

	/* 'cpu' is the MID (ie. UPAID), calculate the MID
	 * of our buddy.
	 */
	buddy = cpu + 1;
	if (buddy >= NR_CPUS)
		buddy = 0;

	ticks = 0;
	while (!cpu_isset(buddy, cpu_mask)) {
		if (++buddy >= NR_CPUS)
			buddy = 0;
		if (++ticks > NR_CPUS) {
			put_smpaff_in_irqaction(ap, CPU_MASK_NONE);
			goto out;
		}
	}

	if (buddy == cpu)
		goto out;

	/* Voo-doo programming. */
	if (cpu_data(buddy).idle_volume < FORWARD_VOLUME)
		goto out;

	/* This just so happens to be correct on Cheetah
	 * at the moment.
	 */
	buddy <<= 26;

	/* Push it to our buddy. */
	upa_writel(buddy | IMAP_VALID, bp->imap);

out:
	return;
}

#endif
void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp, *nbp;
	int cpu = smp_processor_id();

#ifndef CONFIG_SMP
	/*
	 * Check for TICK_INT on level 14 softint.
	 */
	{
		unsigned long clr_mask = 1 << irq;
		unsigned long tick_mask = tick_ops->softint_mask;

		if ((irq == 14) && (get_softint() & tick_mask)) {
			irq = 0;
			clr_mask = tick_mask;
		}
		clear_softint(clr_mask);
	}
#else
	int should_forward = 1;

	clear_softint(1 << irq);
#endif

	irq_enter();
	kstat_this_cpu.irqs[irq]++;

	/* Sliiiick... */
#ifndef CONFIG_SMP
	bp = ((irq != 0) ?
	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
	      &pil0_dummy_bucket);
#else
	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
#endif
	for ( ; bp != NULL; bp = nbp) {
		unsigned char flags = bp->flags;
		unsigned char random = 0;

		nbp = __bucket(bp->irq_chain);
		bp->irq_chain = 0;

		bp->flags |= IBF_INPROGRESS;

		if ((flags & IBF_ACTIVE) != 0) {
#ifdef CONFIG_PCI
			if ((flags & IBF_DMA_SYNC) != 0) {
				upa_readl(dma_sync_reg_table[bp->synctab_ent]);
				upa_readq(pci_dma_wsync);
			}
#endif
			if ((flags & IBF_MULTI) == 0) {
				struct irqaction *ap = bp->irq_info;
				ap->handler(__irq(bp), ap->dev_id, regs);
				random |= ap->flags & SA_SAMPLE_RANDOM;
			} else {
				void **vector = (void **)bp->irq_info;
				int ent;

				for (ent = 0; ent < 4; ent++) {
					struct irqaction *ap = vector[ent];

					if (ap != NULL) {
						ap->handler(__irq(bp), ap->dev_id, regs);
						random |= ap->flags & SA_SAMPLE_RANDOM;
					}
				}
			}
			/* Only the dummy bucket lacks IMAP/ICLR. */
			if (bp->pil != 0) {
#ifdef CONFIG_SMP
				if (should_forward) {
					redirect_intr(cpu, bp);
					should_forward = 0;
				}
#endif
				upa_writel(ICLR_IDLE, bp->iclr);

				/* Test and add entropy */
				if (random & SA_SAMPLE_RANDOM)
					add_interrupt_randomness(irq);
			}
		} else
			bp->pending = 1;

		bp->flags &= ~IBF_INPROGRESS;
	}
	irq_exit();
}
#ifdef CONFIG_BLK_DEV_FD
extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);

void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	struct irqaction *action = *(irq + irq_action);
	struct ino_bucket *bucket;
	int cpu = smp_processor_id();

	irq_enter();
	kstat_this_cpu.irqs[irq]++;

	*(irq_work(cpu, irq)) = 0;
	bucket = get_ino_in_irqaction(action) + ivector_table;

	bucket->flags |= IBF_INPROGRESS;

	floppy_interrupt(irq, dev_cookie, regs);
	upa_writel(ICLR_IDLE, bucket->iclr);

	bucket->flags &= ~IBF_INPROGRESS;

	irq_exit();
}
#endif
/* The following assumes that the branch lies before the place we
 * are branching to.  This is the case for a trap vector...
 * You have been warned.
 */
#define SPARC_BRANCH(dest_addr, inst_addr) \
          (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))

#define SPARC_NOP (0x01000000)
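/* Worked example (added comment, not in the original source): for a
 * branch instruction at 0x1000 targeting a handler at 0x2000,
 * SPARC_BRANCH(0x2000, 0x1000) yields
 * 0x10800000 | ((0x1000 >> 2) & 0x3fffff) == 0x10800400, i.e. a
 * "ba" with a 22-bit word displacement of 0x400.
 */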
static void install_fast_irq(unsigned int cpu_irq,
			     irqreturn_t (*handler)(int, void *, struct pt_regs *))
{
	extern unsigned long sparc64_ttable_tl0;
	unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
	unsigned int *insns;

	ttent += 0x820;
	ttent += (cpu_irq - 1) << 5;
	insns = (unsigned int *) ttent;
	insns[0] = SPARC_BRANCH(((unsigned long) handler),
				((unsigned long)&insns[0]));
	insns[1] = SPARC_NOP;
	__asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
}
int request_fast_irq(unsigned int irq,
		     irqreturn_t (*handler)(int, void *, struct pt_regs *),
		     unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;

	/* No pil0 dummy buckets allowed here. */
	if (bucket < &ivector_table[0] ||
	    bucket >= &ivector_table[NUM_IVECS]) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}

	if (!handler)
		return -EINVAL;

	if ((bucket->pil == 0) || (bucket->pil == 14)) {
		printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
		return -EBUSY;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(bucket->pil + irq_action);
	if (action) {
		if (action->flags & SA_SHIRQ)
			panic("Trying to register fast irq when already shared.\n");
		if (irqflags & SA_SHIRQ)
			panic("Trying to register fast irq as shared.\n");
		printk("request_fast_irq: Trying to register yet already owned.\n");
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -EBUSY;
	}

	/*
	 * We do not check for SA_SAMPLE_RANDOM in this path. Neither do we
	 * support smp intr affinity in this path.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", bucket->pil, name);
	}
	if (action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_ATOMIC);
	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}
	install_fast_irq(bucket->pil, handler);

	bucket->irq_info = action;
	bucket->flags |= IBF_ACTIVE;

	action->handler = handler;
	action->flags = irqflags;
	action->dev_id = NULL;
	action->name = name;
	action->next = NULL;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	*(bucket->pil + irq_action) = action;
	enable_irq(irq);

	spin_unlock_irqrestore(&irq_action_lock, flags);

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;
}
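/* Usage sketch (added comment, not in the original source): a driver
 * wanting its handler branched to straight from the trap table would
 * do, with hypothetical names:
 *
 *	err = request_fast_irq(cookie, mydev_fast_intr, 0, "mydev", NULL);
 *
 * Note that dev_id is forced to NULL here and the IRQ can never be
 * shared; the panics above enforce that.
 */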
/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off(unsigned long mask)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_off);
#ifdef CONFIG_SMP
static int retarget_one_irq(struct irqaction *p, int goal_cpu)
{
	struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
	unsigned long imap = bucket->imap;
	unsigned int tid;

	while (!cpu_online(goal_cpu)) {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	}

	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		tid = goal_cpu << 26;
		tid &= IMAP_AID_SAFARI;
	} else if (this_is_starfire == 0) {
		tid = goal_cpu << 26;
		tid &= IMAP_TID_UPA;
	} else {
		tid = (starfire_translate(imap, goal_cpu) << 26);
		tid &= IMAP_TID_UPA;
	}
	upa_writel(tid | IMAP_VALID, imap);

	if (++goal_cpu >= NR_CPUS)
		goal_cpu = 0;
	while (!cpu_online(goal_cpu)) {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	}

	return goal_cpu;
}
/* Called from request_irq. */
static void distribute_irqs(void)
{
	unsigned long flags;
	int cpu, level;

	spin_lock_irqsave(&irq_action_lock, flags);
	cpu = 0;

	/*
	 * Skip the timer at [0], and very rare error/power intrs at [15].
	 * Also level [12], it causes problems on Ex000 systems.
	 */
	for (level = 1; level < NR_IRQS; level++) {
		struct irqaction *p = irq_action[level];
		if (level == 12)
			continue;
		while (p) {
			cpu = retarget_one_irq(p, cpu);
			p = p->next;
		}
	}
	spin_unlock_irqrestore(&irq_action_lock, flags);
}
#endif /* CONFIG_SMP */
struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	unsigned int addr[3];
	int tnode, err;

	/* PROM timer node hangs out in the top level of device siblings... */
	tnode = prom_finddevice("/counter-timer");

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (tnode == 0 || tnode == -1) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by him. */
	err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
	if (err == -1) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}
static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}
void enable_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Set it to whatever was there before. */
	prom_timers->limit1 = prom_limit1;
	prom_timers->count1 = 0;
	prom_timers->limit0 = prom_limit0;
	prom_timers->count0 = 0;
}
void init_irqwork_curcpu(void)
{
	register struct irq_work_struct *workp asm("o2");
	register unsigned long tmp asm("o3");
	int cpu = hard_smp_processor_id();

	memset(__irq_work + cpu, 0, sizeof(*workp));

	/* Make sure we are called with PSTATE_IE disabled. */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     : "=r" (tmp));
	if (tmp & PSTATE_IE) {
		prom_printf("BUG: init_irqwork_curcpu() called with "
			    "PSTATE_IE enabled, bailing.\n");
		__asm__ __volatile__("mov %%i7, %0\n\t"
				     : "=r" (tmp));
		prom_printf("BUG: Called from %lx\n", tmp);
		prom_halt();
	}

	/* Set interrupt globals. */
	workp = &__irq_work[cpu];
	__asm__ __volatile__(
	"rdpr	%%pstate, %0\n\t"
	"wrpr	%0, %1, %%pstate\n\t"
	"mov	%2, %%g6\n\t"
	"wrpr	%0, 0x0, %%pstate\n\t"
	: "=&r" (tmp)
	: "i" (PSTATE_IG), "r" (workp));
}
/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	map_prom_timers();
	kill_prom_timer();
	memset(&ivector_table[0], 0, sizeof(ivector_table));

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");
}
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NUM_IVECS];

#ifdef CONFIG_SMP

static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	struct ino_bucket *bp = ivector_table + (long)data;
	struct irqaction *ap = bp->irq_info;
	cpumask_t mask;
	int len;

	mask = get_smpaff_in_irqaction(ap);
	if (cpus_empty(mask))
		mask = cpu_online_map;

	len = cpumask_scnprintf(page, count, mask);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}
static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
{
	struct ino_bucket *bp = ivector_table + irq;

	/* Users specify affinity in terms of hw cpu ids.
	 * As soon as we do this, handler_irq() might see and take action.
	 */
	put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff);

	/* Migration is simply done by the next cpu to service this
	 * interrupt.
	 */
}
static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
					unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	cpus_and(new_value, new_value, cpu_online_map);
	if (cpus_empty(new_value))
		return -EINVAL;

	set_intr_affinity(irq, new_value);

	return full_count;
}

#endif
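/* Added note (not in the original source): with the two proc handlers
 * above wired up by register_irq_proc(), an INO's affinity can be
 * inspected and changed from userspace, e.g.:
 *
 *	# cat /proc/irq/7e4/smp_affinity
 *	# echo 3 > /proc/irq/7e4/smp_affinity
 *
 * The directory name ("7e4" here is hypothetical) is the INO printed
 * in hex by register_irq_proc() below; writes that leave no online
 * CPU targeted are rejected with -EINVAL.
 */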
#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%x", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	/* XXX SMP affinity not supported on starfire yet. */
	if (this_is_starfire == 0) {
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
#endif
}
void init_irq_proc (void)
{
	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
}