/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include <asm/iommu.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/hardirq.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>

static void distribute_irqs(void);

/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it.
 */
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
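
/* Illustrative sketch (simplified, not the actual entry point): when
 * the IVEC trap delivers vector 'ino', the dispatch conceptually does
 * the following (entry.S does it in assembly; atomic_bucket_insert()
 * below is the C equivalent used when replaying a pending vector):
 *
 *	struct ino_bucket *bp = &ivector_table[ino];
 *	bp->irq_chain = *irq_work(cpu, bp->pil);
 *	*irq_work(cpu, bp->pil) = __irq(bp);
 *	set_softint(1 << bp->pil);
 */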

/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
struct irq_work_struct {
	unsigned int	irq_worklists[16];
};
struct irq_work_struct __irq_work[NR_CPUS];
#define irq_work(__cpu, __pil)	&(__irq_work[(__cpu)].irq_worklists[(__pil)])
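
/* The consumer side is sketched here for illustration: handler_irq()
 * below grabs and empties a worklist in a single atomic xchg, so the
 * IVEC producer never needs a lock:
 *
 *	bp = __bucket(xchg32(irq_work(cpu, pil), 0));
 *	for ( ; bp != NULL; bp = __bucket(bp->irq_chain))
 *		...run the handlers hanging off bp...
 */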

/* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
 * It is used for PCI only to synchronize DMA transfers with IRQ delivery
 * for devices behind busses other than APB on Sabre systems.
 *
 * Currently these physical addresses are just config space accesses
 * to the command register for that device.
 */
unsigned long pci_dma_wsync;
unsigned long dma_sync_reg_table[256];
unsigned char dma_sync_reg_table_entry = 0;

/* This is based upon code in the 32-bit Sparc kernel written mostly by
 * David Redman (djhr@tadpole.co.uk).
 */
#define MAX_STATIC_ALLOC	4
static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
static int static_irq_count;

/* This is exported so that fast IRQ handlers can get at it... -DaveM */
struct irqaction *irq_action[NR_IRQS+1] = {
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
};

/* This only synchronizes entities which modify IRQ handler
 * state and some selected user-level spots that want to
 * read things in the table.  IRQ handler processing orders
 * its accesses such that no locking is needed.
 */
static spinlock_t irq_action_lock = SPIN_LOCK_UNLOCKED;

static void register_irq_proc (unsigned int irq);

/* Upper 16 bits (2 bytes) of irqaction->flags hold the ino.
 * irqaction->mask holds the smp affinity information.
 */
#define put_ino_in_irqaction(action, irq) \
	action->flags &= 0xffffffffffffUL; \
	if (__bucket(irq) == &pil0_dummy_bucket) \
		action->flags |= 0xdeadUL << 48; \
	else \
		action->flags |= __irq_ino(irq) << 48;
#define get_ino_in_irqaction(action)	(action->flags >> 48)
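
/* Worked example (illustrative): for INO 0x34 with SA_SHIRQ already set
 * in ->flags:
 *
 *	put_ino_in_irqaction(action, irq);
 *		action->flags == (0x34UL << 48) | SA_SHIRQ
 *	get_ino_in_irqaction(action) == 0x34
 *
 * The PIL0 dummy bucket stores the marker value 0xdead in place of a
 * real INO.
 */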

#if NR_CPUS > 64
#error irqaction embedded smp affinity does not work with > 64 cpus, FIXME
#endif

#define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
#define get_smpaff_in_irqaction(action)		((action)->mask)

int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags;
	int i = *(loff_t *) v;
	struct irqaction *action;
#ifdef CONFIG_SMP
	int j;
#endif

	spin_lock_irqsave(&irq_action_lock, flags);
	if (i <= NR_IRQS) {
		if (!(action = *(i + irq_action)))
			goto out_unlock;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++) {
			if (!cpu_online(j))
				continue;
			seq_printf(p, "%10u ",
				   kstat_cpu(j).irqs[i]);
		}
#endif
		seq_printf(p, " %s:%lx", action->name,
			   get_ino_in_irqaction(action));
		for (action = action->next; action; action = action->next) {
			seq_printf(p, ", %s:%lx", action->name,
				   get_ino_in_irqaction(action));
		}
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return 0;
}

/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap = bucket->imap;
	unsigned long tid;

	if (imap == 0UL)
		return;

	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32) == 0x003e0016) {
			/* We set it to our JBUS ID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_JBUS_CONFIG));
			tid = ((tid & (0x1fUL<<17)) << 9);
			tid &= IMAP_TID_JBUS;
		} else {
			/* We set it to our Safari AID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_SAFARI_CONFIG));
			tid = ((tid & (0x3ffUL<<17)) << 9);
			tid &= IMAP_AID_SAFARI;
		}
	} else if (this_is_starfire == 0) {
		/* We set it to our UPA MID. */
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (tid)
				     : "i" (ASI_UPA_CONFIG));
		tid = ((tid & UPA_CONFIG_MID) << 9);
		tid &= IMAP_TID_UPA;
	} else {
		tid = (starfire_translate(imap, smp_processor_id()) << 26);
		tid &= IMAP_TID_UPA;
	}

	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
	 * of this SYSIO's preconfigured IGN in the SYSIO Control
	 * Register, the hardware just mirrors that value here.
	 * However for Graphics and UPA Slave devices the full
	 * IMAP_INR field can be set by the programmer here.
	 *
	 * Things like FFB can now be handled via the new IRQ mechanism.
	 */
	upa_writel(tid | IMAP_VALID, imap);
}
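
/* Worked example (illustrative; field positions inferred from the masks
 * above): on a JBUS system whose JBUS ID is 3, bits 21:17 of the config
 * register hold 3, so:
 *
 *	tid = ((3 << 17) & (0x1fUL << 17)) << 9 == 3 << 26 == 0x0c000000
 *
 * i.e. the target ID lands in the IMAP TID field (bits 30:26) before
 * being OR'ed with IMAP_VALID and written back.
 */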

/* This now gets passed true ino's as well. */
void disable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap = bucket->imap;

	if (imap != 0UL) {
		u32 tmp;

		/* NOTE: We do not want to futz with the IRQ clear registers
		 *       and move the state to IDLE, the SCSI code does call
		 *       disable_irq() to assure atomicity in the queue cmd
		 *       SCSI adapter driver code.  Thus we'd lose interrupts.
		 */
		tmp = upa_readl(imap);
		tmp &= ~IMAP_VALID;
		upa_writel(tmp, imap);
	}
}

/* The timer is the one "weird" interrupt which is generated by
 * the CPU %tick register and not by some normal vectored interrupt
 * source.  To handle this special case, we use this dummy INO bucket.
 */
static struct ino_bucket pil0_dummy_bucket = {
	/* Every field is intentionally zero: no IMAP/ICLR, PIL 0. */
	0,
};

unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	int ino;

	if (pil == 0) {
		if (iclr != 0UL || imap != 0UL) {
			prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
				    iclr, imap);
			prom_halt();
		}
		return __irq(&pil0_dummy_bucket);
	}

	/* RULE: Both must be specified in all other cases. */
	if (iclr == 0UL || imap == 0UL) {
		prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
			    pil, inofixup, iclr, imap);
		prom_halt();
	}

	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	if (ino >= NUM_IVECS) {
		prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_halt();
	}

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket = &ivector_table[ino];
	if ((bucket->flags & IBF_ACTIVE) ||
	    (bucket->irq_info != NULL)) {
		/* This is a gross fatal error if it happens here. */
		prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
		prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
			    bucket->pil, bucket->iclr, bucket->imap);
		prom_printf("IRQ: Cannot continue, halting...\n");
		prom_halt();
	}
	bucket->imap  = imap;
	bucket->iclr  = iclr;
	bucket->pil   = pil;
	bucket->flags = 0;
	bucket->irq_info = NULL;

	return __irq(bucket);
}
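
/* Usage sketch (hypothetical driver; 'iclr_pa' and 'imap_pa' stand for
 * the physical addresses of the device's ICLR/IMAP registers and are
 * invented names for illustration):
 *
 *	unsigned int cookie = build_irq(pil, 0, iclr_pa, imap_pa);
 *	if (request_irq(cookie, mydev_intr, SA_SHIRQ, "mydev", mydev))
 *		...fail...
 *
 * The cookie encodes the bucket pointer; request_irq() below enables
 * the interrupt itself, so no separate enable_irq() call is needed.
 */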

static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	/* Disable interrupts while linking the bucket onto this cpu's
	 * PIL worklist, then restore the previous PSTATE.
	 */
	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id(), bucket->pil);
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}

int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action, *tmp = NULL;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}

	if (!handler)
		return -EINVAL;

	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.  In SA_STATIC_ALLOC case,
		 * random driver's kmalloc will fail, but it is safe.
		 * If already initialized, random driver will not reinit.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(bucket->pil + irq_action);
	if (action) {
		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		} else {
			spin_unlock_irqrestore(&irq_action_lock, flags);
			return -EBUSY;
		}
		action = NULL;		/* Or else! */
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", irq, name);
	}
	if (action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_ATOMIC);
	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}

	if (bucket == &pil0_dummy_bucket) {
		bucket->irq_info = action;
		bucket->flags |= IBF_ACTIVE;
	} else {
		if ((bucket->flags & IBF_ACTIVE) != 0) {
			void *orig = bucket->irq_info;
			void **vector = NULL;

			if ((bucket->flags & IBF_PCI) == 0) {
				printk("IRQ: Trying to share non-PCI bucket.\n");
				goto free_and_ebusy;
			}
			if ((bucket->flags & IBF_MULTI) == 0) {
				vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC);
				if (vector == NULL)
					goto free_and_enomem;

				/* We might have slept. */
				if ((bucket->flags & IBF_MULTI) != 0) {
					int ent;

					kfree(vector);
					vector = (void **)bucket->irq_info;
					for (ent = 0; ent < 4; ent++) {
						if (vector[ent] == NULL) {
							vector[ent] = action;
							break;
						}
					}
					if (ent == 4)
						goto free_and_ebusy;
				} else {
					vector[0] = orig;
					vector[1] = action;
					vector[2] = NULL;
					vector[3] = NULL;
					bucket->irq_info = vector;
					bucket->flags |= IBF_MULTI;
				}
			} else {
				int ent;

				vector = (void **)orig;
				for (ent = 0; ent < 4; ent++) {
					if (vector[ent] == NULL) {
						vector[ent] = action;
						break;
					}
				}
				if (ent == 4)
					goto free_and_ebusy;
			}
		} else {
			bucket->irq_info = action;
			bucket->flags |= IBF_ACTIVE;
		}
		pending = bucket->pending;
		if (pending)
			bucket->pending = 0;
	}

	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, 0);

	if (tmp)
		tmp->next = action;
	else
		*(bucket->pil + irq_action) = action;

	enable_irq(irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if (pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << bucket->pil);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);
	if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC)))
		register_irq_proc(__irq_ino(irq));

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;

free_and_ebusy:
	kfree(action);
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return -EBUSY;

free_and_enomem:
	kfree(action);
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return -ENOMEM;
}

EXPORT_SYMBOL(request_irq);
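
/* Sharing note (illustrative): a second SA_SHIRQ registration against
 * the same PCI bucket converts bucket->irq_info from a single irqaction
 * pointer into a four-entry vector and sets IBF_MULTI, so at most four
 * actions can share one INO; a fifth request_irq() fails with -EBUSY,
 * and non-PCI buckets cannot be shared at all.
 */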

void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct irqaction *tmp = NULL;
	unsigned long flags;
	struct ino_bucket *bucket = __bucket(irq), *bp;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(bucket->pil + irq_action);
	if (!action->handler) {
		printk("Freeing free IRQ %d\n", bucket->pil);
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return;
	}
	if (dev_id) {
		for ( ; action; action = action->next) {
			if (action->dev_id == dev_id)
				break;
			tmp = action;
		}
		if (!action) {
			printk("Trying to free free shared IRQ %d\n", bucket->pil);
			spin_unlock_irqrestore(&irq_action_lock, flags);
			return;
		}
	} else if (action->flags & SA_SHIRQ) {
		printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return;
	}

	if (action->flags & SA_STATIC_ALLOC) {
		printk("Attempt to free statically allocated IRQ %d (%s)\n",
		       bucket->pil, action->name);
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return;
	}

	if (action && tmp)
		tmp->next = action->next;
	else
		*(bucket->pil + irq_action) = action->next;

	spin_unlock_irqrestore(&irq_action_lock, flags);

	synchronize_irq(irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	if (bucket != &pil0_dummy_bucket) {
		unsigned long imap = bucket->imap;
		void **vector, *orig;
		int ent;

		orig = bucket->irq_info;
		vector = (void **)orig;

		if ((bucket->flags & IBF_MULTI) != 0) {
			int other = 0;
			void *orphan = NULL;

			for (ent = 0; ent < 4; ent++) {
				if (vector[ent] == action)
					vector[ent] = NULL;
				else if (vector[ent] != NULL) {
					orphan = vector[ent];
					other++;
				}
			}

			/* Only free when no other shared irq
			 * uses this bucket.
			 */
			if (other == 1) {
				/* Convert back to non-shared bucket. */
				bucket->irq_info = orphan;
				bucket->flags &= ~(IBF_MULTI);
				kfree(vector);
			}
		} else {
			bucket->irq_info = NULL;
		}

		/* This unique interrupt source is now inactive. */
		bucket->flags &= ~IBF_ACTIVE;

		/* See if any other buckets share this bucket's IMAP
		 * and are still active.
		 */
		for (ent = 0; ent < NUM_IVECS; ent++) {
			bp = &ivector_table[ent];
			if (bp != bucket	&&
			    bp->imap == imap	&&
			    (bp->flags & IBF_ACTIVE) != 0)
				break;
		}

		/* Only disable when no other sub-irq levels of
		 * the same IMAP are active.
		 */
		if (ent == NUM_IVECS)
			disable_irq(irq);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);
	kfree(action);
}

EXPORT_SYMBOL(free_irq);

#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);

#if 0
	/* The following is how I wish I could implement this.
	 * Unfortunately the ICLR registers are read-only, you can
	 * only write ICLR_foo values to them.  To get the current
	 * IRQ status you would need to get at the IRQ diag registers
	 * in the PCI/SBUS controller and the layout of those vary
	 * from one controller to the next, sigh... -DaveM
	 */
	unsigned long iclr = bucket->iclr;

	while (1) {
		u32 tmp = upa_readl(iclr);

		if (tmp == ICLR_TRANSMIT ||
		    tmp == ICLR_PENDING) {
			cpu_relax();
			continue;
		}
		break;
	}
#else
	/* So we have to do this with an INPROGRESS bit just like x86. */
	while (bucket->flags & IBF_INPROGRESS)
		cpu_relax();
#endif
}
#endif /* CONFIG_SMP */

void catch_disabled_ivec(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));

	/* We can actually see this on Ultra/PCI PCI cards, which are bridges
	 * to other devices.  Here a single IMAP enabled potentially multiple
	 * unique interrupt sources (which each have a unique ICLR register).
	 *
	 * So what we do is just register that the IVEC arrived; when registered
	 * for real, the request_irq() code will check the bit and signal
	 * a local CPU interrupt for it.
	 */
#if 0
	printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
	       (int) (bucket - &ivector_table[0]), regs->tpc);
#endif
	*irq_work(cpu, 0) = 0;
	bucket->pending = 1;
}

#define FORWARD_VOLUME		12

#ifdef CONFIG_SMP
static inline void redirect_intr(int cpu, struct ino_bucket *bp)
{
	/* Ok, here is what is going on:
	 * 1) Retargeting IRQs on Starfire is very
	 *    expensive so just forget about it on them.
	 * 2) Moving around very high priority interrupts
	 *    is a losing game.
	 * 3) If the current cpu is idle, interrupts are
	 *    useful work, so keep them here.  But do not
	 *    pass to our neighbour if he is not very idle.
	 * 4) If sysadmin explicitly asks for directed intrs,
	 *    Just Do It.
	 */
	struct irqaction *ap = bp->irq_info;
	cpumask_t cpu_mask = get_smpaff_in_irqaction(ap);
	unsigned int buddy, ticks;

	cpus_and(cpu_mask, cpu_mask, cpu_online_map);
	if (cpus_empty(cpu_mask))
		cpu_mask = cpu_online_map;

	if (this_is_starfire != 0 ||
	    bp->pil >= 10 || current->pid == 0)
		goto out;

	/* 'cpu' is the MID (ie. UPAID), calculate the MID
	 * of our buddy.
	 */
	buddy = cpu + 1;
	if (buddy >= NR_CPUS)
		buddy = 0;

	ticks = 0;
	while (!cpu_isset(buddy, cpu_mask)) {
		if (++buddy >= NR_CPUS)
			buddy = 0;
		if (++ticks > NR_CPUS) {
			put_smpaff_in_irqaction(ap, 0);
			goto out;
		}
	}

	if (buddy == cpu)
		goto out;

	/* Voo-doo programming. */
	if (cpu_data(buddy).idle_volume < FORWARD_VOLUME)
		goto out;

	/* This just so happens to be correct on Cheetah
	 * at the moment.
	 */
	buddy <<= 26;

	/* Push it to our buddy. */
	upa_writel(buddy | IMAP_VALID, bp->imap);

out:
	return;
}
#endif

void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp, *nbp;
	int cpu = smp_processor_id();
#ifndef CONFIG_SMP
	/*
	 * Check for TICK_INT on level 14 softint.
	 */
	{
		unsigned long clr_mask = 1 << irq;
		unsigned long tick_mask = tick_ops->softint_mask;

		if ((irq == 14) && (get_softint() & tick_mask)) {
			irq = 0;
			clr_mask = tick_mask;
		}
		clear_softint(clr_mask);
	}
#else
	int should_forward = 1;

	clear_softint(1 << irq);
#endif

	irq_enter();
	kstat_this_cpu.irqs[irq]++;

	/* Sliiiick... */
#ifndef CONFIG_SMP
	bp = ((irq != 0) ?
	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
	      &pil0_dummy_bucket);
#else
	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
#endif
	for ( ; bp != NULL; bp = nbp) {
		unsigned char flags = bp->flags;
		unsigned char random = 0;

		nbp = __bucket(bp->irq_chain);
		bp->irq_chain = 0;
		bp->flags |= IBF_INPROGRESS;

		if ((flags & IBF_ACTIVE) != 0) {
#ifdef CONFIG_PCI
			if ((flags & IBF_DMA_SYNC) != 0) {
				upa_readl(dma_sync_reg_table[bp->synctab_ent]);
				upa_readq(pci_dma_wsync);
			}
#endif
			if ((flags & IBF_MULTI) == 0) {
				struct irqaction *ap = bp->irq_info;
				ap->handler(__irq(bp), ap->dev_id, regs);
				random |= ap->flags & SA_SAMPLE_RANDOM;
			} else {
				void **vector = (void **)bp->irq_info;
				int ent;

				for (ent = 0; ent < 4; ent++) {
					struct irqaction *ap = vector[ent];
					if (ap != NULL) {
						ap->handler(__irq(bp), ap->dev_id, regs);
						random |= ap->flags & SA_SAMPLE_RANDOM;
					}
				}
			}
			/* Only the dummy bucket lacks IMAP/ICLR. */
			if (bp->pil != 0) {
#ifdef CONFIG_SMP
				if (should_forward) {
					redirect_intr(cpu, bp);
					should_forward = 0;
				}
#endif
				upa_writel(ICLR_IDLE, bp->iclr);
				/* Test and add entropy */
				if (random & SA_SAMPLE_RANDOM)
					add_interrupt_randomness(irq);
			}
		} else
			bp->pending = 1;

		bp->flags &= ~IBF_INPROGRESS;
	}
	irq_exit();
}

#ifdef CONFIG_BLK_DEV_FD
extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);

void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	struct irqaction *action = *(irq + irq_action);
	struct ino_bucket *bucket;
	int cpu = smp_processor_id();

	irq_enter();
	kstat_this_cpu.irqs[irq]++;

	*(irq_work(cpu, irq)) = 0;
	bucket = get_ino_in_irqaction(action) + ivector_table;

	bucket->flags |= IBF_INPROGRESS;

	floppy_interrupt(irq, dev_cookie, regs);
	upa_writel(ICLR_IDLE, bucket->iclr);

	bucket->flags &= ~IBF_INPROGRESS;

	irq_exit();
}
#endif

/* The following assumes that the branch lies before the place we
 * are branching to.  This is the case for a trap vector...
 * You have been warned.
 */
#define SPARC_BRANCH(dest_addr, inst_addr) \
	(0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))

#define SPARC_NOP (0x01000000)
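
/* Worked example (illustrative): for a branch instruction at 0x408820
 * targeting a handler at 0x409000:
 *
 *	disp22 = (0x409000 - 0x408820) >> 2 = 0x1f8
 *	SPARC_BRANCH(0x409000, 0x408820) == 0x10800000 | 0x1f8
 *					 == 0x108001f8
 *
 * 0x10800000 is "ba" (branch always); the 22-bit displacement counts
 * instruction words, hence the >> 2.
 */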

static void install_fast_irq(unsigned int cpu_irq,
			     irqreturn_t (*handler)(int, void *, struct pt_regs *))
{
	extern unsigned long sparc64_ttable_tl0;
	unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
	unsigned int *insns;

	ttent += 0x820;
	ttent += (cpu_irq - 1) << 5;
	insns = (unsigned int *) ttent;
	insns[0] = SPARC_BRANCH(((unsigned long) handler),
				((unsigned long)&insns[0]));
	insns[1] = SPARC_NOP;
	__asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
}

int request_fast_irq(unsigned int irq,
		     irqreturn_t (*handler)(int, void *, struct pt_regs *),
		     unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;

	/* No pil0 dummy buckets allowed here. */
	if (bucket < &ivector_table[0] ||
	    bucket >= &ivector_table[NUM_IVECS]) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}

	if (!handler)
		return -EINVAL;

	if ((bucket->pil == 0) || (bucket->pil == 14)) {
		printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
		return -EBUSY;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(bucket->pil + irq_action);
	if (action) {
		if (action->flags & SA_SHIRQ)
			panic("Trying to register fast irq when already shared.\n");
		if (irqflags & SA_SHIRQ)
			panic("Trying to register fast irq as shared.\n");
		printk("request_fast_irq: Trying to register an IRQ that is already owned.\n");
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -EBUSY;
	}

	/*
	 * We do not check for SA_SAMPLE_RANDOM in this path.  Neither do we
	 * support smp intr affinity in this path.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", bucket->pil, name);
	}
	if (action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_ATOMIC);
	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}
	install_fast_irq(bucket->pil, handler);

	bucket->irq_info = action;
	bucket->flags |= IBF_ACTIVE;

	action->handler = handler;
	action->flags = irqflags;
	action->dev_id = NULL;
	action->name = name;
	action->next = NULL;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, 0);

	*(bucket->pil + irq_action) = action;
	enable_irq(irq);

	spin_unlock_irqrestore(&irq_action_lock, flags);

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;
}

/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}
EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off(unsigned long mask)
{
	return 0;
}
EXPORT_SYMBOL(probe_irq_off);

static int retarget_one_irq(struct irqaction *p, int goal_cpu)
{
	struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
	unsigned long imap = bucket->imap;
	unsigned int tid;

	while (!cpu_online(goal_cpu)) {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	}

	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		tid = goal_cpu << 26;
		tid &= IMAP_AID_SAFARI;
	} else if (this_is_starfire == 0) {
		tid = goal_cpu << 26;
		tid &= IMAP_TID_UPA;
	} else {
		tid = (starfire_translate(imap, goal_cpu) << 26);
		tid &= IMAP_TID_UPA;
	}
	upa_writel(tid | IMAP_VALID, imap);

	/* Advance so the next IRQ lands on the next online cpu. */
	goal_cpu++;
	while (!cpu_online(goal_cpu)) {
		if (++goal_cpu >= NR_CPUS)
			goal_cpu = 0;
	}

	return goal_cpu;
}

/* Called from request_irq. */
static void distribute_irqs(void)
{
	unsigned long flags;
	int cpu, level;

	spin_lock_irqsave(&irq_action_lock, flags);
	cpu = 0;

	/*
	 * Skip the timer at [0], and very rare error/power intrs at [15].
	 * Also level [12], it causes problems on Ex000 systems.
	 */
	for (level = 1; level < NR_IRQS; level++) {
		struct irqaction *p = irq_action[level];
		if (level == 12)
			continue;
		while (p) {
			cpu = retarget_one_irq(p, cpu);
			p = p->next;
		}
	}
	spin_unlock_irqrestore(&irq_action_lock, flags);
}

struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
	unsigned int addr[3];
	int tnode, err;

	/* PROM timer node hangs out in the top level of device siblings... */
	tnode = prom_finddevice("/counter-timer");

	/* Assume if node is not present, PROM uses different tick mechanism
	 * which we should not care about.
	 */
	if (tnode == 0 || tnode == -1) {
		prom_timers = (struct sun5_timer *) 0;
		return;
	}

	/* If PROM is really using this, it must be mapped by it. */
	err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
	if (err == -1) {
		prom_printf("PROM does not have timer mapped, trying to continue.\n");
		prom_timers = (struct sun5_timer *) 0;
		return;
	}
	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Save them away for later. */
	prom_limit0 = prom_timers->limit0;
	prom_limit1 = prom_timers->limit1;

	/* Just as in sun4c/sun4m, the PROM uses a timer which ticks at IRQ 14.
	 * We turn both off here just to be paranoid.
	 */
	prom_timers->limit0 = 0;
	prom_timers->limit1 = 0;

	/* Wheee, eat the interrupt packet too... */
	__asm__ __volatile__(
"	mov	0x40, %%g2\n"
"	ldxa	[%%g0] %0, %%g1\n"
"	ldxa	[%%g2] %1, %%g1\n"
"	stxa	%%g0, [%%g0] %0\n"
"	membar	#Sync\n"
	: /* no outputs */
	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
	: "g1", "g2");
}

void enable_prom_timer(void)
{
	if (!prom_timers)
		return;

	/* Set it to whatever was there before. */
	prom_timers->limit1 = prom_limit1;
	prom_timers->count1 = 0;
	prom_timers->limit0 = prom_limit0;
	prom_timers->count0 = 0;
}

void init_irqwork_curcpu(void)
{
	register struct irq_work_struct *workp asm("o2");
	register unsigned long tmp asm("o3");
	int cpu = hard_smp_processor_id();

	memset(__irq_work + cpu, 0, sizeof(*workp));

	/* Make sure we are called with PSTATE_IE disabled. */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     : "=r" (tmp));
	if (tmp & PSTATE_IE) {
		prom_printf("BUG: init_irqwork_curcpu() called with "
			    "PSTATE_IE enabled, bailing.\n");
		__asm__ __volatile__("mov %%i7, %0\n\t"
				     : "=r" (tmp));
		prom_printf("BUG: Called from %lx\n", tmp);
		prom_halt();
	}

	/* Set interrupt globals. */
	workp = &__irq_work[cpu];
	__asm__ __volatile__(
	"rdpr	%%pstate, %0\n\t"
	"wrpr	%0, %1, %%pstate\n\t"
	"mov	%2, %%g6\n\t"
	"wrpr	%0, 0x0, %%pstate\n\t"
	: "=&r" (tmp)
	: "i" (PSTATE_IG), "r" (workp));
}

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
	map_prom_timers();
	kill_prom_timer();
	memset(&ivector_table[0], 0, sizeof(ivector_table));

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that the ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	__asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
			     "or %%g1, %0, %%g1\n\t"
			     "wrpr %%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");
}

static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NUM_IVECS];

#define HEX_DIGITS 16

static unsigned int parse_hex_value (const char __user *buffer,
				     unsigned long count, unsigned long *ret)
{
	unsigned char hexnum [HEX_DIGITS];
	unsigned long value;
	int i;

	if (!count)
		return -EINVAL;
	if (count > HEX_DIGITS)
		count = HEX_DIGITS;
	if (copy_from_user(hexnum, buffer, count))
		return -EFAULT;

	/*
	 * Parse the first 16 characters as a hex string; any non-hex char
	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
	 */
	value = 0;
	for (i = 0; i < count; i++) {
		unsigned int c = hexnum[i];

		switch (c) {
		case '0' ... '9': c -= '0'; break;
		case 'a' ... 'f': c -= 'a'-10; break;
		case 'A' ... 'F': c -= 'A'-10; break;
		default:
			goto out;
		}
		value = (value << 4) | c;
	}
out:
	*ret = value;
	return 0;
}
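
/* Worked example (illustrative): writing "e1\n" gives count == 3;
 * 'e' -> 14, '1' -> 1, then '\n' hits the default case and ends the
 * parse, so *ret == (14 << 4) | 1 == 0xe1.
 */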

static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	struct ino_bucket *bp = ivector_table + (long)data;
	struct irqaction *ap = bp->irq_info;
	cpumask_t mask = get_smpaff_in_irqaction(ap);
	int len;

	if (cpus_empty(mask))
		mask = cpu_online_map;

	len = cpumask_scnprintf(page, count, mask);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static inline void set_intr_affinity(int irq, unsigned long hw_aff)
{
	struct ino_bucket *bp = ivector_table + irq;

	/* Users specify affinity in terms of hw cpu ids.
	 * As soon as we do this, handler_irq() might see and take action.
	 */
	put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff);

	/* Migration is simply done by the next cpu to service this
	 * interrupt.
	 */
}

static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
					unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	unsigned long new_value, i;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	/*
	 * Do not allow disabling IRQs completely; that is too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	for (i = 0; i < NR_CPUS; i++) {
		if ((new_value & (1UL << i)) != 0 &&
		    !cpu_online(i))
			new_value &= ~(1UL << i);
	}

	if (!new_value)
		return -EINVAL;

	set_intr_affinity(irq, new_value);
	return full_count;
}
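
/* Example (illustrative): "echo 3 > /proc/irq/<ino>/smp_affinity"
 * parses new_value == 0x3, masks off any offline cpus, and lets cpus
 * 0 and 1 service the interrupt; the actual migration happens the
 * next time the interrupt is serviced (see set_intr_affinity() above).
 */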

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%x", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

	/* XXX SMP affinity not supported on starfire yet. */
	if (this_is_starfire == 0) {
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
		if (entry) {
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
}

void init_irq_proc (void)
{
	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);
}