/*
 * linux/arch/sh/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * SuperH version:  Copyright (C) 1999  Niibe Yutaka
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/vs_context.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/thread_info.h>
#include <asm/cpu/mmu_context.h>

atomic_t irq_err_count;

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * Each architecture has to answer this themselves; it doesn't deserve
 * a generic callback, I think.
 */
void ack_bad_irq(unsigned int irq)
{
	atomic_inc(&irq_err_count);
	printk("unexpected IRQ trap at vector %02x\n", irq);
}
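
/*
 * The count accumulated here is what show_interrupts() below reports
 * on the "Err:" line of /proc/interrupts.
 */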

#if defined(CONFIG_PROC_FS)
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;
		seq_printf(p, "%3d: ",i);
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
		seq_printf(p, " %14s", irq_desc[i].chip->name);
		seq_printf(p, "-%-8s", irq_desc[i].name);
		seq_printf(p, " %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS)
		seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));

	return 0;
}
#endif
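
/*
 * The handler above backs /proc/interrupts; with a single online CPU a
 * row looks roughly like this (illustrative values and names, not
 * captured from real hardware):
 *
 *            CPU0
 *   16:     54321        ipr-edge   sh-sci
 *  Err:         0
 */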

#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
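
/*
 * Each context has the usual kernel stack layout: the thread_info sits
 * at the bottom of the THREAD_SIZE region and the stack grows down
 * towards it from the top.
 */
#endif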

asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
		      unsigned long r6, unsigned long r7,
		      struct pt_regs __regs)
{
	/*
	 * __regs is passed by value on the stack; RELOC_HIDE() takes its
	 * address in a way gcc's optimiser can't see through, so gcc
	 * makes no assumptions about where the struct lives.
	 */
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
	struct pt_regs *old_regs = set_irq_regs(regs);
	int irq;
#ifdef CONFIG_4KSTACKS
	union irq_ctx *curctx, *irqctx;
#endif

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 1KB free? */
	{
		long sp;

		__asm__ __volatile__ ("and r15, %0" :
					"=r" (sp) : "0" (THREAD_SIZE - 1));

		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			printk("do_IRQ: stack overflow: %ld\n",
			       sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif
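
	/*
	 * The check above relies on kernel stacks being THREAD_SIZE
	 * aligned: "and r15, %0" with THREAD_SIZE - 1 yields the stack
	 * pointer's offset within its stack, i.e. how many bytes remain
	 * free above the thread_info at the bottom.
	 */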

#ifdef CONFIG_CPU_HAS_INTEVT
	irq = evt2irq(ctrl_inl(INTEVT));
#else
	irq = r4;
#endif

	irq = irq_demux(irq);
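
	/*
	 * evt2irq() translates the hardware event code read from the
	 * INTEVT register into a Linux IRQ number; irq_demux() then gives
	 * the board a chance to remap or demultiplex that number, e.g.
	 * for cascaded interrupt controllers.
	 */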

#ifdef CONFIG_4KSTACKS
	curctx = (union irq_ctx *)current_thread_info();
	irqctx = hardirq_ctx[smp_processor_id()];

	/*
	 * This is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the IRQ stack already, after all).
	 */
	if (curctx != irqctx) {
		u32 *isp;

		/*
		 * isp points just past the irq_ctx union, i.e. at the top
		 * of the hardirq stack; SuperH stacks grow downwards, so
		 * this is the handler's initial stack pointer.
		 */
		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
		irqctx->tinfo.task = curctx->tinfo.task;
		irqctx->tinfo.previous_sp = current_stack_pointer;

		/*
		 * Copy the softirq bits in preempt_count so that the
		 * softirq checks work in the hardirq context.
		 */
		irqctx->tinfo.preempt_count =
			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

		__asm__ __volatile__ (
			"mov	%0, r4		\n"
			"mov	r15, r8		\n"
			"jsr	@%1		\n"
			/* switch to the irq stack (in the jsr delay slot) */
			" mov	%2, r15		\n"
			/* restore the stack (ring zero) */
			"mov	r8, r15		\n"
			: /* no outputs */
			: "r" (irq), "r" (generic_handle_irq), "r" (isp)
			: "memory", "r0", "r1", "r2", "r3", "r4",
			  "r5", "r6", "r7", "r8", "t", "pr"
		);
	} else
#endif
		generic_handle_irq(irq);

	irq_exit();

	set_irq_regs(old_regs);

	return 1;
}

#ifdef CONFIG_4KSTACKS
/*
 * These should really be __section__(".bss.page_aligned") as well, but
 * gcc 3.0 and earlier don't handle that correctly.
 */
static char softirq_stack[NR_CPUS * THREAD_SIZE]
		__attribute__((__aligned__(THREAD_SIZE)));

static char hardirq_stack[NR_CPUS * THREAD_SIZE]
		__attribute__((__aligned__(THREAD_SIZE)));
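
/*
 * Carving the IRQ stacks out of statically allocated, THREAD_SIZE
 * aligned arrays preserves the alignment guarantee that the stack
 * pointer masking tricks above depend on.
 */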

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

	irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task = NULL;
	irqctx->tinfo.exec_domain = NULL;
	irqctx->tinfo.cpu = cpu;
	irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task = NULL;
	irqctx->tinfo.exec_domain = NULL;
	irqctx->tinfo.cpu = cpu;
	irqctx->tinfo.preempt_count = 0;
	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk("CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
	hardirq_ctx[cpu] = NULL;
}
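
/*
 * Clearing hardirq_ctx[cpu] re-arms the early-return check in
 * irq_ctx_init(), so the contexts will be set up again if that CPU is
 * reinitialised later.
 */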

extern asmlinkage void __do_softirq(void);
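
/*
 * With 4K stacks, softirq processing also gets its own per-CPU stack:
 * softirqs can run on top of an already deep task stack, so keeping
 * them off it is part of what makes small task stacks safe.
 */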
asmlinkage void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curctx = current_thread_info();
		irqctx = softirq_ctx[smp_processor_id()];
		irqctx->tinfo.task = curctx->task;
		irqctx->tinfo.previous_sp = current_stack_pointer;

		/* build the stack frame on the softirq stack */
		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));

		__asm__ __volatile__ (
			"mov	r15, r9		\n"
			"jsr	@%0		\n"
			/* switch to the softirq stack (in the jsr delay slot) */
			" mov	%1, r15		\n"
			/* restore the thread stack */
			"mov	r9, r15		\n"
			: /* no outputs */
			: "r" (__do_softirq), "r" (isp)
			: "memory", "r0", "r1", "r2", "r3", "r4",
			  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
		);

		/*
		 * Shouldn't happen, we returned above if in_interrupt():
		 */
		WARN_ON_ONCE(softirq_count());
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);
#endif

void __init init_IRQ(void)
{
#ifdef CONFIG_CPU_HAS_PINT_IRQ
	init_IRQ_pint();
#endif

#ifdef CONFIG_CPU_HAS_INTC2_IRQ
	init_IRQ_intc2();
#endif

#ifdef CONFIG_CPU_HAS_IPR_IRQ
	init_IRQ_ipr();
#endif

	/* Perform the machine specific initialisation */
	if (sh_mv.mv_init_irq)
		sh_mv.mv_init_irq();

	irq_ctx_init(smp_processor_id());
}