/*
 * linux/arch/ia64/kernel/irq.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *			PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *			Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/random.h>	/* for rand_initialize_irq() */
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#ifdef CONFIG_SMP
#include <linux/cpu.h>
#endif

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/system.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#define IRQ_DEBUG	0

51 /* These can be overridden in platform_irq_init */
52 int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
53 int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
55 /* default base addr of IPI table */
56 void __iomem *ipi_base_addr = ((void __iomem *)
57 (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);

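/*
 * The table above is consumed through the isa_irq_to_vector() wrapper:
 * the index is a legacy 8259 IRQ number, the value is the IA-64 vector
 * it is routed to.
 */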
static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)];

int
assign_irq_vector (int irq)
{
	int pos, vector;

#ifdef CONFIG_XEN
	if (is_running_on_xen()) {
		extern int xen_assign_irq_vector(int);
		return xen_assign_irq_vector(irq);
	}
#endif
 again:
	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
	vector = IA64_FIRST_DEVICE_VECTOR + pos;
	if (vector > IA64_LAST_DEVICE_VECTOR)
		return -ENOSPC;
	if (test_and_set_bit(pos, ia64_vector_mask))
		goto again;
	return vector;
}

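/*
 * Return a vector handed out by assign_irq_vector() to the pool; a double
 * free is reported with a warning.
 */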
void
free_irq_vector (int vector)
{
	int pos;

	if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
		return;

	pos = vector - IA64_FIRST_DEVICE_VECTOR;
	if (!test_and_clear_bit(pos, ia64_vector_mask))
		printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
}

int
reserve_irq_vector (int vector)
{
	int pos;

	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;

	pos = vector - IA64_FIRST_DEVICE_VECTOR;
	return test_and_set_bit(pos, ia64_vector_mask);
}

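/*
 * Reschedule IPIs are acknowledged directly in the interrupt loop of
 * ia64_handle_irq() and never forwarded to __do_IRQ(); on UP kernels
 * they cannot occur at all.
 */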
#ifdef CONFIG_SMP
#	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#else
#	define IS_RESCHEDULE(vec)	(0)
#endif

/*
 * That's where the IVT branches when we get an external
 * interrupt. This branches to the correct hardware IRQ handler via
 * function ptr.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here.  This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static unsigned char count;
			static long last_time;

			if (jiffies - last_time > 5*HZ)
				count = 0;
			if (++count < 5) {
				last_time = jiffies;
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		if (!IS_RESCHEDULE(vector)) {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			__do_IRQ(local_vector_to_irq(vector), regs);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a cpu is about to be
 * brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		if (!IS_RESCHEDULE(vector)) {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Now try calling normal ia64_handle_irq as it would have got called
			 * from a real intr handler.  Try passing null for pt_regs, hopefully
			 * it will work.  Probably could share code with ia64_handle_irq.
			 */
			vectors_in_migration[local_vector_to_irq(vector)] = 0;
			__do_IRQ(local_vector_to_irq(vector), NULL);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif

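/*
 * handle_IPI() is implemented in arch/ia64/kernel/smp.c; it is attached
 * to IA64_IPI_VECTOR below in init_IRQ().
 */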
#ifdef CONFIG_SMP
extern irqreturn_t handle_IPI (int irq, void *dev_id, struct pt_regs *regs);

static struct irqaction ipi_irqaction = {
	.handler =	handle_IPI,
	.flags =	IRQF_DISABLED,
	.name =		"IPI"
};
#endif

#ifdef CONFIG_XEN
#include <xen/evtchn.h>
#include <xen/interface/callback.h>

static DEFINE_PER_CPU(int, timer_irq) = -1;
static DEFINE_PER_CPU(int, ipi_irq) = -1;
static DEFINE_PER_CPU(int, resched_irq) = -1;
static char timer_name[NR_CPUS][15];
static char ipi_name[NR_CPUS][15];
static char resched_name[NR_CPUS][15];

struct saved_irq {
	unsigned int irq;
	struct irqaction *action;
};
/* 16 should be a generously optimistic value, since only a few percpu irqs
 * are registered early.
 */
#define MAX_LATE_IRQ	16
static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
static unsigned short late_irq_cnt = 0;
static unsigned short saved_irq_cnt = 0;
static int xen_slab_ready = 0;

#ifdef CONFIG_SMP
/* Dummy stub.  Though we may check RESCHEDULE_VECTOR before __do_IRQ,
 * that ends up issuing several memory accesses on percpu data and
 * thus adds unnecessary traffic to other paths.
 */
static irqreturn_t
handle_reschedule(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

static struct irqaction resched_irqaction = {
	.handler =	handle_reschedule,
	.flags =	SA_INTERRUPT,
	.name =		"RESCHED"
};
#endif

/*
 * This is the xen version of percpu irq registration, which needs to bind
 * to the xen-specific evtchn sub-system.  One trick here is that the xen
 * evtchn binding interface depends on kmalloc because the related port
 * needs to be freed at device/cpu down.  So we cache the registrations
 * done on the BSP before the slab is ready and then deal with them at a
 * later point.  The remaining registrations, which happen after the slab
 * is ready, are hooked to the xen evtchn immediately.
 *
 * FIXME: MCA is not supported so far, and thus the "nomca" boot param is
 * required.
 */
static void
xen_register_percpu_irq (unsigned int irq, struct irqaction *action, int save)
{
	unsigned int cpu = smp_processor_id();
	int ret = 0;

	if (xen_slab_ready) {
		switch (irq) {
		case IA64_TIMER_VECTOR:
			sprintf(timer_name[cpu], "%s%d", action->name, cpu);
			ret = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
				action->handler, action->flags,
				timer_name[cpu], action->dev_id);
			per_cpu(timer_irq, cpu) = ret;
			printk(KERN_INFO "register VIRQ_ITC (%s) to xen irq (%d)\n", timer_name[cpu], ret);
			break;
		case IA64_IPI_RESCHEDULE:
			sprintf(resched_name[cpu], "%s%d", action->name, cpu);
			ret = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu,
				action->handler, action->flags,
				resched_name[cpu], action->dev_id);
			per_cpu(resched_irq, cpu) = ret;
			printk(KERN_INFO "register RESCHEDULE_VECTOR (%s) to xen irq (%d)\n", resched_name[cpu], ret);
			break;
		case IA64_IPI_VECTOR:
			sprintf(ipi_name[cpu], "%s%d", action->name, cpu);
			ret = bind_ipi_to_irqhandler(IPI_VECTOR, cpu,
				action->handler, action->flags,
				ipi_name[cpu], action->dev_id);
			per_cpu(ipi_irq, cpu) = ret;
			printk(KERN_INFO "register IPI_VECTOR (%s) to xen irq (%d)\n", ipi_name[cpu], ret);
			break;
		case IA64_SPURIOUS_INT_VECTOR:
			break;
		default:
			printk(KERN_WARNING "Percpu irq %d is unsupported by xen!\n", irq);
			break;
		}
		BUG_ON(ret < 0);
	}

	/* For the BSP, we cache registered percpu irqs, and then re-walk
	 * them when initializing the APs.
	 */
	if (!cpu && save) {
		BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
		saved_percpu_irqs[saved_irq_cnt].irq = irq;
		saved_percpu_irqs[saved_irq_cnt].action = action;
		saved_irq_cnt++;
		if (!xen_slab_ready)
			late_irq_cnt++;
	}
}

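/*
 * Invoked through the late_time_init hook once the slab allocator is up:
 * replay the percpu irq registrations that were cached before kmalloc
 * became available.
 */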
static void
xen_bind_early_percpu_irq (void)
{
	int i;

	xen_slab_ready = 1;
	/* There's no race when accessing this cached array, since only
	 * the BSP goes through this step, shortly after boot.
	 */
	for (i = 0; i < late_irq_cnt; i++)
		xen_register_percpu_irq(saved_percpu_irqs[i].irq,
					saved_percpu_irqs[i].action, 0);
}

/* FIXME: There's no obvious point at which to check whether the slab
 * allocator is ready, so a hack is used here by utilizing the late time hook.
 */
extern void (*late_time_init)(void);
extern char xen_event_callback;
extern void xen_init_IRQ(void);

#ifdef CONFIG_HOTPLUG_CPU
static int
unbind_evtchn_callback(struct notifier_block *nfb,
		       unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if (action == CPU_DEAD) {
		/* Unregister evtchn.  */
		if (per_cpu(ipi_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
			per_cpu(ipi_irq, cpu) = -1;
		}
		if (per_cpu(resched_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(resched_irq, cpu),
					       NULL);
			per_cpu(resched_irq, cpu) = -1;
		}
		if (per_cpu(timer_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
			per_cpu(timer_irq, cpu) = -1;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block unbind_evtchn_notifier = {
	.notifier_call = unbind_evtchn_callback,
	.priority = 0
};
#endif

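/*
 * Per-CPU bring-up on xen: register the event-channel callback with the
 * hypervisor for this vcpu and replay the percpu irq registrations that
 * the BSP cached in saved_percpu_irqs[].
 */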
DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
void xen_smp_intr_init(void)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	unsigned int i = 0;
	struct callback_register event = {
		.type = CALLBACKTYPE_event,
		.address = (unsigned long)&xen_event_callback,
	};

	if (cpu == 0) {
		/* Initialization was already done for the boot cpu.  */
#ifdef CONFIG_HOTPLUG_CPU
		/* Register the notifier only once.  */
		register_cpu_notifier(&unbind_evtchn_notifier);
#endif
		return;
	}

	/* This should be piggybacked when setting up the vcpu guest context */
	BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));

	for (i = 0; i < saved_irq_cnt; i++)
		xen_register_percpu_irq(saved_percpu_irqs[i].irq,
					saved_percpu_irqs[i].action, 0);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_XEN */

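/*
 * On xen this forwards to xen_register_percpu_irq(); on bare metal it marks
 * the irq that maps to the given vector as per-CPU, attaches the LSAPIC irq
 * type, and installs the handler, if any.
 */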
void
register_percpu_irq (ia64_vector vec, struct irqaction *action)
{
	irq_desc_t *desc;
	unsigned int irq;

#ifdef CONFIG_XEN
	if (is_running_on_xen())
		return xen_register_percpu_irq(vec, action, 1);
#endif

	for (irq = 0; irq < NR_IRQS; ++irq)
		if (irq_to_vector(irq) == vec) {
#ifdef CONFIG_XEN
			if (is_running_on_xen())
				return xen_register_percpu_irq(vec, action, 1);
#endif
			desc = irq_desc + irq;
			desc->status |= IRQ_PER_CPU;
			desc->chip = &irq_type_ia64_lsapic;
			if (action)
				setup_irq(irq, action);
		}
}

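/*
 * Note that on xen the percpu registrations requested from init_IRQ() are
 * only cached at this point; the actual event-channel binding happens
 * later, from xen_bind_early_percpu_irq() via late_time_init.
 */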
void __init
init_IRQ (void)
{
#ifdef CONFIG_XEN
	/* Maybe put into platform_irq_init later */
	if (is_running_on_xen()) {
		struct callback_register event = {
			.type = CALLBACKTYPE_event,
			.address = (unsigned long)&xen_event_callback,
		};
		xen_init_IRQ();
		BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
		late_time_init = xen_bind_early_percpu_irq;
#ifdef CONFIG_SMP
		register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
#endif /* CONFIG_SMP */
	}
#endif /* CONFIG_XEN */
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
#endif
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
	platform_irq_init();
}

void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

#ifdef CONFIG_XEN
	if (is_running_on_xen()) {
		int irq = -1;

#ifdef CONFIG_SMP
		/* TODO: we need to call vcpu_up here */
		if (unlikely(vector == ap_wakeup_vector)) {
			extern void xen_send_ipi (int cpu, int vec);
			xen_send_ipi (cpu, vector);
			//vcpu_prepare_and_up(cpu);
			return;
		}
#endif

		switch (vector) {
		case IA64_IPI_VECTOR:
			irq = per_cpu(ipi_to_irq, cpu)[IPI_VECTOR];
			break;
		case IA64_IPI_RESCHEDULE:
			irq = per_cpu(ipi_to_irq, cpu)[RESCHEDULE_VECTOR];
			break;
		default:
			printk(KERN_WARNING "Unsupported IPI type 0x%x\n", vector);
			irq = 0;
			break;
		}

		BUG_ON(irq < 0);
		notify_remote_via_irq(irq);
		return;
	}
#endif /* CONFIG_XEN */

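	/*
	 * Native path: encode delivery mode and vector into the IPI data and
	 * write it to the target cpu's slot in the processor interrupt block,
	 * addressed by physical ID/EID with the optional redirect hint in
	 * bit 3.
	 */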
#ifdef CONFIG_SMP
	phys_cpu_id = cpu_physical_id(cpu);
#else
	phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
#endif

	/*
	 * cpu number is in 8bit ID and 8bit EID
	 */

	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

	writeq(ipi_data, ipi_addr);
}