2 * linux/arch/ia64/kernel/irq_ia64.c
4 * Copyright (C) 1998-2001 Hewlett-Packard Co
5 * Stephane Eranian <eranian@hpl.hp.com>
6 * David Mosberger-Tang <davidm@hpl.hp.com>
8 * 6/10/99: Updated to bring in sync with x86 version to facilitate
9 * support for SMP and different interrupt controllers.
11 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
12 * PCI to vector allocation routine.
13 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
14 * Added CPU Hotplug handling for IPF.
17 #include <linux/module.h>
19 #include <linux/jiffies.h>
20 #include <linux/errno.h>
21 #include <linux/init.h>
22 #include <linux/interrupt.h>
23 #include <linux/ioport.h>
24 #include <linux/kernel_stat.h>
25 #include <linux/slab.h>
26 #include <linux/ptrace.h>
27 #include <linux/random.h> /* for rand_initialize_irq() */
28 #include <linux/signal.h>
29 #include <linux/smp.h>
30 #include <linux/smp_lock.h>
31 #include <linux/threads.h>
32 #include <linux/bitops.h>
33 #include <linux/irq.h>
35 #include <linux/cpu.h>
39 #include <asm/delay.h>
40 #include <asm/intrinsics.h>
42 #include <asm/hw_irq.h>
43 #include <asm/machvec.h>
44 #include <asm/pgtable.h>
45 #include <asm/system.h>
48 # include <asm/perfmon.h>
53 /* These can be overridden in platform_irq_init */
/* Bounds of the interrupt-vector window handed out to devices by the
 * dynamic allocator below; a platform's platform_irq_init hook may
 * replace these defaults. */
54 int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
55 int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
57 /* default base addr of IPI table */
/* MMIO base used by ia64_send_ipi(); placed in the uncached identity
 * window so IPI writes bypass the cache. */
58 void __iomem *ipi_base_addr = ((void __iomem *)
59 (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
62 * Legacy IRQ to IA-64 vector translation table.
/* Maps the 16 legacy 8259 ISA IRQ numbers onto IA-64 vectors.  Note the
 * ordering: IRQ 0 gets the highest vector (0x2f) because higher vector
 * numbers have higher priority on IA-64.
 * NOTE(review): the closing "};" of this array initializer is missing
 * from this extract. */
64 __u8 isa_irq_to_vector_map[16] = {
65 /* 8259 IRQ translation, first 16 entries */
66 0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
67 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
69 EXPORT_SYMBOL(isa_irq_to_vector_map);
/* One bit per device vector; set = allocated.  Manipulated only with
 * atomic bitops (test_and_set/clear_bit) by the helpers below. */
71 static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)];
74 assign_irq_vector (int irq)
79 if (is_running_on_xen()) {
80 extern int xen_assign_irq_vector(int);
81 return xen_assign_irq_vector(irq);
85 pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
86 vector = IA64_FIRST_DEVICE_VECTOR + pos;
87 if (vector > IA64_LAST_DEVICE_VECTOR)
89 if (test_and_set_bit(pos, ia64_vector_mask))
95 free_irq_vector (int vector)
99 if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
102 pos = vector - IA64_FIRST_DEVICE_VECTOR;
103 if (!test_and_clear_bit(pos, ia64_vector_mask))
104 printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
108 reserve_irq_vector (int vector)
112 if (vector < IA64_FIRST_DEVICE_VECTOR ||
113 vector > IA64_LAST_DEVICE_VECTOR)
116 pos = vector - IA64_FIRST_DEVICE_VECTOR;
117 return test_and_set_bit(pos, ia64_vector_mask);
121 * Dynamic irq allocate and deallocation for MSI
/* Allocates a fresh vector via assign_irq_vector(AUTO_ASSIGN) and
 * initializes the corresponding irq_desc for dynamic (MSI) use.
 * NOTE(review): the function signature, the error check on `vector`,
 * and the return statement are missing from this extract. */
125 int vector = assign_irq_vector(AUTO_ASSIGN);
128 dynamic_irq_init(vector);
/*
 * Tear down a dynamically created (MSI) irq: detach its irq_desc
 * state, then return the backing vector to the allocator.  The irq
 * number is passed directly to free_irq_vector(), i.e. the irq number
 * and the vector are used interchangeably here.
 */
void destroy_irq(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
	free_irq_vector(irq);
}
/* Fast test used in the interrupt loops below: on SMP a reschedule IPI
 * needs only statistics accounting, no real handler dispatch; on UP the
 * test compiles away to 0.
 * NOTE(review): the surrounding #ifdef CONFIG_SMP / #else / #endif
 * lines are missing from this extract. */
140 # define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE)
142 # define IS_RESCHEDULE(vec) (0)
145 * That's where the IVT branches when we get an external
146 * interrupt. This branches to the correct hardware IRQ handler via
/*
 * NOTE(review): this extract is missing lines — the function header,
 * the #ifdef IRQ_DEBUG opener, several braces and the ia64_srlz_d()/
 * ia64_eoi() calls are not visible.  Annotations below describe only
 * what the visible code shows.
 */
150 ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
152 struct pt_regs *old_regs = set_irq_regs(regs);
153 unsigned long saved_tpr;
/* Debug-only sanity check: detect the memory stack growing down into
 * the register backing store. */
157 unsigned long bsp, sp;
160 * Note: if the interrupt happened while executing in
161 * the context switch routine (ia64_switch_to), we may
162 * get a spurious stack overflow here. This is
163 * because the register and the memory stack are not
164 * switched atomically.
166 bsp = ia64_getreg(_IA64_REG_AR_BSP);
167 sp = ia64_getreg(_IA64_REG_SP);
/* Less than 1KB between sp and bsp means imminent stack collision. */
169 if ((sp - bsp) < 1024) {
170 static unsigned char count;
171 static long last_time;
/* Rate-limits the warning — presumably `count` is reset/advanced on
 * the missing lines; TODO confirm against the full source. */
173 if (jiffies - last_time > 5*HZ)
177 printk("ia64_handle_irq: DANGER: less than "
178 "1KB of free stack space!!\n"
179 "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
183 #endif /* IRQ_DEBUG */
186 * Always set TPR to limit maximum interrupt nesting depth to
187 * 16 (without this, it would be ~240, which could easily lead
188 * to kernel stack overflows).
191 saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
/* Drain pending interrupts: the IVR read returns the next pending
 * vector until IA64_SPURIOUS_INT_VECTOR signals "none left". */
193 while (vector != IA64_SPURIOUS_INT_VECTOR) {
/* Reschedule IPIs only get accounted; other vectors raise TPR to
 * bound nesting, then dispatch via the generic irq layer. */
194 if (unlikely(IS_RESCHEDULE(vector)))
195 kstat_this_cpu.irqs[vector]++;
197 ia64_setreg(_IA64_REG_CR_TPR, vector);
200 generic_handle_irq(local_vector_to_irq(vector));
203 * Disable interrupts and send EOI:
/* Restore the TPR saved on entry before acknowledging the interrupt
 * and polling IVR for the next one. */
206 ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
209 vector = ia64_get_ivr();
212 * This must be done *after* the ia64_eoi(). For example, the keyboard softirq
213 * handler needs to be able to wait for further keyboard interrupts, which can't
214 * come through until ia64_eoi() has been done.
217 set_irq_regs(old_regs);
220 #ifdef CONFIG_HOTPLUG_CPU
222 * This function emulates a interrupt processing when a cpu is about to be
/* CPU-hotplug helper: drains interrupts still pending on a CPU that is
 * going offline, mirroring the ia64_handle_irq() loop above.
 * NOTE(review): several lines (braces, ia64_srlz_d()/ia64_eoi() calls)
 * are missing from this extract. */
225 void ia64_process_pending_intr(void)
228 unsigned long saved_tpr;
229 extern unsigned int vectors_in_migration[NR_IRQS];
231 vector = ia64_get_ivr();
234 saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
238 * Perform normal interrupt style processing
240 while (vector != IA64_SPURIOUS_INT_VECTOR) {
241 if (unlikely(IS_RESCHEDULE(vector)))
242 kstat_this_cpu.irqs[vector]++;
/* No trap frame exists on this path, so NULL stands in for pt_regs. */
244 struct pt_regs *old_regs = set_irq_regs(NULL);
246 ia64_setreg(_IA64_REG_CR_TPR, vector);
/*
 * Dispatch the interrupt the same way ia64_handle_irq() would have
 * from a real external interrupt; clearing vectors_in_migration[]
 * marks this irq as no longer pending migration.
 * TODO(review): this duplicates the ia64_handle_irq() loop — the two
 * paths could share code.
 */
255 vectors_in_migration[local_vector_to_irq(vector)]=0;
256 generic_handle_irq(local_vector_to_irq(vector));
257 set_irq_regs(old_regs);
260 * Disable interrupts and send EOI
263 ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
266 vector = ia64_get_ivr();
274 extern irqreturn_t handle_IPI (int irq, void *dev_id);
/* No-op handler for the reschedule IPI: the interrupt itself is the
 * whole effect (it forces a trip through the scheduler on return).
 * NOTE(review): the body/return of dummy_handler and the .name members
 * of both irqactions are missing from this extract. */
276 static irqreturn_t dummy_handler (int irq, void *dev_id)
281 static struct irqaction ipi_irqaction = {
282 .handler = handle_IPI,
283 .flags = IRQF_DISABLED,
/* NOTE(review): uses legacy SA_INTERRUPT while ipi_irqaction above uses
 * IRQF_DISABLED — the two flags should be spelled consistently. */
287 static struct irqaction resched_irqaction = {
288 .handler = dummy_handler,
289 .flags = SA_INTERRUPT,
295 #include <xen/evtchn.h>
296 #include <xen/interface/callback.h>
/* Per-CPU Xen irq numbers returned by bind_*_to_irqhandler(); -1 means
 * "not bound" (checked in unbind_evtchn_callback below). */
298 static DEFINE_PER_CPU(int, timer_irq) = -1;
299 static DEFINE_PER_CPU(int, ipi_irq) = -1;
300 static DEFINE_PER_CPU(int, resched_irq) = -1;
/* Handler names shown per CPU; filled by sprintf("%s%d", action->name,
 * cpu) in xen_register_percpu_irq(). */
301 static char timer_name[NR_CPUS][15];
302 static char ipi_name[NR_CPUS][15];
303 static char resched_name[NR_CPUS][15];
/* NOTE(review): the `struct saved_irq {` opener and its irq member are
 * missing from this extract. */
307 struct irqaction *action;
309 /* 16 should be far optimistic value, since only several percpu irqs
310 * are registered early.
312 #define MAX_LATE_IRQ 16
/* Registrations made on the BSP before the slab allocator is up are
 * cached here and replayed later (see xen_bind_early_percpu_irq). */
313 static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
314 static unsigned short late_irq_cnt = 0;
315 static unsigned short saved_irq_cnt = 0;
316 static int xen_slab_ready = 0;
319 /* Dummy stub. Though we may check RESCHEDULE_VECTOR before __do_IRQ,
320 * it ends up to issue several memory accesses upon percpu data and
321 * thus adds unnecessary traffic to other paths.
/* Xen flavor of the no-op reschedule handler (old-style signature with
 * pt_regs).  NOTE(review): return type, body and the .name member of
 * the irqaction are missing from this extract; this resched_irqaction
 * shadows the native one defined earlier under a different #ifdef arm. */
324 handle_reschedule(int irq, void *dev_id, struct pt_regs *regs)
330 static struct irqaction resched_irqaction = {
331 .handler = handle_reschedule,
332 .flags = SA_INTERRUPT,
338 * This is xen version percpu irq registration, which needs bind
339 * to xen specific evtchn sub-system. One trick here is that xen
340 * evtchn binding interface depends on kmalloc because related
341 * port needs to be freed at device/cpu down. So we cache the
342 * registration on BSP before slab is ready and then deal them
343 * at later point. For rest instances happening after slab ready,
344 * we hook them to xen evtchn immediately.
346 * FIXME: MCA is not supported by far, and thus "nomca" boot param is
/* @save selects whether a pre-slab registration is cached in
 * saved_percpu_irqs[] for later replay (see the tail of this function).
 * NOTE(review): the switch(irq) opener, break statements, closing
 * braces and the return path are missing from this extract. */
350 xen_register_percpu_irq (unsigned int irq, struct irqaction *action, int save)
352 unsigned int cpu = smp_processor_id();
/* Slab up: bind directly to the matching Xen event channel now. */
355 if (xen_slab_ready) {
/* Timer tick arrives as the VIRQ_ITC virtual irq under Xen. */
357 case IA64_TIMER_VECTOR:
358 sprintf(timer_name[cpu], "%s%d", action->name, cpu);
359 ret = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
360 action->handler, action->flags,
361 timer_name[cpu], action->dev_id);
362 per_cpu(timer_irq,cpu) = ret;
363 printk(KERN_INFO "register VIRQ_ITC (%s) to xen irq (%d)\n", timer_name[cpu], ret);
/* Reschedule and generic IPIs map onto Xen IPI event channels. */
365 case IA64_IPI_RESCHEDULE:
366 sprintf(resched_name[cpu], "%s%d", action->name, cpu);
367 ret = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu,
368 action->handler, action->flags,
369 resched_name[cpu], action->dev_id);
370 per_cpu(resched_irq,cpu) = ret;
371 printk(KERN_INFO "register RESCHEDULE_VECTOR (%s) to xen irq (%d)\n", resched_name[cpu], ret);
373 case IA64_IPI_VECTOR:
374 sprintf(ipi_name[cpu], "%s%d", action->name, cpu);
375 ret = bind_ipi_to_irqhandler(IPI_VECTOR, cpu,
376 action->handler, action->flags,
377 ipi_name[cpu], action->dev_id);
378 per_cpu(ipi_irq,cpu) = ret;
379 printk(KERN_INFO "register IPI_VECTOR (%s) to xen irq (%d)\n", ipi_name[cpu], ret);
/* Spurious vector needs no binding; anything else is unsupported. */
381 case IA64_SPURIOUS_INT_VECTOR:
384 printk(KERN_WARNING "Percpu irq %d is unsupported by xen!\n", irq);
390 /* For BSP, we cache registered percpu irqs, and then re-walk
391 * them when initializing APs
/* Cache array must not overflow; MAX_LATE_IRQ is sized generously. */
394 BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
395 saved_percpu_irqs[saved_irq_cnt].irq = irq;
396 saved_percpu_irqs[saved_irq_cnt].action = action;
/* Replay the percpu irq registrations cached before the slab allocator
 * was ready, now binding them to real Xen event channels (save=0 so
 * they are not re-cached).
 * NOTE(review): return type, braces, and the line setting
 * xen_slab_ready are missing from this extract. */
404 xen_bind_early_percpu_irq (void)
409 /* There's no race when accessing this cached array, since only
410 * BSP will face with such step shortly
412 for (i = 0; i < late_irq_cnt; i++)
413 xen_register_percpu_irq(saved_percpu_irqs[i].irq,
414 saved_percpu_irqs[i].action, 0);
417 /* FIXME: There's no obvious point to check whether slab is ready. So
418 * a hack is used here by utilizing a late time hook.
420 extern void (*late_time_init)(void);
421 extern char xen_event_callback;
422 extern void xen_init_IRQ(void);
424 #ifdef CONFIG_HOTPLUG_CPU
/* CPU-hotplug notifier: on CPU_DEAD, unbind the dead CPU's ipi,
 * reschedule and timer event channels and reset the per-cpu irq
 * numbers to the "unbound" sentinel -1.
 * NOTE(review): return type, braces and the return statement are
 * missing from this extract. */
426 unbind_evtchn_callback(struct notifier_block *nfb,
427 unsigned long action, void *hcpu)
429 unsigned int cpu = (unsigned long)hcpu;
431 if (action == CPU_DEAD) {
432 /* Unregister evtchn. */
433 if (per_cpu(ipi_irq,cpu) >= 0) {
434 unbind_from_irqhandler (per_cpu(ipi_irq, cpu), NULL);
435 per_cpu(ipi_irq, cpu) = -1;
437 if (per_cpu(resched_irq,cpu) >= 0) {
438 unbind_from_irqhandler (per_cpu(resched_irq, cpu),
440 per_cpu(resched_irq, cpu) = -1;
442 if (per_cpu(timer_irq,cpu) >= 0) {
443 unbind_from_irqhandler (per_cpu(timer_irq, cpu), NULL);
444 per_cpu(timer_irq, cpu) = -1;
450 static struct notifier_block unbind_evtchn_notifier = {
451 .notifier_call = unbind_evtchn_callback,
456 DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
/* Per-CPU Xen interrupt bring-up for secondary CPUs: registers the
 * event callback with the hypervisor and replays the percpu irq
 * registrations the BSP cached in saved_percpu_irqs[].
 * NOTE(review): braces and the early-return for the boot CPU are
 * missing from this extract. */
457 void xen_smp_intr_init(void)
460 unsigned int cpu = smp_processor_id();
462 struct callback_register event = {
463 .type = CALLBACKTYPE_event,
464 .address = (unsigned long)&xen_event_callback,
468 /* Initialization was already done for boot cpu. */
469 #ifdef CONFIG_HOTPLUG_CPU
470 /* Register the notifier only once. */
471 register_cpu_notifier(&unbind_evtchn_notifier);
476 /* This should be piggyback when setup vcpu guest context */
477 BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
479 for (i = 0; i < saved_irq_cnt; i++)
480 xen_register_percpu_irq(saved_percpu_irqs[i].irq,
481 saved_percpu_irqs[i].action, 0);
482 #endif /* CONFIG_SMP */
484 #endif /* CONFIG_XEN */
/* Register @action for percpu vector @vec on every irq whose vector
 * maps to @vec, marking the descriptor IRQ_PER_CPU and wiring the
 * LSAPIC irq chip.  Under Xen the registration is diverted to
 * xen_register_percpu_irq() instead.
 * NOTE(review): the is_running_on_xen() short-circuit appears twice
 * (once before and once inside the loop) — likely a patch artifact;
 * return type, braces and the action NULL-check are missing from this
 * extract. */
487 register_percpu_irq (ia64_vector vec, struct irqaction *action)
493 if (is_running_on_xen())
494 return xen_register_percpu_irq(vec, action, 1);
497 for (irq = 0; irq < NR_IRQS; ++irq)
498 if (irq_to_vector(irq) == vec) {
500 if (is_running_on_xen())
501 return xen_register_percpu_irq(vec, action, 1);
503 desc = irq_desc + irq;
504 desc->status |= IRQ_PER_CPU;
505 desc->chip = &irq_type_ia64_lsapic;
507 setup_irq(irq, action);
/* Body of init_IRQ — NOTE(review): the function signature is missing
 * from this extract.  Under Xen it registers the event callback with
 * the hypervisor and defers event-channel binding to late_time_init
 * (after slab is up); natively it registers the spurious, IPI and
 * reschedule percpu vectors.  The reschedule registration appears in
 * both the Xen-SMP arm (line "525") and the native-SMP arm ("532"). */
515 /* Maybe put into platform_irq_init later */
516 if (is_running_on_xen()) {
517 struct callback_register event = {
518 .type = CALLBACKTYPE_event,
519 .address = (unsigned long)&xen_event_callback,
522 BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
/* Slab is not ready this early; bind cached percpu irqs later. */
523 late_time_init = xen_bind_early_percpu_irq;
525 register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
526 #endif /* CONFIG_SMP */
528 #endif /* CONFIG_XEN */
529 register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
531 register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
532 register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
534 #ifdef CONFIG_PERFMON
/* Send an inter-processor interrupt @vector to @cpu.  Under Xen the
 * IPI is converted into a notification on the target CPU's bound event
 * channel; natively it is a single uncached write of the delivery
 * mode/vector pair to the target's slot in the IPI MMIO block.
 * NOTE(review): return type, braces, switch(vector) opener, break/
 * return statements and #ifdef CONFIG_SMP arms are missing from this
 * extract. */
541 ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
543 void __iomem *ipi_addr;
544 unsigned long ipi_data;
545 unsigned long phys_cpu_id;
548 if (is_running_on_xen()) {
552 /* TODO: we need to call vcpu_up here */
553 if (unlikely(vector == ap_wakeup_vector)) {
554 extern void xen_send_ipi (int cpu, int vec);
555 xen_send_ipi (cpu, vector);
556 //vcpu_prepare_and_up(cpu);
/* Map the ia64 IPI vector to the Xen irq bound for that IPI type. */
562 case IA64_IPI_VECTOR:
563 irq = per_cpu(ipi_to_irq, cpu)[IPI_VECTOR];
565 case IA64_IPI_RESCHEDULE:
566 irq = per_cpu(ipi_to_irq, cpu)[RESCHEDULE_VECTOR];
569 printk(KERN_WARNING"Unsupported IPI type 0x%x\n", vector);
575 notify_remote_via_irq(irq);
578 #endif /* CONFIG_XEN */
/* SMP uses the stored physical id; the UP fallback reads it from
 * cr.lid (bits 16..31 hold EID:ID). */
581 phys_cpu_id = cpu_physical_id(cpu);
583 phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
587 * cpu number is in 8bit ID and 8bit EID
590 ipi_data = (delivery_mode << 8) | (vector & 0xff);
591 ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));
593 writeq(ipi_data, ipi_addr);