+
+static struct irqaction resched_irqaction = {
+ .handler = dummy_handler,
+ .flags = SA_INTERRUPT,
+ .name = "resched"
+};
+#endif
+
+#ifdef CONFIG_XEN
+#include <xen/evtchn.h>
+#include <xen/interface/callback.h>
+
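+/*
+ * Xen irq numbers returned by the evtchn binding calls below; -1 means
+ * "not bound".  The name buffers hold the "<action name><cpu>" strings
+ * passed to the binding calls, and must stay live while the irq is
+ * bound, since the irq layer keeps the name pointer.
+ */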
+static DEFINE_PER_CPU(int, timer_irq) = -1;
+static DEFINE_PER_CPU(int, ipi_irq) = -1;
+static DEFINE_PER_CPU(int, resched_irq) = -1;
+static char timer_name[NR_CPUS][15];
+static char ipi_name[NR_CPUS][15];
+static char resched_name[NR_CPUS][15];
+
+struct saved_irq {
+ unsigned int irq;
+ struct irqaction *action;
+};
+/* 16 should be a generous upper bound, since only a handful of
+ * percpu irqs are registered early.
+ */
+#define MAX_LATE_IRQ 16
+static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
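+/*
+ * saved_irq_cnt counts every percpu irq cached on the BSP for later
+ * replay on the APs; late_irq_cnt counts the subset registered before
+ * the slab allocator was ready, which xen_bind_early_percpu_irq()
+ * binds once slab is up.
+ */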
+static unsigned short late_irq_cnt = 0;
+static unsigned short saved_irq_cnt = 0;
+static int xen_slab_ready = 0;
+
+#ifdef CONFIG_SMP
+/* Dummy stub. Although we could check for RESCHEDULE_VECTOR before
+ * __do_IRQ, doing so would issue several memory accesses on percpu
+ * data and thus add unnecessary traffic to other paths.
+ */
+static irqreturn_t
+handle_reschedule(int irq, void *dev_id, struct pt_regs *regs)
+{
+ return IRQ_HANDLED;
+}
+
+static struct irqaction resched_irqaction = {
+ .handler = handle_reschedule,
+ .flags = SA_INTERRUPT,
+ .name = "RESCHED"
+};
+#endif
+
+/*
+ * This is the xen version of percpu irq registration, which needs to
+ * bind to the xen-specific evtchn sub-system. One trick here is that
+ * the xen evtchn binding interface depends on kmalloc, because the
+ * bound port must be freed when the device/cpu goes down. So we cache
+ * registrations made on the BSP before the slab allocator is ready and
+ * deal with them later. All other registrations, which happen after
+ * slab is ready, are hooked into the xen evtchn immediately.
+ *
+ * FIXME: MCA is not supported so far, and thus the "nomca" boot param
+ * is required.
+ */
+static void
+xen_register_percpu_irq (unsigned int irq, struct irqaction *action, int save)
+{
+ unsigned int cpu = smp_processor_id();
+ int ret = 0;
+
+ if (xen_slab_ready) {
+ switch (irq) {
+ case IA64_TIMER_VECTOR:
+ sprintf(timer_name[cpu], "%s%d", action->name, cpu);
+ ret = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
+ action->handler, action->flags,
+ timer_name[cpu], action->dev_id);
+			per_cpu(timer_irq, cpu) = ret;
+			printk(KERN_INFO "register VIRQ_ITC (%s) to xen irq (%d)\n",
+			       timer_name[cpu], ret);
+ break;
+ case IA64_IPI_RESCHEDULE:
+ sprintf(resched_name[cpu], "%s%d", action->name, cpu);
+ ret = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu,
+ action->handler, action->flags,
+ resched_name[cpu], action->dev_id);
+			per_cpu(resched_irq, cpu) = ret;
+			printk(KERN_INFO "register RESCHEDULE_VECTOR (%s) to xen irq (%d)\n",
+			       resched_name[cpu], ret);
+ break;
+ case IA64_IPI_VECTOR:
+ sprintf(ipi_name[cpu], "%s%d", action->name, cpu);
+ ret = bind_ipi_to_irqhandler(IPI_VECTOR, cpu,
+ action->handler, action->flags,
+ ipi_name[cpu], action->dev_id);
+			per_cpu(ipi_irq, cpu) = ret;
+			printk(KERN_INFO "register IPI_VECTOR (%s) to xen irq (%d)\n",
+			       ipi_name[cpu], ret);
+ break;
+ case IA64_SPURIOUS_INT_VECTOR:
+ break;
+ default:
+ printk(KERN_WARNING "Percpu irq %d is unsupported by xen!\n", irq);
+ break;
+ }
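+		/* bind_*_to_irqhandler() returns the xen irq number on
+		 * success, or a negative errno on failure.
+		 */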
+ BUG_ON(ret < 0);
+ }
+
+	/* For the BSP, we cache registered percpu irqs, and then re-walk
+	 * them when initializing the APs.
+	 */
+ if (!cpu && save) {
+ BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
+ saved_percpu_irqs[saved_irq_cnt].irq = irq;
+ saved_percpu_irqs[saved_irq_cnt].action = action;
+ saved_irq_cnt++;
+ if (!xen_slab_ready)
+ late_irq_cnt++;
+ }
+}
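+
+/*
+ * A sketch of the expected caller (an assumption; the hookup is outside
+ * this hunk): the generic ia64 register_percpu_irq() path would divert
+ * here when running on xen, e.g.:
+ *
+ *	if (is_running_on_xen()) {
+ *		xen_register_percpu_irq(vec, action, 1);
+ *		return;
+ *	}
+ */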
+
+static void
+xen_bind_early_percpu_irq (void)
+{
+ int i;
+
+ xen_slab_ready = 1;
+	/* There's no race when accessing this cached array, since only
+	 * the BSP goes through this step, early during boot.
+	 */
+ for (i = 0; i < late_irq_cnt; i++)
+ xen_register_percpu_irq(saved_percpu_irqs[i].irq,
+ saved_percpu_irqs[i].action, 0);
+}
+
+/* FIXME: There's no obvious way to check whether the slab allocator is
+ * ready. So a hack is used here, utilizing the late_time_init hook.
+ */
+extern void (*late_time_init)(void);
+extern char xen_event_callback;
+extern void xen_init_IRQ(void);
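+
+/*
+ * A sketch of the expected wiring (an assumption; it happens outside
+ * this hunk): init code registers the event callback with the
+ * hypervisor and installs the late hook once, e.g.:
+ *
+ *	struct callback_register event = {
+ *		.type = CALLBACKTYPE_event,
+ *		.address = (unsigned long)&xen_event_callback,
+ *	};
+ *	xen_init_IRQ();
+ *	HYPERVISOR_callback_op(CALLBACKOP_register, &event);
+ *	late_time_init = xen_bind_early_percpu_irq;
+ */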
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int __devinit
+unbind_evtchn_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ if (action == CPU_DEAD) {
+ /* Unregister evtchn. */
+		if (per_cpu(ipi_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
+			per_cpu(ipi_irq, cpu) = -1;
+		}
+		if (per_cpu(resched_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(resched_irq, cpu),
+					       NULL);
+			per_cpu(resched_irq, cpu) = -1;
+		}
+		if (per_cpu(timer_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
+			per_cpu(timer_irq, cpu) = -1;
+		}
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block unbind_evtchn_notifier = {
+ .notifier_call = unbind_evtchn_callback,
+ .priority = 0
+};
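+
+/*
+ * Presumably registered from CPU-hotplug init code via
+ * register_cpu_notifier(&unbind_evtchn_notifier) (an assumption; the
+ * registration is outside this hunk).
+ */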