1 /******************************************************************************
4 * Communication via Xen event channels.
6 * Copyright (c) 2002-2005, K A Fraser
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
33 #include <linux/module.h>
34 #include <linux/irq.h>
35 #include <linux/interrupt.h>
36 #include <linux/sched.h>
37 #include <linux/kernel_stat.h>
38 #include <linux/version.h>
39 #include <asm/atomic.h>
40 #include <asm/system.h>
41 #include <asm/ptrace.h>
42 #include <asm/synch_bitops.h>
43 #include <xen/evtchn.h>
44 #include <xen/interface/event_channel.h>
45 #include <xen/interface/physdev.h>
46 #include <asm/hypervisor.h>
47 #include <linux/mc146818rtc.h> /* RTC_IRQ */
50 * This lock protects updates to the following mapping and reference-count
51 * arrays. The lock does not need to be acquired to read the mapping tables.
/* NOTE(review): the comment opener/closer of the block above was lost in
 * extraction of this listing; only comments are added here. */
53 static DEFINE_SPINLOCK(irq_mapping_update_lock);
55 /* IRQ <-> event-channel mappings. */
56 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
57 [0 ... NR_EVENT_CHANNELS-1] = -1 };
/* -1 means "no IRQ bound to this event channel"; lookups below test == -1. */
59 /* Packed IRQ information: binding type, sub-type index, and event channel. */
60 static u32 irq_info[NR_IRQS];
/* Binding types packed into the top byte of irq_info[] by mk_irq_info(). */
63 enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };
65 /* Constructor for packed IRQ information. */
66 static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
68 return ((type << 24) | (index << 16) | evtchn);
71 /* Convenient shorthand for packed representation of an unbound IRQ. */
/* IRQT_UNBOUND is the first enumerator (0), so this expands to an all-zero
 * info word -- irq_resume() relies on comparing against it directly. */
72 #define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)
75 * Accessors for packed IRQ information.
78 static inline unsigned int evtchn_from_irq(int irq)
80 return (u16)(irq_info[irq]);
83 static inline unsigned int index_from_irq(int irq)
85 return (u8)(irq_info[irq] >> 16);
88 static inline unsigned int type_from_irq(int irq)
90 return (u8)(irq_info[irq] >> 24);
93 /* IRQ <-> VIRQ mapping. */
94 DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
96 /* IRQ <-> IPI mapping. */
100 DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};
102 /* Reference counts for bindings to IRQs. */
103 static int irq_bindcount[NR_IRQS];
105 /* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
/* NOTE(review): dividing by sizeof(unsigned long) (bytes) instead of
 * BITS_PER_LONG over-allocates this bitmap ~8x on 64-bit; harmless but
 * BITS_PER_LONG would be the intended divisor -- confirm before changing. */
106 static unsigned long pirq_needs_eoi[NR_PIRQS/sizeof(unsigned long)];
/* Per-channel delivery CPU, plus a per-CPU bitmask of channels routed to it;
 * kept in sync by bind_evtchn_to_cpu(). */
110 static u8 cpu_evtchn[NR_EVENT_CHANNELS];
111 static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
113 static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
116 return (sh->evtchn_pending[idx] &
117 cpu_evtchn_mask[cpu][idx] &
118 ~sh->evtchn_mask[idx]);
121 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
123 int irq = evtchn_to_irq[chn];
126 set_native_irq_info(irq, cpumask_of_cpu(cpu));
128 clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
129 set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
130 cpu_evtchn[chn] = cpu;
133 static void init_evtchn_cpu_bindings(void)
137 /* By default all event channels notify CPU#0. */
138 for (i = 0; i < NR_IRQS; i++)
139 set_native_irq_info(i, cpumask_of_cpu(0));
141 memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
142 memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
145 static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
147 return cpu_evtchn[evtchn];
/* NOTE(review): the five declarations below duplicate names defined above and
 * are presumably the !CONFIG_SMP (#else) fallbacks -- the #ifdef/#else/#endif
 * lines and the function bodies were lost from this listing; confirm against
 * the full source before editing. The UP active_evtchns ignores per-CPU
 * routing entirely. */
152 static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
155 return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
/* UP stub: nothing to route with a single CPU. */
158 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
/* UP stub: no per-CPU masks to initialize. */
162 static void init_evtchn_cpu_bindings(void)
/* UP stub: presumably returns 0 unconditionally -- body lost; confirm. */
166 static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
173 /* Upcall to generic IRQ layer. */
175 extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
176 void __init xen_init_IRQ(void);
/* NOTE(review): init_IRQ()'s body was lost from this listing; presumably it
 * just calls xen_init_IRQ() -- confirm against the full source. */
177 void __init init_IRQ(void)
/* Per-arch glue: name of the saved original-IRQ register in pt_regs. */
182 #if defined (__i386__)
183 static inline void exit_idle(void) {}
184 #define IRQ_REG orig_eax
185 #elif defined (__x86_64__)
186 #include <asm/idle.h>
187 #define IRQ_REG orig_rax
/* NOTE(review): the closing #endif and the tail of the do_IRQ() wrapper macro
 * (which stores ~irq and hands off to the generic do_IRQ) were lost from this
 * listing -- confirm against the full source. */
189 #define do_IRQ(irq, regs) do { \
190 (regs)->IRQ_REG = ~(irq); \
195 /* Xen will never allocate port zero for any purpose. */
/* Port 0 therefore doubles as the "unbound" sentinel in irq_info[]. */
196 #define VALID_EVTCHN(chn) ((chn) != 0)
199 * Force a proper event-channel callback from Xen after clearing the
200 * callback mask. We do this in a very simple manner, by making a call
201 * down into Xen. The pending flag will be checked by Xen on return.
203 void force_evtchn_callback(void)
205 (void)HYPERVISOR_xen_version(0, NULL);
207 /* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
208 EXPORT_SYMBOL(force_evtchn_callback);
210 /* NB. Interrupts are disabled on entry. */
211 asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
213 unsigned long l1, l2;
214 unsigned int l1i, l2i, port;
215 int irq, cpu = smp_processor_id();
216 shared_info_t *s = HYPERVISOR_shared_info;
217 vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
219 vcpu_info->evtchn_upcall_pending = 0;
221 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
222 /* Clear master pending flag /before/ clearing selector flag. */
225 l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
230 while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
233 port = (l1i * BITS_PER_LONG) + l2i;
234 if ((irq = evtchn_to_irq[port]) != -1)
238 evtchn_device_upcall(port);
244 static int find_unbound_irq(void)
248 /* Only allocate from dynirq range */
249 for (irq = DYNIRQ_BASE; irq < NR_IRQS; irq++)
250 if (irq_bindcount[irq] == 0)
254 panic("No available IRQ to bind to: increase NR_IRQS!\n");
259 static int bind_evtchn_to_irq(unsigned int evtchn)
263 spin_lock(&irq_mapping_update_lock);
265 if ((irq = evtchn_to_irq[evtchn]) == -1) {
266 irq = find_unbound_irq();
267 evtchn_to_irq[evtchn] = irq;
268 irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
271 irq_bindcount[irq]++;
273 spin_unlock(&irq_mapping_update_lock);
278 static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
280 struct evtchn_bind_virq bind_virq;
283 spin_lock(&irq_mapping_update_lock);
285 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
286 bind_virq.virq = virq;
287 bind_virq.vcpu = cpu;
288 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
291 evtchn = bind_virq.port;
293 irq = find_unbound_irq();
294 evtchn_to_irq[evtchn] = irq;
295 irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
297 per_cpu(virq_to_irq, cpu)[virq] = irq;
299 bind_evtchn_to_cpu(evtchn, cpu);
302 irq_bindcount[irq]++;
304 spin_unlock(&irq_mapping_update_lock);
309 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
311 struct evtchn_bind_ipi bind_ipi;
314 spin_lock(&irq_mapping_update_lock);
316 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
318 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
321 evtchn = bind_ipi.port;
323 irq = find_unbound_irq();
324 evtchn_to_irq[evtchn] = irq;
325 irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
327 per_cpu(ipi_to_irq, cpu)[ipi] = irq;
329 bind_evtchn_to_cpu(evtchn, cpu);
332 irq_bindcount[irq]++;
334 spin_unlock(&irq_mapping_update_lock);
339 static void unbind_from_irq(unsigned int irq)
341 struct evtchn_close close;
342 int evtchn = evtchn_from_irq(irq);
344 spin_lock(&irq_mapping_update_lock);
346 if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
348 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
351 switch (type_from_irq(irq)) {
353 per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
354 [index_from_irq(irq)] = -1;
357 per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
358 [index_from_irq(irq)] = -1;
364 /* Closed ports are implicitly re-bound to VCPU0. */
365 bind_evtchn_to_cpu(evtchn, 0);
367 evtchn_to_irq[evtchn] = -1;
368 irq_info[irq] = IRQ_UNBOUND;
371 spin_unlock(&irq_mapping_update_lock);
374 int bind_evtchn_to_irqhandler(
376 irqreturn_t (*handler)(int, void *, struct pt_regs *),
377 unsigned long irqflags,
384 irq = bind_evtchn_to_irq(evtchn);
385 retval = request_irq(irq, handler, irqflags, devname, dev_id);
387 unbind_from_irq(irq);
393 EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
395 int bind_virq_to_irqhandler(
398 irqreturn_t (*handler)(int, void *, struct pt_regs *),
399 unsigned long irqflags,
406 irq = bind_virq_to_irq(virq, cpu);
407 retval = request_irq(irq, handler, irqflags, devname, dev_id);
409 unbind_from_irq(irq);
415 EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
417 int bind_ipi_to_irqhandler(
420 irqreturn_t (*handler)(int, void *, struct pt_regs *),
421 unsigned long irqflags,
428 irq = bind_ipi_to_irq(ipi, cpu);
429 retval = request_irq(irq, handler, irqflags, devname, dev_id);
431 unbind_from_irq(irq);
437 EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);
/* Remove @handler from @irq and drop the binding reference taken by the
 * corresponding bind_*_to_irqhandler() call. */
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
446 /* Rebind an evtchn so that it gets delivered to a specific cpu */
447 static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
449 struct evtchn_bind_vcpu bind_vcpu;
450 int evtchn = evtchn_from_irq(irq);
452 if (!VALID_EVTCHN(evtchn))
455 /* Send future instances of this interrupt to other vcpu. */
456 bind_vcpu.port = evtchn;
457 bind_vcpu.vcpu = tcpu;
460 * If this fails, it usually just indicates that we're dealing with a
461 * virq or IPI channel, which don't actually need to be rebound. Ignore
462 * it, but don't do the xenlinux-level rebind in that case.
464 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
465 bind_evtchn_to_cpu(evtchn, tcpu);
469 static void set_affinity_irq(unsigned irq, cpumask_t dest)
471 unsigned tcpu = first_cpu(dest);
472 rebind_irq_to_cpu(irq, tcpu);
475 static int retrigger(unsigned int irq)
477 int evtchn = evtchn_from_irq(irq);
478 shared_info_t *s = HYPERVISOR_shared_info;
479 if (!VALID_EVTCHN(evtchn))
481 synch_set_bit(evtchn, &s->evtchn_pending[0]);
/*
 * Interface to generic handling in irq.c
 */
static unsigned int startup_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
	return 0;
}
static void shutdown_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	/* Just mask the channel; the evtchn itself stays bound. */
	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}
static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}
static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}
static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	/* Mask and clear: like edge-ack on a real PIC. */
	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}
534 static void end_dynirq(unsigned int irq)
536 int evtchn = evtchn_from_irq(irq);
538 if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
539 unmask_evtchn(evtchn);
542 static struct hw_interrupt_type dynirq_type = {
543 .typename = "Dynamic-irq",
544 .startup = startup_dynirq,
545 .shutdown = shutdown_dynirq,
546 .enable = enable_dynirq,
547 .disable = disable_dynirq,
551 .set_affinity = set_affinity_irq,
553 .retrigger = retrigger,
556 static inline void pirq_unmask_notify(int pirq)
558 struct physdev_eoi eoi = { .irq = pirq };
559 if (unlikely(test_bit(pirq, &pirq_needs_eoi[0])))
560 (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
563 static inline void pirq_query_unmask(int pirq)
565 struct physdev_irq_status_query irq_status;
566 irq_status.irq = pirq;
567 (void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
568 clear_bit(pirq, &pirq_needs_eoi[0]);
569 if (irq_status.flags & XENIRQSTAT_needs_eoi)
570 set_bit(pirq, &pirq_needs_eoi[0]);
574 * On startup, if there is no action associated with the IRQ then we are
575 * probing. In this case we should not share with others as it will confuse us.
/* NOTE(review): the comment delimiters of the block above were lost in
 * extraction of this listing. */
577 #define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
579 static unsigned int startup_pirq(unsigned int irq)
581 struct evtchn_bind_pirq bind_pirq;
582 int evtchn = evtchn_from_irq(irq);
584 if (VALID_EVTCHN(evtchn))
587 bind_pirq.pirq = irq;
588 /* NB. We are happy to share unless we are probing. */
589 bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
590 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
591 if (!probing_irq(irq))
592 printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
596 evtchn = bind_pirq.port;
598 pirq_query_unmask(irq_to_pirq(irq));
600 evtchn_to_irq[evtchn] = irq;
601 bind_evtchn_to_cpu(evtchn, 0);
602 irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);
605 unmask_evtchn(evtchn);
606 pirq_unmask_notify(irq_to_pirq(irq));
611 static void shutdown_pirq(unsigned int irq)
613 struct evtchn_close close;
614 int evtchn = evtchn_from_irq(irq);
616 if (!VALID_EVTCHN(evtchn))
622 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
625 bind_evtchn_to_cpu(evtchn, 0);
626 evtchn_to_irq[evtchn] = -1;
627 irq_info[irq] = IRQ_UNBOUND;
static void enable_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq_to_pirq(irq));
	}
}
static void disable_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}
static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	/* Mask and clear, mirroring ack_dynirq(). */
	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}
660 static void end_pirq(unsigned int irq)
662 int evtchn = evtchn_from_irq(irq);
664 if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
665 unmask_evtchn(evtchn);
666 pirq_unmask_notify(irq_to_pirq(irq));
670 static struct hw_interrupt_type pirq_type = {
671 .typename = "Phys-irq",
672 .startup = startup_pirq,
673 .shutdown = shutdown_pirq,
674 .enable = enable_pirq,
675 .disable = disable_pirq,
679 .set_affinity = set_affinity_irq,
681 .retrigger = retrigger,
684 int irq_ignore_unhandled(unsigned int irq)
686 struct physdev_irq_status_query irq_status = { .irq = irq };
688 if (!is_running_on_xen())
691 (void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
692 return !!(irq_status.flags & XENIRQSTAT_shared);
695 void resend_irq_on_evtchn(unsigned int i)
697 int evtchn = evtchn_from_irq(i);
698 shared_info_t *s = HYPERVISOR_shared_info;
699 if (!VALID_EVTCHN(evtchn))
701 BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
702 synch_set_bit(evtchn, &s->evtchn_pending[0]);
/* Signal the remote end of the event channel bound to @irq, if any. */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
714 void mask_evtchn(int port)
716 shared_info_t *s = HYPERVISOR_shared_info;
717 synch_set_bit(port, &s->evtchn_mask[0]);
719 EXPORT_SYMBOL_GPL(mask_evtchn);
721 void unmask_evtchn(int port)
723 shared_info_t *s = HYPERVISOR_shared_info;
724 unsigned int cpu = smp_processor_id();
725 vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
727 BUG_ON(!irqs_disabled());
729 /* Slow path (hypercall) if this is a non-local port. */
730 if (unlikely(cpu != cpu_from_evtchn(port))) {
731 struct evtchn_unmask unmask = { .port = port };
732 (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
736 synch_clear_bit(port, &s->evtchn_mask[0]);
739 * The following is basically the equivalent of 'hw_resend_irq'. Just
740 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
743 if (synch_test_bit(port, &s->evtchn_pending[0]) &&
744 !synch_test_and_set_bit(port / BITS_PER_LONG,
745 &vcpu_info->evtchn_pending_sel))
746 vcpu_info->evtchn_upcall_pending = 1;
748 EXPORT_SYMBOL_GPL(unmask_evtchn);
750 void irq_resume(void)
752 struct evtchn_bind_virq bind_virq;
753 struct evtchn_bind_ipi bind_ipi;
754 int cpu, pirq, virq, ipi, irq, evtchn;
756 init_evtchn_cpu_bindings();
758 /* New event-channel space is not 'live' yet. */
759 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
762 /* Check that no PIRQs are still bound. */
763 for (pirq = 0; pirq < NR_PIRQS; pirq++)
764 BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);
766 /* Secondary CPUs must have no VIRQ or IPI bindings. */
767 for_each_possible_cpu(cpu) {
770 for (virq = 0; virq < NR_VIRQS; virq++)
771 BUG_ON(per_cpu(virq_to_irq, cpu)[virq] != -1);
772 for (ipi = 0; ipi < NR_IPIS; ipi++)
773 BUG_ON(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
776 /* No IRQ <-> event-channel mappings. */
777 for (irq = 0; irq < NR_IRQS; irq++)
778 irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
779 for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
780 evtchn_to_irq[evtchn] = -1;
782 /* Primary CPU: rebind VIRQs automatically. */
783 for (virq = 0; virq < NR_VIRQS; virq++) {
784 if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
787 BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
789 /* Get a new binding from Xen. */
790 bind_virq.virq = virq;
792 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
795 evtchn = bind_virq.port;
797 /* Record the new mapping. */
798 evtchn_to_irq[evtchn] = irq;
799 irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
802 unmask_evtchn(evtchn);
805 /* Primary CPU: rebind IPIs automatically. */
806 for (ipi = 0; ipi < NR_IPIS; ipi++) {
807 if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
810 BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
812 /* Get a new binding from Xen. */
814 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
817 evtchn = bind_ipi.port;
819 /* Record the new mapping. */
820 evtchn_to_irq[evtchn] = irq;
821 irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
824 unmask_evtchn(evtchn);
/* Boot-time setup of the Xen IRQ layer: mask all channels, clear the
 * IRQ<->evtchn map, and register the dynirq/pirq genirq controllers.
 * NOTE(review): this listing is missing interior lines (braces, loop bodies)
 * and the function continues past the end of the excerpt; only comments are
 * added here. */
828 void __init xen_init_IRQ(void)
832 init_evtchn_cpu_bindings();
834 /* No event channels are 'live' right now. */
835 for (i = 0; i < NR_EVENT_CHANNELS; i++)
/* presumably mask_evtchn(i) here -- loop body lost in extraction; confirm. */
838 /* No IRQ -> event-channel mappings. */
839 for (i = 0; i < NR_IRQS; i++)
840 irq_info[i] = IRQ_UNBOUND;
842 /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
843 for (i = 0; i < NR_DYNIRQS; i++) {
844 irq_bindcount[dynirq_to_irq(i)] = 0;
846 irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
847 irq_desc[dynirq_to_irq(i)].action = NULL;
848 irq_desc[dynirq_to_irq(i)].depth = 1;
849 irq_desc[dynirq_to_irq(i)].chip = &dynirq_type;
852 /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
853 for (i = 0; i < NR_PIRQS; i++) {
854 irq_bindcount[pirq_to_irq(i)] = 1;
857 /* If not domain 0, force our RTC driver to fail its probe. */
858 if ((i == RTC_IRQ) && !is_initial_xendomain())
/* presumably `continue;` here -- line lost in extraction; confirm. */
862 irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
863 irq_desc[pirq_to_irq(i)].action = NULL;
864 irq_desc[pirq_to_irq(i)].depth = 1;
865 irq_desc[pirq_to_irq(i)].chip = &pirq_type;