/******************************************************************************
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
30 #include <linux/config.h>
31 #include <linux/module.h>
32 #include <linux/irq.h>
33 #include <linux/interrupt.h>
34 #include <linux/sched.h>
35 #include <linux/kernel_stat.h>
36 #include <linux/version.h>
37 #include <asm/atomic.h>
38 #include <asm/system.h>
39 #include <asm/ptrace.h>
40 #include <asm/synch_bitops.h>
41 #include <asm-xen/xen-public/event_channel.h>
42 #include <asm-xen/xen-public/physdev.h>
43 #include <asm-xen/ctrl_if.h>
44 #include <asm-xen/hypervisor.h>
45 #include <asm-xen/evtchn.h>
47 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
48 EXPORT_SYMBOL(force_evtchn_callback);
49 EXPORT_SYMBOL(evtchn_do_upcall);
53 * This lock protects updates to the following mapping and reference-count
54 * arrays. The lock does not need to be acquired to read the mapping tables.
56 static spinlock_t irq_mapping_update_lock;
58 /* IRQ <-> event-channel mappings. */
59 static int evtchn_to_irq[NR_EVENT_CHANNELS];
60 static int irq_to_evtchn[NR_IRQS];
62 /* IRQ <-> VIRQ mapping. */
63 DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);
65 /* evtchn <-> IPI mapping. */
69 DEFINE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);
71 /* Reference counts for bindings to IRQs. */
72 static int irq_bindcount[NR_IRQS];
74 /* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
75 static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
77 /* Upcall to generic IRQ layer. */
79 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
80 extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
82 extern asmlinkage unsigned int do_IRQ(struct pt_regs *regs);
84 #if defined (__i386__)
85 #define IRQ_REG orig_eax
86 #elif defined (__x86_64__)
87 #define IRQ_REG orig_rax
89 #define do_IRQ(irq, regs) do { \
90 (regs)->IRQ_REG = (irq); \
/* A mapped event channel is any non-negative port number. */
#define VALID_EVTCHN(_chn) ((_chn) >= 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
    /* Any cheap hypercall will do; xen_version is a harmless no-op query. */
    (void)HYPERVISOR_xen_version(0);
}
107 /* NB. Interrupts are disabled on entry. */
108 asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
111 unsigned int l1i, l2i, port;
113 shared_info_t *s = HYPERVISOR_shared_info;
114 vcpu_info_t *vcpu_info = &s->vcpu_data[smp_processor_id()];
116 vcpu_info->evtchn_upcall_pending = 0;
118 /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
119 l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
125 while ( (l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i]) != 0 )
130 port = (l1i << 5) + l2i;
131 if ( (irq = evtchn_to_irq[port]) != -1 )
134 evtchn_device_upcall(port);
139 static int find_unbound_irq(void)
143 for ( irq = 0; irq < NR_IRQS; irq++ )
144 if ( irq_bindcount[irq] == 0 )
147 if ( irq == NR_IRQS )
148 panic("No available IRQ to bind to: increase NR_IRQS!\n");
153 int bind_virq_to_irq(int virq)
157 int cpu = smp_processor_id();
159 spin_lock(&irq_mapping_update_lock);
161 if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
163 op.cmd = EVTCHNOP_bind_virq;
164 op.u.bind_virq.virq = virq;
165 if ( HYPERVISOR_event_channel_op(&op) != 0 )
166 panic("Failed to bind virtual IRQ %d\n", virq);
167 evtchn = op.u.bind_virq.port;
169 irq = find_unbound_irq();
170 evtchn_to_irq[evtchn] = irq;
171 irq_to_evtchn[irq] = evtchn;
173 per_cpu(virq_to_irq, cpu)[virq] = irq;
176 irq_bindcount[irq]++;
178 spin_unlock(&irq_mapping_update_lock);
183 void unbind_virq_from_irq(int virq)
186 int cpu = smp_processor_id();
187 int irq = per_cpu(virq_to_irq, cpu)[virq];
188 int evtchn = irq_to_evtchn[irq];
190 spin_lock(&irq_mapping_update_lock);
192 if ( --irq_bindcount[irq] == 0 )
194 op.cmd = EVTCHNOP_close;
195 op.u.close.dom = DOMID_SELF;
196 op.u.close.port = evtchn;
197 if ( HYPERVISOR_event_channel_op(&op) != 0 )
198 panic("Failed to unbind virtual IRQ %d\n", virq);
200 evtchn_to_irq[evtchn] = -1;
201 irq_to_evtchn[irq] = -1;
202 per_cpu(virq_to_irq, cpu)[virq] = -1;
205 spin_unlock(&irq_mapping_update_lock);
208 int bind_ipi_on_cpu_to_irq(int cpu, int ipi)
213 spin_lock(&irq_mapping_update_lock);
215 if ( (evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0 )
217 op.cmd = EVTCHNOP_bind_ipi;
218 op.u.bind_ipi.ipi_edom = cpu;
219 if ( HYPERVISOR_event_channel_op(&op) != 0 )
220 panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, cpu);
221 evtchn = op.u.bind_ipi.port;
223 irq = find_unbound_irq();
224 evtchn_to_irq[evtchn] = irq;
225 irq_to_evtchn[irq] = evtchn;
227 per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
229 irq = evtchn_to_irq[evtchn];
231 irq_bindcount[irq]++;
233 spin_unlock(&irq_mapping_update_lock);
238 void unbind_ipi_on_cpu_from_irq(int cpu, int ipi)
241 int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
242 int irq = irq_to_evtchn[evtchn];
244 spin_lock(&irq_mapping_update_lock);
246 if ( --irq_bindcount[irq] == 0 )
248 op.cmd = EVTCHNOP_close;
249 op.u.close.dom = DOMID_SELF;
250 op.u.close.port = evtchn;
251 if ( HYPERVISOR_event_channel_op(&op) != 0 )
252 panic("Failed to unbind virtual IPI %d on cpu %d\n", ipi, cpu);
254 evtchn_to_irq[evtchn] = -1;
255 irq_to_evtchn[irq] = -1;
256 per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
259 spin_unlock(&irq_mapping_update_lock);
262 int bind_evtchn_to_irq(int evtchn)
266 spin_lock(&irq_mapping_update_lock);
268 if ( (irq = evtchn_to_irq[evtchn]) == -1 )
270 irq = find_unbound_irq();
271 evtchn_to_irq[evtchn] = irq;
272 irq_to_evtchn[irq] = evtchn;
275 irq_bindcount[irq]++;
277 spin_unlock(&irq_mapping_update_lock);
282 void unbind_evtchn_from_irq(int evtchn)
284 int irq = evtchn_to_irq[evtchn];
286 spin_lock(&irq_mapping_update_lock);
288 if ( --irq_bindcount[irq] == 0 )
290 evtchn_to_irq[evtchn] = -1;
291 irq_to_evtchn[irq] = -1;
294 spin_unlock(&irq_mapping_update_lock);
299 * Interface to generic handling in irq.c
302 static unsigned int startup_dynirq(unsigned int irq)
304 int evtchn = irq_to_evtchn[irq];
306 if ( !VALID_EVTCHN(evtchn) )
308 unmask_evtchn(evtchn);
312 static void shutdown_dynirq(unsigned int irq)
314 int evtchn = irq_to_evtchn[irq];
316 if ( !VALID_EVTCHN(evtchn) )
321 static void enable_dynirq(unsigned int irq)
323 int evtchn = irq_to_evtchn[irq];
325 unmask_evtchn(evtchn);
328 static void disable_dynirq(unsigned int irq)
330 int evtchn = irq_to_evtchn[irq];
335 static void ack_dynirq(unsigned int irq)
337 int evtchn = irq_to_evtchn[irq];
340 clear_evtchn(evtchn);
343 static void end_dynirq(unsigned int irq)
345 int evtchn = irq_to_evtchn[irq];
347 if ( !(irq_desc[irq].status & IRQ_DISABLED) )
348 unmask_evtchn(evtchn);
351 static struct hw_interrupt_type dynirq_type = {
362 static inline void pirq_unmask_notify(int pirq)
365 if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
367 op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
368 (void)HYPERVISOR_physdev_op(&op);
372 static inline void pirq_query_unmask(int pirq)
375 op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
376 op.u.irq_status_query.irq = pirq;
377 (void)HYPERVISOR_physdev_op(&op);
378 clear_bit(pirq, &pirq_needs_unmask_notify[0]);
379 if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
380 set_bit(pirq, &pirq_needs_unmask_notify[0]);
/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
389 static unsigned int startup_pirq(unsigned int irq)
394 op.cmd = EVTCHNOP_bind_pirq;
395 op.u.bind_pirq.pirq = irq;
396 /* NB. We are happy to share unless we are probing. */
397 op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
398 if ( HYPERVISOR_event_channel_op(&op) != 0 )
400 if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
401 printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
404 evtchn = op.u.bind_pirq.port;
406 pirq_query_unmask(irq_to_pirq(irq));
408 evtchn_to_irq[evtchn] = irq;
409 irq_to_evtchn[irq] = evtchn;
411 unmask_evtchn(evtchn);
412 pirq_unmask_notify(irq_to_pirq(irq));
417 static void shutdown_pirq(unsigned int irq)
420 int evtchn = irq_to_evtchn[irq];
422 if ( !VALID_EVTCHN(evtchn) )
427 op.cmd = EVTCHNOP_close;
428 op.u.close.dom = DOMID_SELF;
429 op.u.close.port = evtchn;
430 if ( HYPERVISOR_event_channel_op(&op) != 0 )
431 panic("Failed to unbind physical IRQ %d\n", irq);
433 evtchn_to_irq[evtchn] = -1;
434 irq_to_evtchn[irq] = -1;
437 static void enable_pirq(unsigned int irq)
439 int evtchn = irq_to_evtchn[irq];
440 if ( !VALID_EVTCHN(evtchn) )
442 unmask_evtchn(evtchn);
443 pirq_unmask_notify(irq_to_pirq(irq));
446 static void disable_pirq(unsigned int irq)
448 int evtchn = irq_to_evtchn[irq];
449 if ( !VALID_EVTCHN(evtchn) )
454 static void ack_pirq(unsigned int irq)
456 int evtchn = irq_to_evtchn[irq];
457 if ( !VALID_EVTCHN(evtchn) )
460 clear_evtchn(evtchn);
463 static void end_pirq(unsigned int irq)
465 int evtchn = irq_to_evtchn[irq];
466 if ( !VALID_EVTCHN(evtchn) )
468 if ( !(irq_desc[irq].status & IRQ_DISABLED) )
470 unmask_evtchn(evtchn);
471 pirq_unmask_notify(irq_to_pirq(irq));
475 static struct hw_interrupt_type pirq_type = {
486 void irq_suspend(void)
488 int pirq, virq, irq, evtchn;
489 int cpu = smp_processor_id(); /* XXX */
491 /* Unbind VIRQs from event channels. */
492 for ( virq = 0; virq < NR_VIRQS; virq++ )
494 if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
496 evtchn = irq_to_evtchn[irq];
498 /* Mark the event channel as unused in our table. */
499 evtchn_to_irq[evtchn] = -1;
500 irq_to_evtchn[irq] = -1;
503 /* Check that no PIRQs are still bound. */
504 for ( pirq = 0; pirq < NR_PIRQS; pirq++ )
505 if ( (evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1 )
506 panic("Suspend attempted while PIRQ %d bound to evtchn %d.\n",
510 void irq_resume(void)
513 int virq, irq, evtchn;
514 int cpu = smp_processor_id(); /* XXX */
516 for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
517 mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
519 for ( virq = 0; virq < NR_VIRQS; virq++ )
521 if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
524 /* Get a new binding from Xen. */
525 op.cmd = EVTCHNOP_bind_virq;
526 op.u.bind_virq.virq = virq;
527 if ( HYPERVISOR_event_channel_op(&op) != 0 )
528 panic("Failed to bind virtual IRQ %d\n", virq);
529 evtchn = op.u.bind_virq.port;
531 /* Record the new mapping. */
532 evtchn_to_irq[evtchn] = irq;
533 irq_to_evtchn[irq] = evtchn;
536 unmask_evtchn(evtchn);
540 void __init init_IRQ(void)
547 spin_lock_init(&irq_mapping_update_lock);
549 for ( cpu = 0; cpu < NR_CPUS; cpu++ ) {
550 /* No VIRQ -> IRQ mappings. */
551 for ( i = 0; i < NR_VIRQS; i++ )
552 per_cpu(virq_to_irq, cpu)[i] = -1;
555 /* No event-channel -> IRQ mappings. */
556 for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
558 evtchn_to_irq[i] = -1;
559 mask_evtchn(i); /* No event channels are 'live' right now. */
562 /* No IRQ -> event-channel mappings. */
563 for ( i = 0; i < NR_IRQS; i++ )
564 irq_to_evtchn[i] = -1;
566 for ( i = 0; i < NR_DYNIRQS; i++ )
568 /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
569 irq_bindcount[dynirq_to_irq(i)] = 0;
571 irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
572 irq_desc[dynirq_to_irq(i)].action = 0;
573 irq_desc[dynirq_to_irq(i)].depth = 1;
574 irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
577 for ( i = 0; i < NR_PIRQS; i++ )
579 /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
580 irq_bindcount[pirq_to_irq(i)] = 1;
582 irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
583 irq_desc[pirq_to_irq(i)].action = 0;
584 irq_desc[pirq_to_irq(i)].depth = 1;
585 irq_desc[pirq_to_irq(i)].handler = &pirq_type;
588 /* This needs to be done early, but after the IRQ subsystem is alive. */