/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *      Stephane Eranian <eranian@hpl.hp.com>
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *           support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *                      PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *                      Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/random.h>       /* for rand_initialize_irq() */
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#ifdef CONFIG_XEN
#include <linux/cpu.h>
#endif

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/system.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#define IRQ_DEBUG       0

/* These can be overridden in platform_irq_init */
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
                               (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
        /* 8259 IRQ translation, first 16 entries */
        0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
        0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);

static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)];

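/*
 * Device vectors are handed out by a simple bitmap allocator: bit N in
 * ia64_vector_mask corresponds to vector IA64_FIRST_DEVICE_VECTOR + N.
 * The test_and_set_bit()/test_and_clear_bit() operations make
 * allocation and free safe against concurrent callers without a lock.
 */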
int
assign_irq_vector (int irq)
{
        int pos, vector;

#ifdef CONFIG_XEN
        if (is_running_on_xen()) {
                extern int xen_assign_irq_vector(int);
                return xen_assign_irq_vector(irq);
        }
#endif
 again:
        pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
        vector = IA64_FIRST_DEVICE_VECTOR + pos;
        if (vector > IA64_LAST_DEVICE_VECTOR)
                return -ENOSPC;
        if (test_and_set_bit(pos, ia64_vector_mask))
                goto again;
        return vector;
}

void
free_irq_vector (int vector)
{
        int pos;

        if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
                return;

        pos = vector - IA64_FIRST_DEVICE_VECTOR;
        if (!test_and_clear_bit(pos, ia64_vector_mask))
                printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
}

int
reserve_irq_vector (int vector)
{
        int pos;

        if (vector < IA64_FIRST_DEVICE_VECTOR ||
            vector > IA64_LAST_DEVICE_VECTOR)
                return -EINVAL;

        pos = vector - IA64_FIRST_DEVICE_VECTOR;
        return test_and_set_bit(pos, ia64_vector_mask);
}
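
/*
 * Typical use of the allocator (a sketch, not taken from an in-tree
 * caller): a driver grabs a free device vector and releases it again
 * on teardown:
 *
 *      int vector = assign_irq_vector(AUTO_ASSIGN);
 *      if (vector < 0)
 *              return vector;  (-ENOSPC if all device vectors are taken)
 *      ...
 *      free_irq_vector(vector);
 */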

/*
 * Dynamic IRQ allocation and deallocation, used for MSI.
 */
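/*
 * Note that on ia64 device vectors map 1:1 onto irq numbers here, so
 * the vector returned by assign_irq_vector() doubles as the irq (and
 * destroy_irq() can hand its irq straight back to free_irq_vector()).
 */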
int create_irq(void)
{
        int vector = assign_irq_vector(AUTO_ASSIGN);

        if (vector >= 0)
                dynamic_irq_init(vector);

        return vector;
}

void destroy_irq(unsigned int irq)
{
        dynamic_irq_cleanup(irq);
        free_irq_vector(irq);
}

#ifdef CONFIG_SMP
#       define IS_RESCHEDULE(vec)       (vec == IA64_IPI_RESCHEDULE)
#else
#       define IS_RESCHEDULE(vec)       (0)
#endif
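
/*
 * Reschedule IPIs need no handler of their own: taking the interrupt
 * is enough, since the need_resched check on the interrupt return path
 * does the actual work.  That is why IS_RESCHEDULE() vectors are
 * short-circuited below and only accounted in the irq statistics.
 */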
/*
 * This is where the IVT branches when we get an external interrupt.
 * From here we dispatch to the correct hardware IRQ handler via a
 * function pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned long saved_tpr;

#if IRQ_DEBUG
        {
                unsigned long bsp, sp;

                /*
                 * Note: if the interrupt happened while executing in
                 * the context switch routine (ia64_switch_to), we may
                 * get a spurious stack overflow here.  This is
                 * because the register and the memory stack are not
                 * switched atomically.
                 */
                bsp = ia64_getreg(_IA64_REG_AR_BSP);
                sp = ia64_getreg(_IA64_REG_SP);

                if ((sp - bsp) < 1024) {
                        static unsigned char count;
                        static long last_time;

                        if (jiffies - last_time > 5*HZ)
                                count = 0;
                        if (++count < 5) {
                                last_time = jiffies;
                                printk("ia64_handle_irq: DANGER: less than "
                                       "1KB of free stack space!!\n"
                                       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
                        }
                }
        }
#endif /* IRQ_DEBUG */

        /*
         * Always set TPR to limit maximum interrupt nesting depth to
         * 16 (without this, it would be ~240, which could easily lead
         * to kernel stack overflows).
         */
        irq_enter();
        saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
        ia64_srlz_d();
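        /*
         * cr.ivr returns the highest-priority pending external
         * interrupt vector, or the spurious vector
         * (IA64_SPURIOUS_INT_VECTOR) once nothing is pending anymore.
         */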
        while (vector != IA64_SPURIOUS_INT_VECTOR) {
                if (unlikely(IS_RESCHEDULE(vector)))
                        kstat_this_cpu.irqs[vector]++;
                else {
                        ia64_setreg(_IA64_REG_CR_TPR, vector);
                        ia64_srlz_d();

                        generic_handle_irq(local_vector_to_irq(vector));

                        /*
                         * Disable interrupts and send EOI:
                         */
                        local_irq_disable();
                        ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
                }
                ia64_eoi();
                vector = ia64_get_ivr();
        }
        /*
         * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
         * handler needs to be able to wait for further keyboard interrupts, which can't
         * come through until ia64_eoi() has been done.
         */
        irq_exit();
        set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a CPU is about to
 * be brought down.
 */
void ia64_process_pending_intr(void)
{
        ia64_vector vector;
        unsigned long saved_tpr;
        extern unsigned int vectors_in_migration[NR_IRQS];

        vector = ia64_get_ivr();

        irq_enter();
        saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
        ia64_srlz_d();

        /*
         * Perform normal interrupt style processing
         */
        while (vector != IA64_SPURIOUS_INT_VECTOR) {
                if (unlikely(IS_RESCHEDULE(vector)))
                        kstat_this_cpu.irqs[vector]++;
                else {
                        struct pt_regs *old_regs = set_irq_regs(NULL);

                        ia64_setreg(_IA64_REG_CR_TPR, vector);
                        ia64_srlz_d();

                        /*
                         * Call the normal interrupt dispatch path, just
                         * as it would have been invoked from a real
                         * interrupt handler, passing NULL for pt_regs.
                         * This could probably share code with
                         * ia64_handle_irq().
                         */
                        vectors_in_migration[local_vector_to_irq(vector)] = 0;
                        generic_handle_irq(local_vector_to_irq(vector));
                        set_irq_regs(old_regs);

                        /*
                         * Disable interrupts and send EOI
                         */
                        local_irq_disable();
                        ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
                }
                ia64_eoi();
                vector = ia64_get_ivr();
        }
        irq_exit();
}
#endif

#ifdef CONFIG_SMP
extern irqreturn_t handle_IPI (int irq, void *dev_id);

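/*
 * The reschedule vector is never dispatched to its handler: the
 * IS_RESCHEDULE() short-circuit in ia64_handle_irq() swallows it, so
 * the "resched" irqaction below exists only to mark the vector as in
 * use, and dummy_handler() BUGs if it is ever actually called.  Xen
 * kernels use the "RESCHED" variant defined further down instead,
 * since there the event-channel subsystem really does invoke the
 * handler.
 */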
#ifndef CONFIG_XEN
static irqreturn_t dummy_handler (int irq, void *dev_id)
{
        BUG();
}

static struct irqaction resched_irqaction = {
        .handler =      dummy_handler,
        .flags =        IRQF_DISABLED,
        .name =         "resched"
};
#endif

static struct irqaction ipi_irqaction = {
        .handler =      handle_IPI,
        .flags =        IRQF_DISABLED,
        .name =         "IPI"
};
#endif

#ifdef CONFIG_XEN
#include <xen/evtchn.h>
#include <xen/interface/callback.h>

static DEFINE_PER_CPU(int, timer_irq) = -1;
static DEFINE_PER_CPU(int, ipi_irq) = -1;
static DEFINE_PER_CPU(int, resched_irq) = -1;
static char timer_name[NR_CPUS][15];
static char ipi_name[NR_CPUS][15];
static char resched_name[NR_CPUS][15];

struct saved_irq {
        unsigned int irq;
        struct irqaction *action;
};
/* 16 should be plenty, since only a few percpu irqs are registered
 * this early.
 */
#define MAX_LATE_IRQ    16
static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
static unsigned short late_irq_cnt = 0;
static unsigned short saved_irq_cnt = 0;
static int xen_slab_ready = 0;
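
/*
 * saved_irq_cnt counts all percpu irq registrations cached on the BSP
 * (replayed later on each AP), while late_irq_cnt counts only those
 * cached before the slab allocator was ready; those still need to be
 * bound to event channels once it is (see xen_bind_early_percpu_irq()).
 */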

#ifdef CONFIG_SMP
/* Dummy stub.  We could check for RESCHEDULE_VECTOR before calling
 * __do_IRQ, but that would issue several memory accesses on percpu
 * data and thus add unnecessary traffic to other paths.
 */
static irqreturn_t
handle_reschedule(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static struct irqaction resched_irqaction = {
        .handler =      handle_reschedule,
        .flags =        IRQF_DISABLED,
        .name =         "RESCHED"
};
#endif

/*
 * This is the Xen version of percpu irq registration, which binds to
 * the Xen-specific evtchn sub-system.  One trick here is that the Xen
 * evtchn binding interface depends on kmalloc, because the associated
 * port needs to be freed when a device/cpu goes down.  So we cache
 * registrations made on the BSP before the slab allocator is ready and
 * deal with them at a later point.  All instances registered after
 * slab is ready are hooked up to Xen evtchn immediately.
 *
 * FIXME: MCA is not supported so far, and thus the "nomca" boot param
 * is required.
 */
static void
xen_register_percpu_irq (unsigned int irq, struct irqaction *action, int save)
{
        unsigned int cpu = smp_processor_id();
        int ret = 0;

        if (xen_slab_ready) {
                switch (irq) {
                case IA64_TIMER_VECTOR:
                        sprintf(timer_name[cpu], "%s%d", action->name, cpu);
                        ret = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
                                action->handler, action->flags,
                                timer_name[cpu], action->dev_id);
                        per_cpu(timer_irq, cpu) = ret;
                        printk(KERN_INFO "register VIRQ_ITC (%s) to xen irq (%d)\n",
                               timer_name[cpu], ret);
                        break;
                case IA64_IPI_RESCHEDULE:
                        sprintf(resched_name[cpu], "%s%d", action->name, cpu);
                        ret = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu,
                                action->handler, action->flags,
                                resched_name[cpu], action->dev_id);
                        per_cpu(resched_irq, cpu) = ret;
                        printk(KERN_INFO "register RESCHEDULE_VECTOR (%s) to xen irq (%d)\n",
                               resched_name[cpu], ret);
                        break;
                case IA64_IPI_VECTOR:
                        sprintf(ipi_name[cpu], "%s%d", action->name, cpu);
                        ret = bind_ipi_to_irqhandler(IPI_VECTOR, cpu,
                                action->handler, action->flags,
                                ipi_name[cpu], action->dev_id);
                        per_cpu(ipi_irq, cpu) = ret;
                        printk(KERN_INFO "register IPI_VECTOR (%s) to xen irq (%d)\n",
                               ipi_name[cpu], ret);
                        break;
                case IA64_SPURIOUS_INT_VECTOR:
                        break;
                default:
                        printk(KERN_WARNING "Percpu irq %d is unsupported by xen!\n",
                               irq);
                        break;
                }
                BUG_ON(ret < 0);
        }

        /* For the BSP, we cache registered percpu irqs and then re-walk
         * them when initializing the APs.
         */
        if (!cpu && save) {
                BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
                saved_percpu_irqs[saved_irq_cnt].irq = irq;
                saved_percpu_irqs[saved_irq_cnt].action = action;
                saved_irq_cnt++;
                if (!xen_slab_ready)
                        late_irq_cnt++;
        }
}

static void
xen_bind_early_percpu_irq (void)
{
        int i;

        xen_slab_ready = 1;
        /* There's no race in accessing this cached array, since only
         * the BSP executes this step, early during boot.
         */
        for (i = 0; i < late_irq_cnt; i++)
                xen_register_percpu_irq(saved_percpu_irqs[i].irq,
                                        saved_percpu_irqs[i].action, 0);
}

/* FIXME: There's no obvious hook that fires once the slab allocator
 * becomes ready, so as a workaround we piggyback on the late_time_init
 * hook.
 */
extern void (*late_time_init)(void);
extern char xen_event_callback;
extern void xen_init_IRQ(void);

#ifdef CONFIG_HOTPLUG_CPU
static int __devinit
unbind_evtchn_callback(struct notifier_block *nfb,
                       unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        if (action == CPU_DEAD) {
                /* Unregister evtchn.  */
                if (per_cpu(ipi_irq, cpu) >= 0) {
                        unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
                        per_cpu(ipi_irq, cpu) = -1;
                }
                if (per_cpu(resched_irq, cpu) >= 0) {
                        unbind_from_irqhandler(per_cpu(resched_irq, cpu),
                                               NULL);
                        per_cpu(resched_irq, cpu) = -1;
                }
                if (per_cpu(timer_irq, cpu) >= 0) {
                        unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
                        per_cpu(timer_irq, cpu) = -1;
                }
        }
        return NOTIFY_OK;
}

static struct notifier_block unbind_evtchn_notifier = {
        .notifier_call = unbind_evtchn_callback,
        .priority = 0
};
#endif

DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
void xen_smp_intr_init(void)
{
#ifdef CONFIG_SMP
        unsigned int cpu = smp_processor_id();
        unsigned int i = 0;
        struct callback_register event = {
                .type = CALLBACKTYPE_event,
                .address = (unsigned long)&xen_event_callback,
        };

        if (cpu == 0) {
                /* Initialization was already done for the boot cpu.  */
#ifdef CONFIG_HOTPLUG_CPU
                /* Register the notifier only once.  */
                register_cpu_notifier(&unbind_evtchn_notifier);
#endif
                return;
        }

        /* This should piggyback on VCPU guest-context setup. */
        BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));

        for (i = 0; i < saved_irq_cnt; i++)
                xen_register_percpu_irq(saved_percpu_irqs[i].irq,
                                        saved_percpu_irqs[i].action, 0);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_XEN */

void
register_percpu_irq (ia64_vector vec, struct irqaction *action)
{
        irq_desc_t *desc;
        unsigned int irq;

#ifdef CONFIG_XEN
        if (is_running_on_xen())
                return xen_register_percpu_irq(vec, action, 1);
#endif

        for (irq = 0; irq < NR_IRQS; ++irq)
                if (irq_to_vector(irq) == vec) {
                        desc = irq_desc + irq;
                        desc->status |= IRQ_PER_CPU;
                        desc->chip = &irq_type_ia64_lsapic;
                        if (action)
                                setup_irq(irq, action);
                }
}

void __init
init_IRQ (void)
{
#ifdef CONFIG_XEN
        /* Maybe put into platform_irq_init later */
        if (is_running_on_xen()) {
                struct callback_register event = {
                        .type = CALLBACKTYPE_event,
                        .address = (unsigned long)&xen_event_callback,
                };
                xen_init_IRQ();
                BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
                late_time_init = xen_bind_early_percpu_irq;
        }
#endif /* CONFIG_XEN */
        register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
        register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
        register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
#endif
#ifdef CONFIG_PERFMON
        pfm_init_percpu();
#endif
        platform_irq_init();
}

void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
        void __iomem *ipi_addr;
        unsigned long ipi_data;
        unsigned long phys_cpu_id;

#ifdef CONFIG_XEN
        if (is_running_on_xen()) {
                int irq = -1;

#ifdef CONFIG_SMP
                /* TODO: we need to call vcpu_up here */
                if (unlikely(vector == ap_wakeup_vector)) {
                        extern void xen_send_ipi (int cpu, int vec);
                        xen_send_ipi (cpu, vector);
                        //vcpu_prepare_and_up(cpu);
                        return;
                }
#endif

                switch (vector) {
                case IA64_IPI_VECTOR:
                        irq = per_cpu(ipi_to_irq, cpu)[IPI_VECTOR];
                        break;
                case IA64_IPI_RESCHEDULE:
                        irq = per_cpu(ipi_to_irq, cpu)[RESCHEDULE_VECTOR];
                        break;
                default:
                        printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
                               vector);
                        irq = 0;
                        break;
                }

                BUG_ON(irq < 0);
                notify_remote_via_irq(irq);
                return;
        }
#endif /* CONFIG_XEN */

#ifdef CONFIG_SMP
        phys_cpu_id = cpu_physical_id(cpu);
#else
        phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff;
#endif

        /*
         * The destination cpu number is encoded as an 8-bit ID and an
         * 8-bit EID.  The IPI data word carries the delivery mode in
         * bits 8-10 and the vector in bits 0-7; bit 3 of the target
         * address is the redirect hint.
         */
        ipi_data = (delivery_mode << 8) | (vector & 0xff);
        ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

        writeq(ipi_data, ipi_addr);
}