patch-2_6_7-vs1_9_1_12
[linux-2.6.git] / arch / ppc64 / kernel / xics.c
1 /* 
2  * arch/ppc64/kernel/xics.c
3  *
4  * Copyright 2000 IBM Corporation.
5  *
6  *  This program is free software; you can redistribute it and/or
7  *  modify it under the terms of the GNU General Public License
8  *  as published by the Free Software Foundation; either version
9  *  2 of the License, or (at your option) any later version.
10  */
11 #include <linux/config.h>
12 #include <linux/types.h>
13 #include <linux/threads.h>
14 #include <linux/kernel.h>
15 #include <linux/irq.h>
16 #include <linux/smp.h>
17 #include <linux/interrupt.h>
18 #include <linux/signal.h>
19 #include <linux/init.h>
20 #include <linux/gfp.h>
21 #include <linux/radix-tree.h>
22 #include <linux/cpu.h>
23 #include <asm/prom.h>
24 #include <asm/io.h>
25 #include <asm/pgtable.h>
26 #include <asm/smp.h>
27 #include <asm/naca.h>
28 #include <asm/rtas.h>
29 #include <asm/xics.h>
30 #include <asm/ppcdebug.h>
31 #include <asm/hvcall.h>
32 #include <asm/machdep.h>
33
34 #include "i8259.h"
35
/* Forward declarations for the handlers wired into the irq chips below. */
static unsigned int xics_startup(unsigned int irq);
static void xics_enable_irq(unsigned int irq);
static void xics_disable_irq(unsigned int irq);
static void xics_mask_and_ack_irq(unsigned int irq);
static void xics_end_irq(unsigned int irq);
static void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask);

/* Interrupt controller operations for native XICS-managed interrupts. */
struct hw_interrupt_type xics_pic = {
        .typename = " XICS     ",
        .startup = xics_startup,
        .enable = xics_enable_irq,
        .disable = xics_disable_irq,
        .ack = xics_mask_and_ack_irq,
        .end = xics_end_irq,
        .set_affinity = xics_set_affinity
};

/*
 * Operations for the 16 legacy interrupts cascaded through the i8259.
 * .enable/.disable are copied from i8259_pic in xics_init_IRQ(); only
 * the ack is handled here so the XICS side gets EOId too.
 */
struct hw_interrupt_type xics_8259_pic = {
        .typename = " XICS/8259",
        .ack = xics_mask_and_ack_irq,
};
57
/* This is used to map real irq numbers to virtual */
static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);

/* Fixed interrupt source numbers */
#define XICS_IPI                2
#define XICS_IRQ_SPURIOUS       0

/* Want a priority other than 0.  Various HW issues require this. */
#define DEFAULT_PRIORITY        5

/*
 * Mark IPIs as higher priority so we can take them inside interrupts
 * that aren't marked SA_INTERRUPT
 */
#define IPI_PRIORITY            4

/*
 * Layout of one cpu's memory-mapped XICS presentation registers.
 * Each register is accessible as a whole 32-bit word or byte-by-byte
 * (the accessors below use byte 0 for the CPPR and MFRR).
 */
struct xics_ipl {
        union {
                u32 word;
                u8 bytes[4];
        } xirr_poll;    /* XIRR poll register */
        union {
                u32 word;
                u8 bytes[4];
        } xirr;         /* XIRR; byte 0 doubles as the CPPR */
        u32 dummy;      /* pad between xirr and qirr */
        union {
                u32 word;
                u8 bytes[4];
        } qirr;         /* queued-interrupt register; byte 0 is the MFRR */
};

/* Per-cpu mapping of the presentation registers, set up in xics_init_IRQ() */
static struct xics_ipl *xics_per_cpu[NR_CPUS];

/* ISA i8259 cascade irq numbers (virtual and real); -1 when no 8259 exists */
static int xics_irq_8259_cascade = 0;
static int xics_irq_8259_cascade_real = 0;
/* Server numbers discovered from the boot cpu's device-tree node */
static unsigned int default_server = 0xFF;
/* also referenced in smp.c... */
unsigned int default_distrib_server = 0;

/*
 * XICS only has a single IPI, so encode the messages per CPU
 */
struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;

/* RTAS service tokens, looked up once in xics_init_IRQ() */
int ibm_get_xive;
int ibm_set_xive;
int ibm_int_on;
int ibm_int_off;

/*
 * Low-level register access methods: one implementation doing direct
 * MMIO (pSeries_ops) and one going through hypervisor calls for LPAR
 * (pSeriesLP_ops).
 */
typedef struct {
        int (*xirr_info_get)(int cpu);
        void (*xirr_info_set)(int cpu, int val);
        void (*cppr_info)(int cpu, u8 val);
        void (*qirr_info)(int cpu, u8 val);
} xics_ops;
114
115
/* SMP */

/*
 * Direct MMIO accessors for the per-cpu presentation registers.
 * NOTE(review): xics_per_cpu pointers come from __ioremap(...,
 * _PAGE_NO_CACHE) but are not volatile-qualified; callers bracket
 * these accesses with iosync() -- confirm that is sufficient before
 * restructuring any of them.
 */

static int pSeries_xirr_info_get(int n_cpu)
{
        return xics_per_cpu[n_cpu]->xirr.word;
}

static void pSeries_xirr_info_set(int n_cpu, int value)
{
        xics_per_cpu[n_cpu]->xirr.word = value;
}

/* Set the CPPR: byte 0 of the xirr word */
static void pSeries_cppr_info(int n_cpu, u8 value)
{
        xics_per_cpu[n_cpu]->xirr.bytes[0] = value;
}

/* Set the MFRR: byte 0 of the qirr word (used to raise IPIs) */
static void pSeries_qirr_info(int n_cpu, u8 value)
{
        xics_per_cpu[n_cpu]->qirr.bytes[0] = value;
}

/* Ops table for direct (non-LPAR) register access */
static xics_ops pSeries_ops = {
        pSeries_xirr_info_get,
        pSeries_xirr_info_set,
        pSeries_cppr_info,
        pSeries_qirr_info
};

/* Active access method; switched to pSeriesLP_ops on LPAR in xics_init_IRQ() */
static xics_ops *ops = &pSeries_ops;
146
147
/* LPAR */

/* Thin wrappers around the XICS-related hypervisor calls. */

static inline long plpar_eoi(unsigned long xirr)
{
        return plpar_hcall_norets(H_EOI, xirr);
}

static inline long plpar_cppr(unsigned long cppr)
{
        return plpar_hcall_norets(H_CPPR, cppr);
}

static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
{
        return plpar_hcall_norets(H_IPI, servernum, mfrr);
}

static inline long plpar_xirr(unsigned long *xirr_ret)
{
        unsigned long dummy;
        /* H_XIRR returns the XIRR value in the first output argument */
        return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy);
}
170
171 static int pSeriesLP_xirr_info_get(int n_cpu)
172 {
173         unsigned long lpar_rc;
174         unsigned long return_value; 
175
176         lpar_rc = plpar_xirr(&return_value);
177         if (lpar_rc != H_Success)
178                 panic(" bad return code xirr - rc = %lx \n", lpar_rc); 
179         return (int)return_value;
180 }
181
182 static void pSeriesLP_xirr_info_set(int n_cpu, int value)
183 {
184         unsigned long lpar_rc;
185         unsigned long val64 = value & 0xffffffff;
186
187         lpar_rc = plpar_eoi(val64);
188         if (lpar_rc != H_Success)
189                 panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
190                       val64); 
191 }
192
193 static void pSeriesLP_cppr_info(int n_cpu, u8 value)
194 {
195         unsigned long lpar_rc;
196
197         lpar_rc = plpar_cppr(value);
198         if (lpar_rc != H_Success)
199                 panic("bad return code cppr - rc = %lx\n", lpar_rc); 
200 }
201
202 static void pSeriesLP_qirr_info(int n_cpu , u8 value)
203 {
204         unsigned long lpar_rc;
205
206         lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
207         if (lpar_rc != H_Success)
208                 panic("bad return code qirr - rc = %lx\n", lpar_rc); 
209 }
210
211 xics_ops pSeriesLP_ops = {
212         pSeriesLP_xirr_info_get,
213         pSeriesLP_xirr_info_set,
214         pSeriesLP_cppr_info,
215         pSeriesLP_qirr_info
216 };
217
/*
 * First-use hook for a XICS irq: record the real->virtual mapping in
 * irq_map so xics_get_irq() can translate incoming vectors quickly.
 * Note only an -ENOMEM result from the insert is reported; any other
 * result (e.g. an entry already present) is silently ignored.
 */
static unsigned int xics_startup(unsigned int virq)
{
        virq = irq_offset_down(virq);
        if (radix_tree_insert(&irq_map, virt_irq_to_real(virq),
                              &virt_irq_to_real_map[virq]) == -ENOMEM)
                printk(KERN_CRIT "Out of memory creating real -> virtual"
                       " IRQ mapping for irq %u (real 0x%x)\n",
                       virq, virt_irq_to_real(virq));
        return 0;       /* return value is ignored */
}
228
229 static unsigned int real_irq_to_virt(unsigned int real_irq)
230 {
231         unsigned int *ptr;
232
233         ptr = radix_tree_lookup(&irq_map, real_irq);
234         if (ptr == NULL)
235                 return NO_IRQ;
236         return ptr - virt_irq_to_real_map;
237 }
238
#ifdef CONFIG_SMP
/*
 * Choose the XICS server an irq should be delivered to, based on its
 * irq_affinity mask: the global distribution server for an all-cpus
 * mask (with CONFIG_IRQ_ALL_CPUS), otherwise the hard id of the first
 * online cpu in the mask, falling back to default_server before SMP
 * bringup completes.
 */
static int get_irq_server(unsigned int irq)
{
        cpumask_t cpumask = irq_affinity[irq];
        cpumask_t allcpus = CPU_MASK_ALL;
        cpumask_t tmp = CPU_MASK_NONE;
        unsigned int server;

#ifdef CONFIG_IRQ_ALL_CPUS
        /* For the moment only implement delivery to all cpus or one cpu */
        if (smp_threads_ready) {
                if (cpus_equal(cpumask, allcpus)) {
                        server = default_distrib_server;
                } else {
                        cpus_and(tmp, cpu_online_map, cpumask);

                        if (cpus_empty(tmp))
                                server = default_distrib_server;
                        else
                                server = get_hard_smp_processor_id(first_cpu(tmp));
                }
        } else {
                server = default_server;
        }
#else
        server = default_server;
#endif
        return server;

}
#else
/* UP: everything is delivered to the single default server */
static int get_irq_server(unsigned int irq)
{
        return default_server;
}
#endif
275
/*
 * Unmask a virtual irq: program its XIVE with the chosen server and
 * DEFAULT_PRIORITY via ibm,set-xive, then switch the source on with
 * ibm,int-on.  The IPI source is always left alone.
 */
static void xics_enable_irq(unsigned int virq)
{
        unsigned int irq;
        long call_status;
        unsigned int server;

        irq = virt_irq_to_real(irq_offset_down(virq));
        if (irq == XICS_IPI)
                return;

        server = get_irq_server(virq);
        call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
                                DEFAULT_PRIORITY);
        if (call_status != 0) {
                printk(KERN_ERR "xics_enable_irq: irq=%x: ibm_set_xive "
                       "returned %lx\n", irq, call_status);
                return;
        }

        /* Now unmask the interrupt (often a no-op) */
        call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
        if (call_status != 0) {
                printk(KERN_ERR "xics_enable_irq: irq=%x: ibm_int_on "
                       "returned %lx\n", irq, call_status);
                return;
        }
}
303
/*
 * Mask a real irq: switch the source off via ibm,int-off, then set
 * its XIVE priority to 0xff (least favored) so the slot can later be
 * removed.  The IPI source is never disabled.
 * NOTE(review): get_irq_server() is passed the REAL irq number here
 * but indexes irq_affinity[] (a virtual-irq table) -- looks
 * suspicious; compare with xics_enable_irq() which passes the virq.
 */
static void xics_disable_real_irq(unsigned int irq)
{
        long call_status;
        unsigned int server;

        if (irq == XICS_IPI)
                return;

        call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
        if (call_status != 0) {
                printk(KERN_ERR "xics_disable_real_irq: irq=%x: "
                       "ibm_int_off returned %lx\n", irq, call_status);
                return;
        }

        server = get_irq_server(irq);
        /* Have to set XIVE to 0xff to be able to remove a slot */
        call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
        if (call_status != 0) {
                printk(KERN_ERR "xics_disable_irq: irq=%x: ibm_set_xive(0xff)"
                       " returned %lx\n", irq, call_status);
                return;
        }
}
328
/* Mask a virtual irq by disabling its underlying real source. */
static void xics_disable_irq(unsigned int virq)
{
        xics_disable_real_irq(virt_irq_to_real(irq_offset_down(virq)));
}
336
/*
 * End-of-interrupt for a virtual irq: write the real irq number back
 * to this cpu's XIRR with 0xff in the top (CPPR) byte, restoring the
 * "accept anything" priority.  iosync() orders the store against
 * preceding device accesses.
 */
static void xics_end_irq(unsigned int irq)
{
        int cpu = smp_processor_id();

        iosync();
        ops->xirr_info_set(cpu, ((0xff << 24) |
                                 (virt_irq_to_real(irq_offset_down(irq)))));

}
346
/*
 * Ack an interrupt.  Only the 16 cascaded 8259 irqs (those below the
 * irq offset) need any work: ack the 8259 side, then EOI the cascade
 * vector on the XICS side.  For native XICS irqs this is a no-op.
 */
static void xics_mask_and_ack_irq(unsigned int irq)
{
        int cpu = smp_processor_id();

        if (irq < irq_offset_value()) {
                i8259_pic.ack(irq);
                iosync();
                ops->xirr_info_set(cpu, ((0xff<<24) |
                                         xics_irq_8259_cascade_real));
                iosync();
        }
}
359
360 extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
361
362 int xics_get_irq(struct pt_regs *regs)
363 {
364         unsigned int cpu = smp_processor_id();
365         unsigned int vec;
366         int irq;
367
368         vec = ops->xirr_info_get(cpu);
369         /*  (vec >> 24) == old priority */
370         vec &= 0x00ffffff;
371
372         /* for sanity, this had better be < NR_IRQS - 16 */
373         if (vec == xics_irq_8259_cascade_real) {
374                 irq = i8259_irq(cpu);
375                 if (irq == -1) {
376                         /* Spurious cascaded interrupt.  Still must ack xics */
377                         xics_end_irq(irq_offset_up(xics_irq_8259_cascade));
378
379                         irq = -1;
380                 }
381         } else if (vec == XICS_IRQ_SPURIOUS) {
382                 irq = -1;
383         } else {
384                 irq = real_irq_to_virt(vec);
385                 if (irq == NO_IRQ)
386                         irq = real_irq_to_virt_slowpath(vec);
387                 if (irq == NO_IRQ) {
388                         printk(KERN_ERR "Interrupt 0x%x (real) is invalid,"
389                                " disabling it.\n", vec);
390                         xics_disable_real_irq(vec);
391                 } else
392                         irq = irq_offset_up(irq);
393         }
394         return irq;
395 }
396
397 #ifdef CONFIG_SMP
398
399 extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
400
/*
 * Handler for the single XICS IPI.  The individual messages are
 * encoded as bits in xics_ipi_message[cpu].value; loop until all
 * pending message bits have been consumed.
 */
irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        /* reset our qirr/MFRR before scanning, so a new IPI raised
         * while we process messages re-enters this handler */
        ops->qirr_info(cpu, 0xff);

        WARN_ON(cpu_is_offline(cpu));

        while (xics_ipi_message[cpu].value) {
                if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
                                       &xics_ipi_message[cpu].value)) {
                        mb();   /* order the bit clear before the handler */
                        smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
                }
                if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_RESCHEDULE, regs);
                }
#if 0
                if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
                }
#endif
#ifdef CONFIG_DEBUGGER
                if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
                                       &xics_ipi_message[cpu].value)) {
                        mb();
                        smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
                }
#endif
        }
        return IRQ_HANDLED;
}
437
/* Raise the XICS IPI on the given cpu by writing IPI_PRIORITY to its qirr. */
void xics_cause_IPI(int cpu)
{
        ops->qirr_info(cpu, IPI_PRIORITY);
}
442
/* Per-cpu bringup: open this cpu's CPPR (0xff) so it accepts all priorities. */
void xics_setup_cpu(void)
{
        int cpu = smp_processor_id();

        ops->cppr_info(cpu, 0xff);
        iosync();
}
450
451 #endif /* CONFIG_SMP */
452
/*
 * Boot-time setup: look up the RTAS tokens used to route and (un)mask
 * sources, walk the device tree for the presentation-controller
 * register ranges and the boot cpu's server numbers, wire up the ISA
 * 8259 cascade if present, map the per-cpu registers (or select hcall
 * access on LPAR), and install the xics handlers on every irq.
 */
void xics_init_IRQ(void)
{
        int i;
        unsigned long intr_size = 0;
        struct device_node *np;
        uint *ireg, ilen, indx = 0;
        unsigned long intr_base = 0;
        struct xics_interrupt_node {
                unsigned long addr;
                unsigned long size;
        } inodes[NR_CPUS]; 

        ppc64_boot_msg(0x20, "XICS Init");

        /* RTAS services used to configure interrupt sources */
        ibm_get_xive = rtas_token("ibm,get-xive");
        ibm_set_xive = rtas_token("ibm,set-xive");
        ibm_int_on  = rtas_token("ibm,int-on");
        ibm_int_off = rtas_token("ibm,int-off");

        np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation");
        if (!np) {
                printk(KERN_WARNING "Can't find Interrupt Presentation\n");
                udbg_printf("Can't find Interrupt Presentation\n");
                while (1);      /* no usable interrupt controller: hang */
        }
nextnode:
        ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", 0);
        if (ireg) {
                /*
                 * set node starting index for this node
                 */
                indx = *ireg;
        }

        ireg = (uint *)get_property(np, "reg", &ilen);
        if (!ireg) {
                printk(KERN_WARNING "Can't find Interrupt Reg Property\n");
                udbg_printf("Can't find Interrupt Reg Property\n");
                while (1);
        }
        
        /* "reg" holds (64-bit addr, 64-bit size) pairs as 32-bit cells;
         * collect one entry per server index */
        while (ilen) {
                inodes[indx].addr = (unsigned long long)*ireg++ << 32;
                ilen -= sizeof(uint);
                inodes[indx].addr |= *ireg++;
                ilen -= sizeof(uint);
                inodes[indx].size = (unsigned long long)*ireg++ << 32;
                ilen -= sizeof(uint);
                inodes[indx].size |= *ireg++;
                ilen -= sizeof(uint);
                indx++;
                if (indx >= NR_CPUS) break;
        }

        /* more presentation nodes may follow; keep collecting */
        np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation");
        if ((indx < NR_CPUS) && np) goto nextnode;

        /* Find the server numbers for the boot cpu. */
        for (np = of_find_node_by_type(NULL, "cpu");
             np;
             np = of_find_node_by_type(np, "cpu")) {
                ireg = (uint *)get_property(np, "reg", &ilen);
                if (ireg && ireg[0] == hard_smp_processor_id()) {
                        ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
                        i = ilen / sizeof(int);
                        if (ireg && i > 0) {
                                default_server = ireg[0];
                                default_distrib_server = ireg[i-1]; /* take last element */
                        }
                        break;
                }
        }
        of_node_put(np);

        intr_base = inodes[0].addr;
        intr_size = (ulong)inodes[0].size;

        /* Locate the cascaded ISA 8259, if this machine has one */
        np = of_find_node_by_type(NULL, "interrupt-controller");
        if (!np) {
                printk(KERN_WARNING "xics:  no ISA Interrupt Controller\n");
                xics_irq_8259_cascade_real = -1;
                xics_irq_8259_cascade = -1;
        } else {
                ireg = (uint *) get_property(np, "interrupts", 0);
                if (!ireg) {
                        printk(KERN_WARNING "Can't find ISA Interrupts Property\n");
                        udbg_printf("Can't find ISA Interrupts Property\n");
                        while (1);
                }
                xics_irq_8259_cascade_real = *ireg;
                xics_irq_8259_cascade
                        = virt_irq_create_mapping(xics_irq_8259_cascade_real);
                of_node_put(np);
        }

        /* Bare-metal pSeries maps the registers directly; LPAR switches
         * to the hypervisor-call ops and leaves xics_per_cpu unmapped */
        if (systemcfg->platform == PLATFORM_PSERIES) {
#ifdef CONFIG_SMP
                for_each_cpu(i) {
                        /* FIXME: Do this dynamically! --RR */
                        if (!cpu_present_at_boot(i))
                                continue;
                        xics_per_cpu[i] = __ioremap((ulong)inodes[get_hard_smp_processor_id(i)].addr, 
                                                    (ulong)inodes[get_hard_smp_processor_id(i)].size,
                                                    _PAGE_NO_CACHE);
                }
#else
                xics_per_cpu[0] = __ioremap((ulong)intr_base, intr_size,
                                            _PAGE_NO_CACHE);
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_PSERIES
        /* actually iSeries does not use any of xics...but it has link dependencies
         * for now, except this new one...
         */
        } else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
                ops = &pSeriesLP_ops;
#endif
        }

        /* irqs 0..15 are the cascaded 8259's; the rest are native XICS */
        xics_8259_pic.enable = i8259_pic.enable;
        xics_8259_pic.disable = i8259_pic.disable;
        for (i = 0; i < 16; ++i)
                get_irq_desc(i)->handler = &xics_8259_pic;
        for (; i < NR_IRQS; ++i)
                get_irq_desc(i)->handler = &xics_pic;

        /* accept all interrupt priorities on the boot cpu */
        ops->cppr_info(boot_cpuid, 0xff);
        iosync();

        ppc64_boot_msg(0x21, "XICS Done");
}
583
584 /*
585  * We cant do this in init_IRQ because we need the memory subsystem up for
586  * request_irq()
587  */
588 static int __init xics_setup_i8259(void)
589 {
590         if (naca->interrupt_controller == IC_PPC_XIC &&
591             xics_irq_8259_cascade != -1) {
592                 if (request_irq(irq_offset_up(xics_irq_8259_cascade),
593                                 no_action, 0, "8259 cascade", 0))
594                         printk(KERN_ERR "xics_init_IRQ: couldn't get 8259 cascade\n");
595                 i8259_init();
596         }
597         return 0;
598 }
599 arch_initcall(xics_setup_i8259);
600
601 #ifdef CONFIG_SMP
602 void xics_request_IPIs(void)
603 {
604         virt_irq_to_real_map[XICS_IPI] = XICS_IPI;
605
606         /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
607         request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, SA_INTERRUPT,
608                     "IPI", 0);
609         get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU;
610 }
611 #endif
612
613 static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
614 {
615         unsigned int irq;
616         long status;
617         unsigned long xics_status[2];
618         unsigned long newmask;
619         cpumask_t allcpus = CPU_MASK_ALL;
620         cpumask_t tmp = CPU_MASK_NONE;
621
622         irq = virt_irq_to_real(irq_offset_down(virq));
623         if (irq == XICS_IPI || irq == NO_IRQ)
624                 return;
625
626         status = rtas_call(ibm_get_xive, 1, 3, (void *)&xics_status, irq);
627
628         if (status) {
629                 printk(KERN_ERR "xics_set_affinity: irq=%d ibm,get-xive "
630                        "returns %ld\n", irq, status);
631                 return;
632         }
633
634         /* For the moment only implement delivery to all cpus or one cpu */
635         if (cpus_equal(cpumask, allcpus)) {
636                 newmask = default_distrib_server;
637         } else {
638                 cpus_and(tmp, cpu_online_map, cpumask);
639                 if (cpus_empty(tmp))
640                         return;
641                 newmask = get_hard_smp_processor_id(first_cpu(tmp));
642         }
643
644         status = rtas_call(ibm_set_xive, 3, 1, NULL,
645                                 irq, newmask, xics_status[1]);
646
647         if (status) {
648                 printk(KERN_ERR "xics_set_affinity irq=%d ibm,set-xive "
649                        "returns %ld\n", irq, status);
650                 return;
651         }
652 }
653
654 #ifdef CONFIG_HOTPLUG_CPU
655
/* Interrupts are disabled. */
/*
 * CPU-offline path: stop accepting external interrupts on this cpu,
 * remove it from the Global Interrupt Queue, then re-point every irq
 * whose XIVE targets exactly this cpu at the distribution server.
 * IPIs are deliberately kept working throughout.
 */
void xics_migrate_irqs_away(void)
{
        int set_indicator = rtas_token("set-indicator");
        const unsigned long giqs = 9005UL; /* Global Interrupt Queue Server */
        unsigned long status = 0;
        unsigned int irq, cpu = smp_processor_id();
        unsigned long xics_status[2];   /* [0] = server, [1] = priority */
        unsigned long flags;

        BUG_ON(set_indicator == RTAS_UNKNOWN_SERVICE);

        /* Reject any interrupt that was queued to us... */
        ops->cppr_info(cpu, 0);
        iosync();

        /* Refuse any new interrupts... */
        rtas_call(set_indicator, 3, 1, &status, giqs,
                  hard_smp_processor_id(), 0UL);
        WARN_ON(status != 0);

        /* Allow IPIs again... */
        ops->cppr_info(cpu, DEFAULT_PRIORITY);
        iosync();

        printk(KERN_WARNING "HOTPLUG: Migrating IRQs away\n");
        for_each_irq(irq) {
                irq_desc_t *desc = get_irq_desc(irq);

                /* We need to get IPIs still. */
                if (irq_offset_down(irq) == XICS_IPI)
                        continue;

                /* We only need to migrate enabled IRQS */
                if (desc == NULL || desc->handler == NULL
                    || desc->action == NULL
                    || desc->handler->set_affinity == NULL)
                        continue;

                spin_lock_irqsave(&desc->lock, flags);

                status = rtas_call(ibm_get_xive, 1, 3, (void *)&xics_status,
                                   irq);
                if (status) {
                        printk(KERN_ERR "migrate_irqs_away: irq=%d "
                                        "ibm,get-xive returns %ld\n",
                                        irq, status);
                        goto unlock;
                }

                /*
                 * We only support delivery to all cpus or to one cpu.
                 * The irq has to be migrated only in the single cpu
                 * case.
                 */
                if (xics_status[0] != get_hard_smp_processor_id(cpu))
                        goto unlock;

                printk(KERN_WARNING "IRQ %d affinity broken off cpu %u\n",
                       irq, cpu);

                /* Reset affinity to all cpus */
                xics_status[0] = default_distrib_server;

                status = rtas_call(ibm_set_xive, 3, 1, NULL,
                                irq, xics_status[0], xics_status[1]);
                if (status)
                        printk(KERN_ERR "migrate_irqs_away irq=%d "
                                        "ibm,set-xive returns %ld\n",
                                        irq, status);

unlock:
                spin_unlock_irqrestore(&desc->lock, flags);
        }

}
732 #endif