This commit was manufactured by cvs2svn to create a tag.
[linux-2.6.git] / arch / ppc64 / kernel / xics.c
1 /* 
2  * arch/ppc64/kernel/xics.c
3  *
4  * Copyright 2000 IBM Corporation.
5  *
6  *  This program is free software; you can redistribute it and/or
7  *  modify it under the terms of the GNU General Public License
8  *  as published by the Free Software Foundation; either version
9  *  2 of the License, or (at your option) any later version.
10  */
11 #include <linux/config.h>
12 #include <linux/types.h>
13 #include <linux/threads.h>
14 #include <linux/kernel.h>
15 #include <linux/irq.h>
16 #include <linux/smp.h>
17 #include <linux/interrupt.h>
18 #include <linux/signal.h>
19 #include <linux/init.h>
20 #include <linux/gfp.h>
21 #include <linux/radix-tree.h>
22 #include <linux/cpu.h>
23 #include <asm/prom.h>
24 #include <asm/io.h>
25 #include <asm/pgtable.h>
26 #include <asm/smp.h>
27 #include <asm/naca.h>
28 #include <asm/rtas.h>
29 #include <asm/xics.h>
30 #include <asm/ppcdebug.h>
31 #include <asm/hvcall.h>
32 #include <asm/machdep.h>
33
34 #include "i8259.h"
35
36 static unsigned int xics_startup(unsigned int irq);
37 static void xics_enable_irq(unsigned int irq);
38 static void xics_disable_irq(unsigned int irq);
39 static void xics_mask_and_ack_irq(unsigned int irq);
40 static void xics_end_irq(unsigned int irq);
41 static void xics_set_affinity(unsigned int irq_nr, cpumask_t cpumask);
42
/*
 * irq_desc handler operations for interrupts routed through XICS.
 * Installed on irq_desc entries 16..NR_IRQS-1 in xics_init_IRQ().
 */
struct hw_interrupt_type xics_pic = {
	.typename = " XICS     ",
	.startup = xics_startup,
	.enable = xics_enable_irq,
	.disable = xics_disable_irq,
	.ack = xics_mask_and_ack_irq,
	.end = xics_end_irq,
	.set_affinity = xics_set_affinity
};

/*
 * Handler for the 16 legacy ISA interrupts cascaded through the 8259.
 * .enable/.disable are copied from i8259_pic in xics_init_IRQ(); only
 * the ack (which must also EOI the XICS cascade) is XICS-specific.
 */
struct hw_interrupt_type xics_8259_pic = {
	.typename = " XICS/8259",
	.ack = xics_mask_and_ack_irq,
};
57
58 /* This is used to map real irq numbers to virtual */
59 static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);
60
61 #define XICS_IPI                2
62 #define XICS_IRQ_SPURIOUS       0
63
64 /* Want a priority other than 0.  Various HW issues require this. */
65 #define DEFAULT_PRIORITY        5
66
/*
 * Mark IPIs as higher priority so we can take them inside interrupts
 * that aren't marked SA_INTERRUPT.
 */
71 #define IPI_PRIORITY            4
72
/*
 * Layout of one cpu's XICS interrupt presentation registers, mapped
 * from the presentation node's "reg" property in xics_init_IRQ().
 * Each register can be accessed as a 32-bit word or byte-by-byte.
 */
struct xics_ipl {
	union {
		u32 word;
		u8 bytes[4];
	} xirr_poll;	/* poll view of XIRR -- unused in this file */
	union {
		u32 word;
		u8 bytes[4];
	} xirr;		/* XIRR; byte 0 doubles as the CPPR */
	u32 dummy;	/* padding up to the QIRR offset */
	union {
		u32 word;
		u8 bytes[4];
	} qirr;		/* QIRR: written to raise/clear the IPI */
};
88
/* Per-cpu mapping of the presentation registers; set up in
 * xics_init_IRQ() on native pSeries (not populated under LPAR). */
static struct xics_ipl *xics_per_cpu[NR_CPUS];

/* Virtual and real irq numbers of the 8259 cascade; -1 when no ISA
 * interrupt controller node exists. */
static int xics_irq_8259_cascade = 0;
static int xics_irq_8259_cascade_real = 0;
/* Boot cpu's interrupt server number and the global-distribution
 * server, both read from the boot cpu's device-tree node. */
static unsigned int default_server = 0xFF;
/* also referenced in smp.c... */
unsigned int default_distrib_server = 0;

/*
 * XICS only has a single IPI, so encode the messages per CPU
 */
struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;

/* RTAS service tokens */
int ibm_get_xive;
int ibm_set_xive;
int ibm_int_on;
int ibm_int_off;
107
/*
 * Accessors for the presentation registers.  Two implementations:
 * direct MMIO (pSeries_ops) and hypervisor calls (pSeriesLP_ops);
 * the `ops' pointer selects the active one at init time.
 */
typedef struct {
	int (*xirr_info_get)(int cpu);			/* read XIRR */
	void (*xirr_info_set)(int cpu, int val);	/* write XIRR (EOI) */
	void (*cppr_info)(int cpu, u8 val);		/* write priority byte */
	void (*qirr_info)(int cpu, u8 val);		/* write IPI register */
} xics_ops;
114
115
116 /* SMP */
117
118 static int pSeries_xirr_info_get(int n_cpu)
119 {
120         return xics_per_cpu[n_cpu]->xirr.word;
121 }
122
123 static void pSeries_xirr_info_set(int n_cpu, int value)
124 {
125         xics_per_cpu[n_cpu]->xirr.word = value;
126 }
127
128 static void pSeries_cppr_info(int n_cpu, u8 value)
129 {
130         xics_per_cpu[n_cpu]->xirr.bytes[0] = value;
131 }
132
133 static void pSeries_qirr_info(int n_cpu, u8 value)
134 {
135         xics_per_cpu[n_cpu]->qirr.bytes[0] = value;
136 }
137
138 static xics_ops pSeries_ops = {
139         pSeries_xirr_info_get,
140         pSeries_xirr_info_set,
141         pSeries_cppr_info,
142         pSeries_qirr_info
143 };
144
145 static xics_ops *ops = &pSeries_ops;
146
147
148 /* LPAR */
149
/* Thin typed wrappers around the hypervisor interrupt calls. */

/* End-of-interrupt for the given XIRR value. */
static inline long plpar_eoi(unsigned long xirr)
{
	return plpar_hcall_norets(H_EOI, xirr);
}

/* Set this cpu's current processor priority. */
static inline long plpar_cppr(unsigned long cppr)
{
	return plpar_hcall_norets(H_CPPR, cppr);
}

/* Write `mfrr' to the target server's IPI register. */
static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr)
{
	return plpar_hcall_norets(H_IPI, servernum, mfrr);
}

/* Read the XIRR; only the first return slot is meaningful. */
static inline long plpar_xirr(unsigned long *xirr_ret)
{
	unsigned long dummy;
	return plpar_hcall(H_XIRR, 0, 0, 0, 0, xirr_ret, &dummy, &dummy);
}
170
171 static int pSeriesLP_xirr_info_get(int n_cpu)
172 {
173         unsigned long lpar_rc;
174         unsigned long return_value; 
175
176         lpar_rc = plpar_xirr(&return_value);
177         if (lpar_rc != H_Success)
178                 panic(" bad return code xirr - rc = %lx \n", lpar_rc); 
179         return (int)return_value;
180 }
181
182 static void pSeriesLP_xirr_info_set(int n_cpu, int value)
183 {
184         unsigned long lpar_rc;
185         unsigned long val64 = value & 0xffffffff;
186
187         lpar_rc = plpar_eoi(val64);
188         if (lpar_rc != H_Success)
189                 panic("bad return code EOI - rc = %ld, value=%lx\n", lpar_rc,
190                       val64); 
191 }
192
193 static void pSeriesLP_cppr_info(int n_cpu, u8 value)
194 {
195         unsigned long lpar_rc;
196
197         lpar_rc = plpar_cppr(value);
198         if (lpar_rc != H_Success)
199                 panic("bad return code cppr - rc = %lx\n", lpar_rc); 
200 }
201
202 static void pSeriesLP_qirr_info(int n_cpu , u8 value)
203 {
204         unsigned long lpar_rc;
205
206         lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value);
207         if (lpar_rc != H_Success)
208                 panic("bad return code qirr - rc = %lx\n", lpar_rc); 
209 }
210
211 xics_ops pSeriesLP_ops = {
212         pSeriesLP_xirr_info_get,
213         pSeriesLP_xirr_info_set,
214         pSeriesLP_cppr_info,
215         pSeriesLP_qirr_info
216 };
217
/*
 * First-use hook for a XICS irq: record the real->virtual mapping in
 * the radix tree so xics_get_irq() can translate incoming vectors.
 * Errors other than -ENOMEM (e.g. the slot already populated by an
 * earlier startup) are deliberately ignored.
 */
static unsigned int xics_startup(unsigned int virq)
{
	virq = irq_offset_down(virq);
	if (radix_tree_insert(&irq_map, virt_irq_to_real(virq),
			      &virt_irq_to_real_map[virq]) == -ENOMEM)
		printk(KERN_CRIT "Out of memory creating real -> virtual"
		       " IRQ mapping for irq %u (real 0x%x)\n",
		       virq, virt_irq_to_real(virq));
	return 0;	/* return value is ignored */
}
228
229 static unsigned int real_irq_to_virt(unsigned int real_irq)
230 {
231         unsigned int *ptr;
232
233         ptr = radix_tree_lookup(&irq_map, real_irq);
234         if (ptr == NULL)
235                 return NO_IRQ;
236         return ptr - virt_irq_to_real_map;
237 }
238
#ifdef CONFIG_SMP
/*
 * Pick the interrupt server for an irq from its affinity mask.
 * Only "one cpu" and "all cpus" distribution are supported; any other
 * mask collapses to its first online cpu.
 */
static int get_irq_server(unsigned int irq)
{
	cpumask_t cpumask = irq_affinity[irq];
	cpumask_t tmp = CPU_MASK_NONE;
	unsigned int server;

#ifdef CONFIG_IRQ_ALL_CPUS
	/* For the moment only implement delivery to all cpus or one cpu */
	if (smp_threads_ready) {
		if (cpus_equal(cpumask, CPU_MASK_ALL)) {
			server = default_distrib_server;
		} else {
			cpus_and(tmp, cpu_online_map, cpumask);

			/* No online cpu in the mask: distribute instead. */
			if (cpus_empty(tmp))
				server = default_distrib_server;
			else
				server = get_hard_smp_processor_id(first_cpu(tmp));
		}
	} else {
		/* Secondaries not up yet: everything to the boot cpu. */
		server = default_server;
	}
#else
	server = default_server;
#endif
	return server;

}
#else
/* UP build: the boot cpu is the only possible server. */
static int get_irq_server(unsigned int irq)
{
	return default_server;
}
#endif
274
275 static void xics_enable_irq(unsigned int virq)
276 {
277         unsigned int irq;
278         int call_status;
279         unsigned int server;
280
281         irq = virt_irq_to_real(irq_offset_down(virq));
282         if (irq == XICS_IPI)
283                 return;
284
285         server = get_irq_server(virq);
286         call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
287                                 DEFAULT_PRIORITY);
288         if (call_status != 0) {
289                 printk(KERN_ERR "xics_enable_irq: irq=%x: ibm_set_xive "
290                        "returned %x\n", irq, call_status);
291                 return;
292         }
293
294         /* Now unmask the interrupt (often a no-op) */
295         call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
296         if (call_status != 0) {
297                 printk(KERN_ERR "xics_enable_irq: irq=%x: ibm_int_on "
298                        "returned %x\n", irq, call_status);
299                 return;
300         }
301 }
302
303 static void xics_disable_real_irq(unsigned int irq)
304 {
305         int call_status;
306         unsigned int server;
307
308         if (irq == XICS_IPI)
309                 return;
310
311         call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
312         if (call_status != 0) {
313                 printk(KERN_ERR "xics_disable_real_irq: irq=%x: "
314                        "ibm_int_off returned %x\n", irq, call_status);
315                 return;
316         }
317
318         server = get_irq_server(irq);
319         /* Have to set XIVE to 0xff to be able to remove a slot */
320         call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
321         if (call_status != 0) {
322                 printk(KERN_ERR "xics_disable_irq: irq=%x: ibm_set_xive(0xff)"
323                        " returned %x\n", irq, call_status);
324                 return;
325         }
326 }
327
/* Disable a virtual irq by translating it and masking the real source. */
static void xics_disable_irq(unsigned int virq)
{
	xics_disable_real_irq(virt_irq_to_real(irq_offset_down(virq)));
}
335
336 static void xics_end_irq(unsigned int irq)
337 {
338         int cpu = smp_processor_id();
339
340         iosync();
341         ops->xirr_info_set(cpu, ((0xff << 24) |
342                                  (virt_irq_to_real(irq_offset_down(irq)))));
343
344 }
345
/*
 * Ack hook shared by xics_pic and xics_8259_pic.  Only 8259-cascaded
 * sources (virq below the ISA offset) need work here: ack the 8259
 * itself, then EOI the XICS cascade vector.  Native XICS sources are
 * EOIed later in xics_end_irq().
 */
static void xics_mask_and_ack_irq(unsigned int irq)
{
	int cpu = smp_processor_id();

	if (irq < irq_offset_value()) {
		i8259_pic.ack(irq);
		iosync();	/* order the 8259 ack before the XICS EOI */
		ops->xirr_info_set(cpu, ((0xff<<24) |
					 xics_irq_8259_cascade_real));
		iosync();
	}
}
358
359 extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
360
361 int xics_get_irq(struct pt_regs *regs)
362 {
363         unsigned int cpu = smp_processor_id();
364         unsigned int vec;
365         int irq;
366
367         vec = ops->xirr_info_get(cpu);
368         /*  (vec >> 24) == old priority */
369         vec &= 0x00ffffff;
370
371         /* for sanity, this had better be < NR_IRQS - 16 */
372         if (vec == xics_irq_8259_cascade_real) {
373                 irq = i8259_irq(cpu);
374                 if (irq == -1) {
375                         /* Spurious cascaded interrupt.  Still must ack xics */
376                         xics_end_irq(irq_offset_up(xics_irq_8259_cascade));
377
378                         irq = -1;
379                 }
380         } else if (vec == XICS_IRQ_SPURIOUS) {
381                 irq = -1;
382         } else {
383                 irq = real_irq_to_virt(vec);
384                 if (irq == NO_IRQ)
385                         irq = real_irq_to_virt_slowpath(vec);
386                 if (irq == NO_IRQ) {
387                         printk(KERN_ERR "Interrupt 0x%x (real) is invalid,"
388                                " disabling it.\n", vec);
389                         xics_disable_real_irq(vec);
390                 } else
391                         irq = irq_offset_up(irq);
392         }
393         return irq;
394 }
395
396 #ifdef CONFIG_SMP
397
398 extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
399
/*
 * Handler for the single XICS IPI.  Writes 0xff to our QIRR first
 * (presumably clearing the latched IPI so a new one re-triggers --
 * compare xics_cause_IPI which writes IPI_PRIORITY), then dispatches
 * every message bit currently set for this cpu.
 */
irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	ops->qirr_info(cpu, 0xff);

	WARN_ON(cpu_is_offline(cpu));

	/* Drain all pending message bits before returning. */
	while (xics_ipi_message[cpu].value) {
		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION,
				       &xics_ipi_message[cpu].value)) {
			mb();	/* order the bit clear before handling */
			smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
		}
		if (test_and_clear_bit(PPC_MSG_RESCHEDULE,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_RESCHEDULE, regs);
		}
#if 0
		if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_MIGRATE_TASK, regs);
		}
#endif
#ifdef CONFIG_DEBUGGER
		if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
				       &xics_ipi_message[cpu].value)) {
			mb();
			smp_message_recv(PPC_MSG_DEBUGGER_BREAK, regs);
		}
#endif
	}
	return IRQ_HANDLED;
}
436
/* Raise an IPI on the target cpu by writing its QIRR. */
void xics_cause_IPI(int cpu)
{
	ops->qirr_info(cpu, IPI_PRIORITY);
}

/* Per-cpu bringup: set this cpu's interrupt priority to 0xff. */
void xics_setup_cpu(void)
{
	int cpu = smp_processor_id();

	ops->cppr_info(cpu, 0xff);
	iosync();
}
449
450 #endif /* CONFIG_SMP */
451
/*
 * Boot-time XICS initialisation:
 *  - look up the RTAS service tokens,
 *  - walk the interrupt-presentation node(s) to collect per-server
 *    register addresses,
 *  - find the boot cpu's server numbers,
 *  - locate the ISA 8259 cascade (if any),
 *  - map the presentation registers (native) or select the hypervisor
 *    ops (LPAR),
 *  - install the irq_desc handlers and open up this cpu's priority.
 */
void xics_init_IRQ(void)
{
	int i;
	unsigned long intr_size = 0;
	struct device_node *np;
	uint *ireg, ilen, indx = 0;
	unsigned long intr_base = 0;
	struct xics_interrupt_node {
		unsigned long addr;
		unsigned long size;
	} inodes[NR_CPUS]; 

	ppc64_boot_msg(0x20, "XICS Init");

	ibm_get_xive = rtas_token("ibm,get-xive");
	ibm_set_xive = rtas_token("ibm,set-xive");
	ibm_int_on  = rtas_token("ibm,int-on");
	ibm_int_off = rtas_token("ibm,int-off");

	np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation");
	if (!np) {
		/* Cannot take interrupts at all: hang here on purpose. */
		printk(KERN_WARNING "Can't find Interrupt Presentation\n");
		udbg_printf("Can't find Interrupt Presentation\n");
		while (1);
	}
nextnode:
	ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", 0);
	if (ireg) {
		/*
		 * set node starting index for this node
		 */
		indx = *ireg;
	}

	ireg = (uint *)get_property(np, "reg", &ilen);
	if (!ireg) {
		printk(KERN_WARNING "Can't find Interrupt Reg Property\n");
		udbg_printf("Can't find Interrupt Reg Property\n");
		while (1);
	}
	
	/* "reg" holds (addr, size) pairs of 64-bit values, each split
	 * into two 32-bit cells: high word first, then low word. */
	while (ilen) {
		inodes[indx].addr = (unsigned long long)*ireg++ << 32;
		ilen -= sizeof(uint);
		inodes[indx].addr |= *ireg++;
		ilen -= sizeof(uint);
		inodes[indx].size = (unsigned long long)*ireg++ << 32;
		ilen -= sizeof(uint);
		inodes[indx].size |= *ireg++;
		ilen -= sizeof(uint);
		indx++;
		if (indx >= NR_CPUS) break;
	}

	/* More presentation nodes may follow; keep filling inodes[]. */
	np = of_find_node_by_type(np, "PowerPC-External-Interrupt-Presentation");
	if ((indx < NR_CPUS) && np) goto nextnode;

	/* Find the server numbers for the boot cpu. */
	for (np = of_find_node_by_type(NULL, "cpu");
	     np;
	     np = of_find_node_by_type(np, "cpu")) {
		ireg = (uint *)get_property(np, "reg", &ilen);
		if (ireg && ireg[0] == hard_smp_processor_id()) {
			ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
			i = ilen / sizeof(int);
			if (ireg && i > 0) {
				default_server = ireg[0];
				default_distrib_server = ireg[i-1]; /* take last element */
			}
			break;
		}
	}
	of_node_put(np);

	intr_base = inodes[0].addr;
	intr_size = (ulong)inodes[0].size;

	np = of_find_node_by_type(NULL, "interrupt-controller");
	if (!np) {
		/* No ISA bridge: mark the cascade as absent. */
		printk(KERN_WARNING "xics:  no ISA Interrupt Controller\n");
		xics_irq_8259_cascade_real = -1;
		xics_irq_8259_cascade = -1;
	} else {
		ireg = (uint *) get_property(np, "interrupts", 0);
		if (!ireg) {
			printk(KERN_WARNING "Can't find ISA Interrupts Property\n");
			udbg_printf("Can't find ISA Interrupts Property\n");
			while (1);
		}
		xics_irq_8259_cascade_real = *ireg;
		xics_irq_8259_cascade
			= virt_irq_create_mapping(xics_irq_8259_cascade_real);
		of_node_put(np);
	}

	if (systemcfg->platform == PLATFORM_PSERIES) {
#ifdef CONFIG_SMP
		/* Map each present cpu's presentation registers, indexed
		 * by hardware cpu (server) number. */
		for_each_cpu(i) {
			/* FIXME: Do this dynamically! --RR */
			if (!cpu_present_at_boot(i))
				continue;
			xics_per_cpu[i] = __ioremap((ulong)inodes[get_hard_smp_processor_id(i)].addr, 
						    (ulong)inodes[get_hard_smp_processor_id(i)].size,
						    _PAGE_NO_CACHE);
		}
#else
		xics_per_cpu[0] = __ioremap((ulong)intr_base, intr_size,
					    _PAGE_NO_CACHE);
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_PSERIES
	/* actually iSeries does not use any of xics...but it has link dependencies
	 * for now, except this new one...
	 */
	} else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
		/* LPAR: all register access goes through hypervisor calls. */
		ops = &pSeriesLP_ops;
#endif
	}

	/* ISA irqs 0-15 go to the 8259 flavour, the rest to XICS proper. */
	xics_8259_pic.enable = i8259_pic.enable;
	xics_8259_pic.disable = i8259_pic.disable;
	for (i = 0; i < 16; ++i)
		get_irq_desc(i)->handler = &xics_8259_pic;
	for (; i < NR_IRQS; ++i)
		get_irq_desc(i)->handler = &xics_pic;

	/* Open up the boot cpu for interrupts. */
	ops->cppr_info(boot_cpuid, 0xff);
	iosync();

	ppc64_boot_msg(0x21, "XICS Done");
}
582
/*
 * We can't do this in init_IRQ because we need the memory subsystem up for
 * request_irq()
 */
587 static int __init xics_setup_i8259(void)
588 {
589         if (naca->interrupt_controller == IC_PPC_XIC &&
590             xics_irq_8259_cascade != -1) {
591                 if (request_irq(irq_offset_up(xics_irq_8259_cascade),
592                                 no_action, 0, "8259 cascade", 0))
593                         printk(KERN_ERR "xics_init_IRQ: couldn't get 8259 cascade\n");
594                 i8259_init();
595         }
596         return 0;
597 }
598 arch_initcall(xics_setup_i8259);
599
600 #ifdef CONFIG_SMP
601 void xics_request_IPIs(void)
602 {
603         virt_irq_to_real_map[XICS_IPI] = XICS_IPI;
604
605         /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
606         request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, SA_INTERRUPT,
607                     "IPI", 0);
608         get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU;
609 }
610 #endif
611
/*
 * Retarget a virtual irq at the cpu(s) in cpumask.  Reads the current
 * [server, priority] pair with ibm,get-xive so the priority can be
 * preserved, then rewrites only the server.  Only "one cpu" or
 * "all cpus" delivery is supported.
 */
static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
{
	unsigned int irq;
	int status;
	int xics_status[2];
	unsigned long newmask;
	cpumask_t tmp = CPU_MASK_NONE;

	irq = virt_irq_to_real(irq_offset_down(virq));
	/* The IPI and unmapped irqs cannot be retargeted. */
	if (irq == XICS_IPI || irq == NO_IRQ)
		return;

	status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);

	if (status) {
		printk(KERN_ERR "xics_set_affinity: irq=%d ibm,get-xive "
		       "returns %d\n", irq, status);
		return;
	}

	/* For the moment only implement delivery to all cpus or one cpu */
	if (cpus_equal(cpumask, CPU_MASK_ALL)) {
		newmask = default_distrib_server;
	} else {
		cpus_and(tmp, cpu_online_map, cpumask);
		/* No online cpu in the mask: leave the irq untouched. */
		if (cpus_empty(tmp))
			return;
		newmask = get_hard_smp_processor_id(first_cpu(tmp));
	}

	status = rtas_call(ibm_set_xive, 3, 1, NULL,
				irq, newmask, xics_status[1]);

	if (status) {
		printk(KERN_ERR "xics_set_affinity irq=%d ibm,set-xive "
		       "returns %d\n", irq, status);
		return;
	}
}
651
652 #ifdef CONFIG_HOTPLUG_CPU
653
/* Interrupts are disabled. */
/*
 * CPU hot-unplug helper: remove this cpu from the global interrupt
 * queue and retarget every irq bound exclusively to it at the default
 * distribution server.  IPIs keep working so the cpu can still be
 * driven through the offline sequence.
 */
void xics_migrate_irqs_away(void)
{
	int set_indicator = rtas_token("set-indicator");
	const unsigned int giqs = 9005UL; /* Global Interrupt Queue Server */
	int status = 0;
	unsigned int irq, cpu = smp_processor_id();
	int xics_status[2];
	unsigned long flags;

	BUG_ON(set_indicator == RTAS_UNKNOWN_SERVICE);

	/* Reject any interrupt that was queued to us... */
	ops->cppr_info(cpu, 0);
	iosync();

	/* Refuse any new interrupts... */
	rtas_call(set_indicator, 3, 1, &status, giqs,
		  hard_smp_processor_id(), 0);
	WARN_ON(status != 0);

	/* Allow IPIs again... */
	ops->cppr_info(cpu, DEFAULT_PRIORITY);
	iosync();

	printk(KERN_WARNING "HOTPLUG: Migrating IRQs away\n");
	for_each_irq(irq) {
		irq_desc_t *desc = get_irq_desc(irq);

		/* We need to get IPIs still. */
		if (irq_offset_down(irq) == XICS_IPI)
			continue;

		/* We only need to migrate enabled IRQS */
		if (desc == NULL || desc->handler == NULL
		    || desc->action == NULL
		    || desc->handler->set_affinity == NULL)
			continue;

		spin_lock_irqsave(&desc->lock, flags);

		status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
		if (status) {
			printk(KERN_ERR "migrate_irqs_away: irq=%d "
					"ibm,get-xive returns %d\n",
					irq, status);
			goto unlock;
		}

		/*
		 * We only support delivery to all cpus or to one cpu.
		 * The irq has to be migrated only in the single cpu
		 * case.
		 */
		if (xics_status[0] != get_hard_smp_processor_id(cpu))
			goto unlock;

		printk(KERN_WARNING "IRQ %d affinity broken off cpu %u\n",
		       irq, cpu);

		/* Reset affinity to all cpus */
		xics_status[0] = default_distrib_server;

		/* Keep the old priority (xics_status[1]). */
		status = rtas_call(ibm_set_xive, 3, 1, NULL,
				irq, xics_status[0], xics_status[1]);
		if (status)
			printk(KERN_ERR "migrate_irqs_away irq=%d "
					"ibm,set-xive returns %d\n",
					irq, status);

unlock:
		spin_unlock_irqrestore(&desc->lock, flags);
	}

}
729 #endif