#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
-#include <asm/naca.h>
#include <asm/rtas.h>
#include <asm/xics.h>
-#include <asm/ppcdebug.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
};
/* This is used to map real irq numbers to virtual */
-static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_KERNEL);
+static struct radix_tree_root irq_map = RADIX_TREE_INIT(GFP_ATOMIC);
#define XICS_IPI 2
#define XICS_IRQ_SPURIOUS 0
} qirr;
};
-static struct xics_ipl *xics_per_cpu[NR_CPUS];
+static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS];
static int xics_irq_8259_cascade = 0;
static int xics_irq_8259_cascade_real = 0;
static unsigned int default_server = 0xFF;
/* also referenced in smp.c... */
unsigned int default_distrib_server = 0;
+unsigned int interrupt_server_size = 8;
/*
* XICS only has a single IPI, so encode the messages per CPU
static int pSeries_xirr_info_get(int n_cpu)
{
- return xics_per_cpu[n_cpu]->xirr.word;
+ return in_be32(&xics_per_cpu[n_cpu]->xirr.word);
}
/*
 * Write the XIRR word for the given cpu (used to EOI an interrupt).
 * MMIO store via out_be32 for ordering/endianness, matching the
 * in_be32 used on the read side.
 */
static void pSeries_xirr_info_set(int n_cpu, int value)
{
	out_be32(&xics_per_cpu[n_cpu]->xirr.word, value);
}
/*
 * Set the CPPR (current processor priority register) — byte 0 of the
 * XIRR word — for the given cpu.  Single-byte MMIO store.
 */
static void pSeries_cppr_info(int n_cpu, u8 value)
{
	out_8(&xics_per_cpu[n_cpu]->xirr.bytes[0], value);
}
/*
 * Write the QIRR (queued interrupt request register) byte for the given
 * cpu; used to raise/clear the inter-processor interrupt.
 */
static void pSeries_qirr_info(int n_cpu, u8 value)
{
	out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value);
}
static xics_ops pSeries_ops = {
val64);
}
-static void pSeriesLP_cppr_info(int n_cpu, u8 value)
+void pSeriesLP_cppr_info(int n_cpu, u8 value)
{
unsigned long lpar_rc;
static unsigned int xics_startup(unsigned int virq)
{
- virq = irq_offset_down(virq);
- if (radix_tree_insert(&irq_map, virt_irq_to_real(virq),
- &virt_irq_to_real_map[virq]) == -ENOMEM)
+ unsigned int irq;
+
+ irq = irq_offset_down(virq);
+ if (radix_tree_insert(&irq_map, virt_irq_to_real(irq),
+ &virt_irq_to_real_map[irq]) == -ENOMEM)
printk(KERN_CRIT "Out of memory creating real -> virtual"
" IRQ mapping for irq %u (real 0x%x)\n",
- virq, virt_irq_to_real(virq));
+ virq, virt_irq_to_real(irq));
+ xics_enable_irq(virq);
return 0; /* return value is ignored */
}
#ifdef CONFIG_SMP
static int get_irq_server(unsigned int irq)
{
+ unsigned int server;
+ /* For the moment only implement delivery to all cpus or one cpu */
cpumask_t cpumask = irq_affinity[irq];
- cpumask_t allcpus = CPU_MASK_ALL;
cpumask_t tmp = CPU_MASK_NONE;
- unsigned int server;
-#ifdef CONFIG_IRQ_ALL_CPUS
- /* For the moment only implement delivery to all cpus or one cpu */
- if (smp_threads_ready) {
- if (cpus_equal(cpumask, allcpus)) {
- server = default_distrib_server;
- } else {
- cpus_and(tmp, cpu_online_map, cpumask);
+ if (!distribute_irqs)
+ return default_server;
- if (cpus_empty(tmp))
- server = default_distrib_server;
- else
- server = get_hard_smp_processor_id(first_cpu(tmp));
- }
+ if (cpus_equal(cpumask, CPU_MASK_ALL)) {
+ server = default_distrib_server;
} else {
- server = default_server;
+ cpus_and(tmp, cpu_online_map, cpumask);
+
+ if (cpus_empty(tmp))
+ server = default_distrib_server;
+ else
+ server = get_hard_smp_processor_id(first_cpu(tmp));
}
-#else
- server = default_server;
-#endif
+
return server;
}
static void xics_enable_irq(unsigned int virq)
{
unsigned int irq;
- long call_status;
+ int call_status;
unsigned int server;
irq = virt_irq_to_real(irq_offset_down(virq));
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
DEFAULT_PRIORITY);
if (call_status != 0) {
- printk(KERN_ERR "xics_enable_irq: irq=%x: ibm_set_xive "
- "returned %lx\n", irq, call_status);
+ printk(KERN_ERR "xics_enable_irq: irq=%d: ibm_set_xive "
+ "returned %x\n", irq, call_status);
return;
}
/* Now unmask the interrupt (often a no-op) */
call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq);
if (call_status != 0) {
- printk(KERN_ERR "xics_enable_irq: irq=%x: ibm_int_on "
- "returned %lx\n", irq, call_status);
+ printk(KERN_ERR "xics_enable_irq: irq=%d: ibm_int_on "
+ "returned %x\n", irq, call_status);
return;
}
}
static void xics_disable_real_irq(unsigned int irq)
{
- long call_status;
+ int call_status;
unsigned int server;
if (irq == XICS_IPI)
call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
if (call_status != 0) {
- printk(KERN_ERR "xics_disable_real_irq: irq=%x: "
- "ibm_int_off returned %lx\n", irq, call_status);
+ printk(KERN_ERR "xics_disable_real_irq: irq=%d: "
+ "ibm_int_off returned %x\n", irq, call_status);
return;
}
/* Have to set XIVE to 0xff to be able to remove a slot */
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, 0xff);
if (call_status != 0) {
- printk(KERN_ERR "xics_disable_irq: irq=%x: ibm_set_xive(0xff)"
- " returned %lx\n", irq, call_status);
+ printk(KERN_ERR "xics_disable_irq: irq=%d: ibm_set_xive(0xff)"
+ " returned %x\n", irq, call_status);
return;
}
}
}
}
-extern unsigned int real_irq_to_virt_slowpath(unsigned int real_irq);
-
int xics_get_irq(struct pt_regs *regs)
{
unsigned int cpu = smp_processor_id();
if (irq == NO_IRQ)
irq = real_irq_to_virt_slowpath(vec);
if (irq == NO_IRQ) {
- printk(KERN_ERR "Interrupt 0x%x (real) is invalid,"
+ printk(KERN_ERR "Interrupt %d (real) is invalid,"
" disabling it.\n", vec);
xics_disable_real_irq(vec);
} else
#ifdef CONFIG_SMP
-extern struct xics_ipi_struct xics_ipi_message[NR_CPUS] __cacheline_aligned;
-
irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
int cpu = smp_processor_id();
struct xics_interrupt_node {
unsigned long addr;
unsigned long size;
- } inodes[NR_CPUS];
+ } intnodes[NR_CPUS];
ppc64_boot_msg(0x20, "XICS Init");
ibm_int_off = rtas_token("ibm,int-off");
np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation");
- if (!np) {
- printk(KERN_WARNING "Can't find Interrupt Presentation\n");
- udbg_printf("Can't find Interrupt Presentation\n");
- while (1);
- }
+ if (!np)
+ panic("xics_init_IRQ: can't find interrupt presentation");
+
nextnode:
- ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", 0);
+ ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", NULL);
if (ireg) {
/*
* set node starting index for this node
}
ireg = (uint *)get_property(np, "reg", &ilen);
- if (!ireg) {
- printk(KERN_WARNING "Can't find Interrupt Reg Property\n");
- udbg_printf("Can't find Interrupt Reg Property\n");
- while (1);
- }
+ if (!ireg)
+ panic("xics_init_IRQ: can't find interrupt reg property");
while (ilen) {
- inodes[indx].addr = (unsigned long long)*ireg++ << 32;
+ intnodes[indx].addr = (unsigned long)*ireg++ << 32;
ilen -= sizeof(uint);
- inodes[indx].addr |= *ireg++;
+ intnodes[indx].addr |= *ireg++;
ilen -= sizeof(uint);
- inodes[indx].size = (unsigned long long)*ireg++ << 32;
+ intnodes[indx].size = (unsigned long)*ireg++ << 32;
ilen -= sizeof(uint);
- inodes[indx].size |= *ireg++;
+ intnodes[indx].size |= *ireg++;
ilen -= sizeof(uint);
indx++;
if (indx >= NR_CPUS) break;
np;
np = of_find_node_by_type(np, "cpu")) {
ireg = (uint *)get_property(np, "reg", &ilen);
- if (ireg && ireg[0] == hard_smp_processor_id()) {
- ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
+ if (ireg && ireg[0] == boot_cpuid_phys) {
+ ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s",
+ &ilen);
i = ilen / sizeof(int);
if (ireg && i > 0) {
default_server = ireg[0];
default_distrib_server = ireg[i-1]; /* take last element */
}
+ ireg = (uint *)get_property(np,
+ "ibm,interrupt-server#-size", NULL);
+ if (ireg)
+ interrupt_server_size = *ireg;
break;
}
}
of_node_put(np);
- intr_base = inodes[0].addr;
- intr_size = (ulong)inodes[0].size;
+ intr_base = intnodes[0].addr;
+ intr_size = intnodes[0].size;
np = of_find_node_by_type(NULL, "interrupt-controller");
if (!np) {
- printk(KERN_WARNING "xics: no ISA Interrupt Controller\n");
+ printk(KERN_WARNING "xics: no ISA interrupt controller\n");
xics_irq_8259_cascade_real = -1;
xics_irq_8259_cascade = -1;
} else {
- ireg = (uint *) get_property(np, "interrupts", 0);
- if (!ireg) {
- printk(KERN_WARNING "Can't find ISA Interrupts Property\n");
- udbg_printf("Can't find ISA Interrupts Property\n");
- while (1);
- }
+ ireg = (uint *) get_property(np, "interrupts", NULL);
+ if (!ireg)
+ panic("xics_init_IRQ: can't find ISA interrupts property");
+
xics_irq_8259_cascade_real = *ireg;
xics_irq_8259_cascade
= virt_irq_create_mapping(xics_irq_8259_cascade_real);
if (systemcfg->platform == PLATFORM_PSERIES) {
#ifdef CONFIG_SMP
for_each_cpu(i) {
+ int hard_id;
+
/* FIXME: Do this dynamically! --RR */
- if (!cpu_present_at_boot(i))
+ if (!cpu_present(i))
continue;
- xics_per_cpu[i] = __ioremap((ulong)inodes[get_hard_smp_processor_id(i)].addr,
- (ulong)inodes[get_hard_smp_processor_id(i)].size,
- _PAGE_NO_CACHE);
+
+ hard_id = get_hard_smp_processor_id(i);
+ xics_per_cpu[i] = ioremap(intnodes[hard_id].addr,
+ intnodes[hard_id].size);
}
#else
- xics_per_cpu[0] = __ioremap((ulong)intr_base, intr_size,
- _PAGE_NO_CACHE);
+ xics_per_cpu[0] = ioremap(intr_base, intr_size);
#endif /* CONFIG_SMP */
-#ifdef CONFIG_PPC_PSERIES
- /* actually iSeries does not use any of xics...but it has link dependencies
- * for now, except this new one...
- */
} else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
ops = &pSeriesLP_ops;
-#endif
}
xics_8259_pic.enable = i8259_pic.enable;
*/
/*
 * Late-init hook: if this platform routes the legacy i8259 PIC through
 * an XICS cascade interrupt, claim the cascade irq (no_action handler —
 * the 8259 demux happens elsewhere) and initialise the 8259.
 * Failure to get the cascade is logged but not fatal.
 */
static int __init xics_setup_i8259(void)
{
	if (ppc64_interrupt_controller == IC_PPC_XIC &&
	    xics_irq_8259_cascade != -1) {
		if (request_irq(irq_offset_up(xics_irq_8259_cascade),
				no_action, 0, "8259 cascade", NULL))
			printk(KERN_ERR "xics_setup_i8259: couldn't get 8259 "
					"cascade\n");
		i8259_init(0);
	}
	return 0;
}
/* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, SA_INTERRUPT,
- "IPI", 0);
+ "IPI", NULL);
get_irq_desc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU;
}
#endif
static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
{
unsigned int irq;
- long status;
- unsigned long xics_status[2];
+ int status;
+ int xics_status[2];
unsigned long newmask;
- cpumask_t allcpus = CPU_MASK_ALL;
cpumask_t tmp = CPU_MASK_NONE;
irq = virt_irq_to_real(irq_offset_down(virq));
- if (irq == XICS_IPI)
+ if (irq == XICS_IPI || irq == NO_IRQ)
return;
- status = rtas_call(ibm_get_xive, 1, 3, (void *)&xics_status, irq);
+ status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
if (status) {
printk(KERN_ERR "xics_set_affinity: irq=%d ibm,get-xive "
- "returns %ld\n", irq, status);
+ "returns %d\n", irq, status);
return;
}
/* For the moment only implement delivery to all cpus or one cpu */
- if (cpus_equal(cpumask, allcpus)) {
+ if (cpus_equal(cpumask, CPU_MASK_ALL)) {
newmask = default_distrib_server;
} else {
cpus_and(tmp, cpu_online_map, cpumask);
irq, newmask, xics_status[1]);
if (status) {
- printk(KERN_ERR "xics_set_affinity irq=%d ibm,set-xive "
- "returns %ld\n", irq, status);
+ printk(KERN_ERR "xics_set_affinity: irq=%d ibm,set-xive "
+ "returns %d\n", irq, status);
return;
}
}
/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
- int set_indicator = rtas_token("set-indicator");
- const unsigned long giqs = 9005UL; /* Global Interrupt Queue Server */
- unsigned long status = 0;
- unsigned int irq, cpu = smp_processor_id();
- unsigned long xics_status[2];
- unsigned long flags;
-
- BUG_ON(set_indicator == RTAS_UNKNOWN_SERVICE);
+ int status;
+ unsigned int irq, virq, cpu = smp_processor_id();
/* Reject any interrupt that was queued to us... */
ops->cppr_info(cpu, 0);
iosync();
- /* Refuse any new interrupts... */
- rtas_call(set_indicator, 3, 1, &status, giqs,
- hard_smp_processor_id(), 0UL);
+ /* remove ourselves from the global interrupt queue */
+ status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE,
+ (1UL << interrupt_server_size) - 1 - default_distrib_server, 0);
WARN_ON(status != 0);
/* Allow IPIs again... */
ops->cppr_info(cpu, DEFAULT_PRIORITY);
iosync();
- printk(KERN_WARNING "HOTPLUG: Migrating IRQs away\n");
- for_each_irq(irq) {
- irq_desc_t *desc = get_irq_desc(irq);
+ for_each_irq(virq) {
+ irq_desc_t *desc;
+ int xics_status[2];
+ unsigned long flags;
+
+ /* We cant set affinity on ISA interrupts */
+ if (virq < irq_offset_value())
+ continue;
+
+ desc = get_irq_desc(virq);
+ irq = virt_irq_to_real(irq_offset_down(virq));
/* We need to get IPIs still. */
- if (irq_offset_down(irq) == XICS_IPI)
+ if (irq == XICS_IPI || irq == NO_IRQ)
continue;
/* We only need to migrate enabled IRQS */
spin_lock_irqsave(&desc->lock, flags);
- status = rtas_call(ibm_get_xive, 1, 3, (void *)&xics_status,
- irq);
+ status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);
if (status) {
printk(KERN_ERR "migrate_irqs_away: irq=%d "
- "ibm,get-xive returns %ld\n",
- irq, status);
+ "ibm,get-xive returns %d\n",
+ virq, status);
goto unlock;
}
goto unlock;
printk(KERN_WARNING "IRQ %d affinity broken off cpu %u\n",
- irq, cpu);
+ virq, cpu);
/* Reset affinity to all cpus */
xics_status[0] = default_distrib_server;
- status = rtas_call(ibm_set_xive, 3, 1, NULL,
- irq, xics_status[0], xics_status[1]);
+ status = rtas_call(ibm_set_xive, 3, 1, NULL, irq,
+ xics_status[0], xics_status[1]);
if (status)
- printk(KERN_ERR "migrate_irqs_away irq=%d "
- "ibm,set-xive returns %ld\n",
- irq, status);
+ printk(KERN_ERR "migrate_irqs_away: irq=%d "
+ "ibm,set-xive returns %d\n",
+ virq, status);
unlock:
spin_unlock_irqrestore(&desc->lock, flags);
}
-
}
#endif