linux 2.6.16.38 w/ vs2.0.3-rc1
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 31867a7..9edeca8 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -18,6 +18,7 @@
 
 #undef DEBUG
 
+#include <linux/config.h>
 #include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -59,6 +60,7 @@
 #include <asm/time.h>
 #include <asm/nvram.h>
 #include "xics.h"
+#include <asm/firmware.h>
 #include <asm/pmc.h>
 #include <asm/mpic.h>
 #include <asm/ppc-pci.h>
@@ -68,7 +70,6 @@
 
 #include "plpar_wrappers.h"
 #include "ras.h"
-#include "firmware.h"
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -76,17 +77,14 @@
 #define DBG(fmt...)
 #endif
 
-/* move those away to a .h */
-extern void smp_init_pseries_mpic(void);
-extern void smp_init_pseries_xics(void);
 extern void find_udbg_vterm(void);
 
 int fwnmi_active;  /* TRUE if an FWNMI handler is present */
 
-static void pseries_shared_idle_sleep(void);
-static void pseries_dedicated_idle_sleep(void);
+static void pseries_shared_idle(void);
+static void pseries_dedicated_idle(void);
 
-static struct device_node *pSeries_mpic_node;
+struct mpic *pSeries_mpic;
 
 static void pSeries_show_cpuinfo(struct seq_file *m)
 {
@@ -121,98 +119,71 @@ static void __init fwnmi_init(void)
                fwnmi_active = 1;
 }
 
-void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc,
-                         struct pt_regs *regs)
+static void __init pSeries_init_mpic(void)
 {
-       unsigned int cascade_irq = i8259_irq(regs);
-       if (cascade_irq != NO_IRQ)
-               generic_handle_irq(cascade_irq, regs);
-       desc->chip->eoi(irq);
+        unsigned int *addrp;
+       struct device_node *np;
+       unsigned long intack = 0;
+
+       /* All ISUs are setup, complete initialization */
+       mpic_init(pSeries_mpic);
+
+       /* Check what kind of cascade ACK we have */
+        if (!(np = of_find_node_by_name(NULL, "pci"))
+            || !(addrp = (unsigned int *)
+                 get_property(np, "8259-interrupt-acknowledge", NULL)))
+                printk(KERN_ERR "Cannot find pci to get ack address\n");
+        else
+               intack = addrp[prom_n_addr_cells(np)-1];
+       of_node_put(np);
+
+       /* Setup the legacy interrupts & controller */
+       i8259_init(intack, 0);
+
+       /* Hook cascade to mpic */
+       mpic_setup_cascade(NUM_ISA_INTERRUPTS, i8259_irq_cascade, NULL);
 }
 
-static void __init pseries_mpic_init_IRQ(void)
+static void __init pSeries_setup_mpic(void)
 {
-       struct device_node *np, *old, *cascade = NULL;
-        unsigned int *addrp;
-       unsigned long intack = 0;
        unsigned int *opprop;
        unsigned long openpic_addr = 0;
-       unsigned int cascade_irq;
-       int naddr, n, i, opplen;
-       struct mpic *mpic;
+        unsigned char senses[NR_IRQS - NUM_ISA_INTERRUPTS];
+        struct device_node *root;
+       int irq_count;
 
-       np = of_find_node_by_path("/");
-       naddr = prom_n_addr_cells(np);
-       opprop = (unsigned int *) get_property(np, "platform-open-pic", &opplen);
+       /* Find the Open PIC if present */
+       root = of_find_node_by_path("/");
+       opprop = (unsigned int *) get_property(root, "platform-open-pic", NULL);
        if (opprop != 0) {
-               openpic_addr = of_read_number(opprop, naddr);
+               int n = prom_n_addr_cells(root);
+
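+               /* Combine the 32-bit address cells into a single 64-bit OpenPIC address */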
+               for (openpic_addr = 0; n > 0; --n)
+                       openpic_addr = (openpic_addr << 32) + *opprop++;
                printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
        }
-       of_node_put(np);
+       of_node_put(root);
 
        BUG_ON(openpic_addr == 0);
 
+       /* Get the sense values from OF */
+       prom_get_irq_senses(senses, NUM_ISA_INTERRUPTS, NR_IRQS);
+       
        /* Setup the openpic driver */
-       mpic = mpic_alloc(pSeries_mpic_node, openpic_addr,
-                         MPIC_PRIMARY,
-                         16, 250, /* isu size, irq count */
-                         " MPIC     ");
-       BUG_ON(mpic == NULL);
-
-       /* Add ISUs */
-       opplen /= sizeof(u32);
-       for (n = 0, i = naddr; i < opplen; i += naddr, n++) {
-               unsigned long isuaddr = of_read_number(opprop + i, naddr);
-               mpic_assign_isu(mpic, n, isuaddr);
-       }
-
-       /* All ISUs are setup, complete initialization */
-       mpic_init(mpic);
-
-       /* Look for cascade */
-       for_each_node_by_type(np, "interrupt-controller")
-               if (device_is_compatible(np, "chrp,iic")) {
-                       cascade = np;
-                       break;
-               }
-       if (cascade == NULL)
-               return;
-
-       cascade_irq = irq_of_parse_and_map(cascade, 0);
-       if (cascade == NO_IRQ) {
-               printk(KERN_ERR "xics: failed to map cascade interrupt");
-               return;
-       }
-
-       /* Check ACK type */
-       for (old = of_node_get(cascade); old != NULL ; old = np) {
-               np = of_get_parent(old);
-               of_node_put(old);
-               if (np == NULL)
-                       break;
-               if (strcmp(np->name, "pci") != 0)
-                       continue;
-               addrp = (u32 *)get_property(np, "8259-interrupt-acknowledge",
-                                           NULL);
-               if (addrp == NULL)
-                       continue;
-               naddr = prom_n_addr_cells(np);
-               intack = addrp[naddr-1];
-               if (naddr > 1)
-                       intack |= ((unsigned long)addrp[naddr-2]) << 32;
-       }
-       if (intack)
-               printk(KERN_DEBUG "mpic: PCI 8259 intack at 0x%016lx\n",
-                      intack);
-       i8259_init(cascade, intack);
-       of_node_put(cascade);
-       set_irq_chained_handler(cascade_irq, pseries_8259_cascade);
+       irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */
+       pSeries_mpic = mpic_alloc(openpic_addr, MPIC_PRIMARY,
+                                 16, 16, irq_count, /* isu size, irq offset, irq count */ 
+                                 NR_IRQS - 4, /* ipi offset */
+                                 senses, irq_count, /* sense & sense size */
+                                 " MPIC     ");
 }
 
 static void pseries_lpar_enable_pmcs(void)
 {
        unsigned long set, reset;
 
+       power4_enable_pmcs();
+
        set = 1UL << 63;
        reset = 0;
        plpar_hcall_norets(H_PERFMON, set, reset);
@@ -222,67 +193,23 @@ static void pseries_lpar_enable_pmcs(void)
                get_lppaca()->pmcregs_in_use = 1;
 }
 
-#ifdef CONFIG_KEXEC
-static void pseries_kexec_cpu_down_mpic(int crash_shutdown, int secondary)
-{
-       mpic_teardown_this_cpu(secondary);
-}
-
-static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary)
+static void __init pSeries_setup_arch(void)
 {
-       /* Don't risk a hypervisor call if we're crashing */
-       if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
-               unsigned long vpa = __pa(get_lppaca());
-
-               if (unregister_vpa(hard_smp_processor_id(), vpa)) {
-                       printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
-                                       "failed\n", smp_processor_id(),
-                                       hard_smp_processor_id());
-               }
+       /* Fixup ppc_md depending on the type of interrupt controller */
+       if (ppc64_interrupt_controller == IC_OPEN_PIC) {
+               ppc_md.init_IRQ       = pSeries_init_mpic;
+               ppc_md.get_irq        = mpic_get_irq;
+               /* Allocate the mpic now, so that find_and_init_phbs() can
+                * fill the ISUs */
+               pSeries_setup_mpic();
+       } else {
+               ppc_md.init_IRQ       = xics_init_IRQ;
+               ppc_md.get_irq        = xics_get_irq;
        }
-       xics_teardown_cpu(secondary);
-}
-#endif /* CONFIG_KEXEC */
-
-static void __init pseries_discover_pic(void)
-{
-       struct device_node *np;
-       char *typep;
 
-       for (np = NULL; (np = of_find_node_by_name(np,
-                                                  "interrupt-controller"));) {
-               typep = (char *)get_property(np, "compatible", NULL);
-               if (strstr(typep, "open-pic")) {
-                       pSeries_mpic_node = of_node_get(np);
-                       ppc_md.init_IRQ       = pseries_mpic_init_IRQ;
-                       ppc_md.get_irq        = mpic_get_irq;
-#ifdef CONFIG_KEXEC
-                       ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_mpic;
-#endif
 #ifdef CONFIG_SMP
-                       smp_init_pseries_mpic();
+       smp_init_pSeries();
 #endif
-                       return;
-               } else if (strstr(typep, "ppc-xicp")) {
-                       ppc_md.init_IRQ       = xics_init_IRQ;
-#ifdef CONFIG_KEXEC
-                       ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics;
-#endif
-#ifdef CONFIG_SMP
-                       smp_init_pseries_xics();
-#endif
-                       return;
-               }
-       }
-       printk(KERN_ERR "pSeries_discover_pic: failed to recognize"
-              " interrupt-controller\n");
-}
-
-static void __init pSeries_setup_arch(void)
-{
-       /* Discover PIC type and setup ppc_md accordingly */
-       pseries_discover_pic();
-
        /* openpic global configuration register (64-bit format). */
        /* openpic Interrupt Source Unit pointer (64-bit format). */
        /* python0 facility area (mmio) (64-bit format) REAL address. */
@@ -308,17 +235,18 @@ static void __init pSeries_setup_arch(void)
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                vpa_init(boot_cpuid);
                if (get_lppaca()->shared_proc) {
-                       printk(KERN_DEBUG "Using shared processor idle loop\n");
-                       ppc_md.power_save = pseries_shared_idle_sleep;
+                       printk(KERN_INFO "Using shared processor idle loop\n");
+                       ppc_md.idle_loop = pseries_shared_idle;
                } else {
-                       printk(KERN_DEBUG "Using dedicated idle loop\n");
-                       ppc_md.power_save = pseries_dedicated_idle_sleep;
+                       printk(KERN_INFO "Using dedicated idle loop\n");
+                       ppc_md.idle_loop = pseries_dedicated_idle;
                }
        } else {
-               printk(KERN_DEBUG "Using default idle loop\n");
+               printk(KERN_INFO "Using default idle loop\n");
+               ppc_md.idle_loop = default_idle;
        }
 
-       if (firmware_has_feature(FW_FEATURE_LPAR))
+       if (platform_is_lpar())
                ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
        else
                ppc_md.enable_pmcs = power4_enable_pmcs;
@@ -328,17 +256,94 @@ static int __init pSeries_init_panel(void)
 {
        /* Manually leave the kernel version on the panel. */
        ppc_md.progress("Linux ppc64\n", 0);
-       ppc_md.progress(system_utsname.release, 0);
+       ppc_md.progress(system_utsname.version, 0);
 
        return 0;
 }
 arch_initcall(pSeries_init_panel);
 
+
+/* Build up the ppc64_firmware_features bitmask field
+ * using contents of device-tree/ibm,hypertas-functions.
+ * Ultimately this functionality may be moved into prom.c prom_init().
+ */
+static void __init fw_feature_init(void)
+{
+       struct device_node * dn;
+       char * hypertas;
+       unsigned int len;
+
+       DBG(" -> fw_feature_init()\n");
+
+       ppc64_firmware_features = 0;
+       dn = of_find_node_by_path("/rtas");
+       if (dn == NULL) {
+               printk(KERN_ERR "WARNING ! Cannot find RTAS in device-tree !\n");
+               goto no_rtas;
+       }
+
+       hypertas = get_property(dn, "ibm,hypertas-functions", &len);
+       if (hypertas) {
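+               /* The property is a packed list of NUL-terminated strings; match each entry */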
+               while (len > 0){
+                       int i, hypertas_len;
+                       /* check value against table of strings */
+                       for(i=0; i < FIRMWARE_MAX_FEATURES ;i++) {
+                               if ((firmware_features_table[i].name) &&
+                                   (strcmp(firmware_features_table[i].name,hypertas))==0) {
+                                       /* we have a match */
+                                       ppc64_firmware_features |= 
+                                               (firmware_features_table[i].val);
+                                       break;
+                               } 
+                       }
+                       hypertas_len = strlen(hypertas);
+                       len -= hypertas_len +1;
+                       hypertas+= hypertas_len +1;
+               }
+       }
+
+       of_node_put(dn);
+no_rtas:
+
+       DBG(" <- fw_feature_init()\n");
+}
+
+
+static  void __init pSeries_discover_pic(void)
+{
+       struct device_node *np;
+       char *typep;
+
+       /*
+        * Setup interrupt mapping options that are needed for finish_device_tree
+        * to properly parse the OF interrupt tree & do the virtual irq mapping
+        */
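+       /* Virtual irqs 0..NUM_ISA_INTERRUPTS-1 stay reserved for the legacy 8259 */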
+       __irq_offset_value = NUM_ISA_INTERRUPTS;
+       ppc64_interrupt_controller = IC_INVALID;
+       for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) {
+               typep = (char *)get_property(np, "compatible", NULL);
+               if (strstr(typep, "open-pic")) {
+                       ppc64_interrupt_controller = IC_OPEN_PIC;
+                       break;
+               } else if (strstr(typep, "ppc-xicp")) {
+                       ppc64_interrupt_controller = IC_PPC_XIC;
+                       break;
+               }
+       }
+       if (ppc64_interrupt_controller == IC_INVALID)
+               printk("pSeries_discover_pic: failed to recognize"
+                       " interrupt-controller\n");
+
+}
+
 static void pSeries_mach_cpu_die(void)
 {
        local_irq_disable();
        idle_task_exit();
-       xics_teardown_cpu(0);
+       /* Some hardware requires clearing the CPPR, while other hardware does
+        * not; it is safe either way.
+        */
+       pSeriesLP_cppr_info(0, 0);
        rtas_stop_self();
        /* Should never get here... */
        BUG();
@@ -362,11 +367,21 @@ static int pseries_set_xdabr(unsigned long dabr)
  */
 static void __init pSeries_init_early(void)
 {
+       int iommu_off = 0;
+
        DBG(" -> pSeries_init_early()\n");
 
        fw_feature_init();
+       
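+       /* Pick the hash-table backend: hypervisor-managed HPTEs on LPAR, direct access otherwise */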
+       if (platform_is_lpar())
+               hpte_init_lpar();
+       else {
+               hpte_init_native();
+               iommu_off = (of_chosen &&
+                            get_property(of_chosen, "linux,iommu-off", NULL));
+       }
 
-       if (firmware_has_feature(FW_FEATURE_LPAR))
+       if (platform_is_lpar())
                find_udbg_vterm();
 
        if (firmware_has_feature(FW_FEATURE_DABR))
@@ -376,6 +391,8 @@ static void __init pSeries_init_early(void)
 
        iommu_init_early_pSeries();
 
+       pSeries_discover_pic();
+
        DBG(" <- pSeries_init_early()\n");
 }
 
@@ -408,149 +425,184 @@ static int pSeries_check_legacy_ioport(unsigned int baseport)
 /*
  * Called very early, MMU is off, device-tree isn't unflattened
  */
+extern struct machdep_calls pSeries_md;
 
-static int __init pSeries_probe_hypertas(unsigned long node,
-                                        const char *uname, int depth,
-                                        void *data)
-{
-       if (depth != 1 ||
-           (strcmp(uname, "rtas") != 0 && strcmp(uname, "rtas@0") != 0))
-               return 0;
-
-       if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL) != NULL)
-               powerpc_firmware_features |= FW_FEATURE_LPAR;
-
-       if (firmware_has_feature(FW_FEATURE_LPAR))
-               hpte_init_lpar();
-       else
-               hpte_init_native();
-
-       return 1;
-}
-
-static int __init pSeries_probe(void)
+static int __init pSeries_probe(int platform)
 {
-       unsigned long root = of_get_flat_dt_root();
-       char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
-                                         "device_type", NULL);
-       if (dtype == NULL)
-               return 0;
-       if (strcmp(dtype, "chrp"))
+       if (platform != PLATFORM_PSERIES &&
+           platform != PLATFORM_PSERIES_LPAR)
                return 0;
 
-       /* Cell blades firmware claims to be chrp while it's not. Until this
-        * is fixed, we need to avoid those here.
+       /* if we have some ppc_md fixups for LPAR to do, do
+        * it here ...
         */
-       if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0") ||
-           of_flat_dt_is_compatible(root, "IBM,CBEA"))
-               return 0;
-
-       DBG("pSeries detected, looking for LPAR capability...\n");
-
-       /* Now try to figure out if we are running on LPAR */
-       of_scan_flat_dt(pSeries_probe_hypertas, NULL);
-
-       DBG("Machine is%s LPAR !\n",
-           (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");
 
        return 1;
 }
 
-
 DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
 
-static void pseries_dedicated_idle_sleep(void)
-{ 
-       unsigned int cpu = smp_processor_id();
-       unsigned long start_snooze;
-       unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
+static inline void dedicated_idle_sleep(unsigned int cpu)
+{
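+       /* lppaca of our SMT partner thread (cpu ^ 1 selects the sibling) */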
+       struct lppaca *plppaca = &lppaca[cpu ^ 1];
 
-       /*
-        * Indicate to the HV that we are idle. Now would be
-        * a good time to find other work to dispatch.
-        */
-       get_lppaca()->idle = 1;
+       /* Only sleep if the other thread is not idle */
+       if (!(plppaca->idle)) {
+               local_irq_disable();
 
-       /*
-        * We come in with interrupts disabled, and need_resched()
-        * has been checked recently.  If we should poll for a little
-        * while, do so.
-        */
-       if (*smt_snooze_delay) {
-               start_snooze = get_tb() +
-                       *smt_snooze_delay * tb_ticks_per_usec;
-               local_irq_enable();
+               /*
+                * We are about to sleep the thread and so won't be polling any
+                * more.
+                */
+               clear_thread_flag(TIF_POLLING_NRFLAG);
+               smp_mb__after_clear_bit();
+
+               /*
+                * SMT dynamic mode. Cede will result in this thread going
+                * dormant, if the partner thread is still doing work.  Thread
+                * wakes up if partner goes idle, an interrupt is presented, or
+                * a prod occurs.  Returning from the cede enables external
+                * interrupts.
+                */
+               if (!need_resched())
+                       cede_processor();
+               else
+                       local_irq_enable();
                set_thread_flag(TIF_POLLING_NRFLAG);
+       } else {
+               /*
+                * Give the HV an opportunity at the processor, since we are
+                * not doing any work.
+                */
+               poll_pending();
+       }
+}
 
-               while (get_tb() < start_snooze) {
-                       if (need_resched() || cpu_is_offline(cpu))
-                               goto out;
-                       ppc64_runlatch_off();
-                       HMT_low();
-                       HMT_very_low();
+static void pseries_dedicated_idle(void)
+{ 
+       unsigned int cpu = smp_processor_id();
+       unsigned long start_snooze;
+       unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
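+       /* Advertise that we poll need_resched() while idle, so remote cpus can skip the resched IPI */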
+       set_thread_flag(TIF_POLLING_NRFLAG);
+
+       while (1) {
+               /*
+                * Indicate to the HV that we are idle. Now would be
+                * a good time to find other work to dispatch.
+                */
+               get_lppaca()->idle = 1;
+
+               if (!need_resched()) {
+                       start_snooze = get_tb() +
+                               *smt_snooze_delay * tb_ticks_per_usec;
+
+                       while (!need_resched() && !cpu_is_offline(cpu)) {
+                               ppc64_runlatch_off();
+
+                               /*
+                                * Go into low thread priority and possibly
+                                * low power mode.
+                                */
+                               HMT_low();
+                               HMT_very_low();
+
+                               if (*smt_snooze_delay != 0 &&
+                                   get_tb() > start_snooze) {
+                                       HMT_medium();
+                                       dedicated_idle_sleep(cpu);
+                               }
+
+                       }
+
+                       HMT_medium();
                }
 
-               HMT_medium();
-               clear_thread_flag(TIF_POLLING_NRFLAG);
-               smp_mb();
-               local_irq_disable();
-               if (need_resched() || cpu_is_offline(cpu))
-                       goto out;
-       }
+               get_lppaca()->idle = 0;
+               ppc64_runlatch_on();
 
-       /*
-        * If not SMT, cede processor.  If CPU is running SMT
-        * cede if the other thread is not idle, so that it can
-        * go single-threaded.  If the other thread is idle,
-        * we ask the hypervisor if it has pending work it
-        * wants to do and cede if it does.  Otherwise we keep
-        * polling in order to reduce interrupt latency.
-        *
-        * Doing the cede when the other thread is active will
-        * result in this thread going dormant, meaning the other
-        * thread gets to run in single-threaded (ST) mode, which
-        * is slightly faster than SMT mode with this thread at
-        * very low priority.  The cede enables interrupts, which
-        * doesn't matter here.
-        */
-       if (!cpu_has_feature(CPU_FTR_SMT) || !lppaca[cpu ^ 1].idle
-           || poll_pending() == H_PENDING)
-               cede_processor();
+               preempt_enable_no_resched();
+               schedule();
+               preempt_disable();
 
-out:
-       HMT_medium();
-       get_lppaca()->idle = 0;
+               if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
+                       cpu_die();
+       }
 }
 
-static void pseries_shared_idle_sleep(void)
+static void pseries_shared_idle(void)
 {
-       /*
-        * Indicate to the HV that we are idle. Now would be
-        * a good time to find other work to dispatch.
-        */
-       get_lppaca()->idle = 1;
+       unsigned int cpu = smp_processor_id();
 
-       /*
-        * Yield the processor to the hypervisor.  We return if
-        * an external interrupt occurs (which are driven prior
-        * to returning here) or if a prod occurs from another
-        * processor. When returning here, external interrupts
-        * are enabled.
-        */
-       cede_processor();
+       while (1) {
+               /*
+                * Indicate to the HV that we are idle. Now would be
+                * a good time to find other work to dispatch.
+                */
+               get_lppaca()->idle = 1;
+
+               while (!need_resched() && !cpu_is_offline(cpu)) {
+                       local_irq_disable();
+                       ppc64_runlatch_off();
+
+                       /*
+                        * Yield the processor to the hypervisor.  We return if
+                        * an external interrupt occurs (which are driven prior
+                        * to returning here) or if a prod occurs from another
+                        * processor. When returning here, external interrupts
+                        * are enabled.
+                        *
+                        * Check need_resched() again with interrupts disabled
+                        * to avoid a race.
+                        */
+                       if (!need_resched())
+                               cede_processor();
+                       else
+                               local_irq_enable();
+
+                       HMT_medium();
+               }
+
+               get_lppaca()->idle = 0;
+               ppc64_runlatch_on();
+
+               preempt_enable_no_resched();
+               schedule();
+               preempt_disable();
 
-       get_lppaca()->idle = 0;
+               if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
+                       cpu_die();
+       }
 }
 
 static int pSeries_pci_probe_mode(struct pci_bus *bus)
 {
-       if (firmware_has_feature(FW_FEATURE_LPAR))
+       if (platform_is_lpar())
                return PCI_PROBE_DEVTREE;
        return PCI_PROBE_NORMAL;
 }
 
-define_machine(pseries) {
-       .name                   = "pSeries",
+#ifdef CONFIG_KEXEC
+static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
+{
+       /* Don't risk a hypervisor call if we're crashing */
+       if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
+               unsigned long vpa = __pa(get_lppaca());
+
+               if (unregister_vpa(hard_smp_processor_id(), vpa)) {
+                       printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
+                                       "failed\n", smp_processor_id(),
+                                       hard_smp_processor_id());
+               }
+       }
+
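+       /* Quiesce this cpu's interrupt controller before handing control to the new kernel */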
+       if (ppc64_interrupt_controller == IC_OPEN_PIC)
+               mpic_teardown_this_cpu(secondary);
+       else
+               xics_teardown_cpu(secondary);
+}
+#endif
+
+struct machdep_calls __initdata pSeries_md = {
        .probe                  = pSeries_probe,
        .setup_arch             = pSeries_setup_arch,
        .init_early             = pSeries_init_early,
@@ -573,6 +625,7 @@ define_machine(pseries) {
        .system_reset_exception = pSeries_system_reset_exception,
        .machine_check_exception = pSeries_machine_check_exception,
 #ifdef CONFIG_KEXEC
+       .kexec_cpu_down         = pseries_kexec_cpu_down,
        .machine_kexec          = default_machine_kexec,
        .machine_kexec_prepare  = default_machine_kexec_prepare,
        .machine_crash_shutdown = default_machine_crash_shutdown,