This commit was manufactured by cvs2svn to create tag
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 9c5104c..3b9e545 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -128,9 +128,9 @@ static int cmc_polling_enabled = 1;
  */
 static int cpe_poll_enabled = 1;
 
-extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
+static int cpe_vector = -1;
 
-static int mca_init;
+extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
 
 /*
  * IA64_MCA log support
@@ -274,8 +274,6 @@ ia64_mca_log_sal_error_record(int sal_info_type)
 
 #ifdef CONFIG_ACPI
 
-static int cpe_vector = -1;
-
 static irqreturn_t
 ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
 {
@@ -544,7 +542,7 @@ ia64_mca_register_cpev (int cpev)
        }
 
        IA64_MCA_DEBUG("%s: corrected platform error "
-                      "vector %#x registered\n", __FUNCTION__, cpev);
+                      "vector %#x setup and enabled\n", __FUNCTION__, cpev);
 }
 #endif /* CONFIG_ACPI */
 
@@ -553,9 +551,8 @@ ia64_mca_register_cpev (int cpev)
 /*
  * ia64_mca_cmc_vector_setup
  *
- *  Setup the corrected machine check vector register in the processor.
- *  (The interrupt is masked on boot. ia64_mca_late_init unmask this.)
- *  This function is invoked on a per-processor basis.
+ *  Setup the corrected machine check vector register in the processor and
+ *  unmask interrupt.  This function is invoked on a per-processor basis.
  *
  * Inputs
  *      None
@@ -569,12 +566,12 @@ ia64_mca_cmc_vector_setup (void)
        cmcv_reg_t      cmcv;
 
        cmcv.cmcv_regval        = 0;
-       cmcv.cmcv_mask          = 1;        /* Mask/disable interrupt at first */
+       cmcv.cmcv_mask          = 0;        /* Unmask/enable interrupt */
        cmcv.cmcv_vector        = IA64_CMC_VECTOR;
        ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 
        IA64_MCA_DEBUG("%s: CPU %d corrected "
-                      "machine check vector %#x registered.\n",
+                      "machine check vector %#x setup and enabled.\n",
                       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);
 
        IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
@@ -1296,7 +1293,7 @@ ia64_mca_init(void)
         */
        register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
        register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
-       ia64_mca_cmc_vector_setup();       /* Setup vector on BSP */
+       ia64_mca_cmc_vector_setup();       /* Setup vector on BSP & enable */
 
        /* Setup the MCA rendezvous interrupt vector */
        register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
@@ -1306,8 +1303,23 @@ ia64_mca_init(void)
 
 #ifdef CONFIG_ACPI
        /* Setup the CPEI/P vector and handler */
-       cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
-       register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
+       {
+               irq_desc_t *desc;
+               unsigned int irq;
+
+               cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
+
+               if (cpe_vector >= 0) {
+                       for (irq = 0; irq < NR_IRQS; ++irq)
+                               if (irq_to_vector(irq) == cpe_vector) {
+                                       desc = irq_descp(irq);
+                                       desc->status |= IRQ_PER_CPU;
+                                       setup_irq(irq, &mca_cpe_irqaction);
+                               }
+                       ia64_mca_register_cpev(cpe_vector);
+               }
+               register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
+       }
 #endif
 
        /* Initialize the areas set aside by the OS to buffer the
@@ -1319,7 +1331,6 @@ ia64_mca_init(void)
        ia64_log_init(SAL_INFO_TYPE_CMC);
        ia64_log_init(SAL_INFO_TYPE_CPE);
 
-       mca_init = 1;
        printk(KERN_INFO "MCA related initialization done\n");
 }
 
@@ -1336,46 +1347,21 @@ ia64_mca_init(void)
 static int __init
 ia64_mca_late_init(void)
 {
-       if (!mca_init)
-               return 0;
-
-       /* Setup the CMCI/P vector and handler */
        init_timer(&cmc_poll_timer);
        cmc_poll_timer.function = ia64_mca_cmc_poll;
 
-       /* Unmask/enable the vector */
+       /* Reset to the correct state */
        cmc_polling_enabled = 0;
-       schedule_work(&cmc_enable_work);
-
-       IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__);
 
-#ifdef CONFIG_ACPI
-       /* Setup the CPEI/P vector and handler */
        init_timer(&cpe_poll_timer);
        cpe_poll_timer.function = ia64_mca_cpe_poll;
 
-       {
-               irq_desc_t *desc;
-               unsigned int irq;
-
-               if (cpe_vector >= 0) {
-                       /* If platform supports CPEI, enable the irq. */
-                       cpe_poll_enabled = 0;
-                       for (irq = 0; irq < NR_IRQS; ++irq)
-                               if (irq_to_vector(irq) == cpe_vector) {
-                                       desc = irq_descp(irq);
-                                       desc->status |= IRQ_PER_CPU;
-                                       setup_irq(irq, &mca_cpe_irqaction);
-                               }
-                       ia64_mca_register_cpev(cpe_vector);
-                       IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
-               } else {
-                       /* If platform doesn't support CPEI, get the timer going. */
-                       if (cpe_poll_enabled) {
-                               ia64_mca_cpe_poll(0UL);
-                               IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__);
-                       }
-               }
+#ifdef CONFIG_ACPI
+       /* If platform doesn't support CPEI, get the timer going. */
+       if (cpe_vector < 0 && cpe_poll_enabled) {
+               ia64_mca_cpe_poll(0UL);
+       } else {
+               cpe_poll_enabled = 0;
        }
 #endif