static int tolerant = 1;
static int banks;
static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
+static unsigned long console_logged;
+static int notify_user;
/*
* Lockless MCE logging infrastructure.
MCE_LOG_LEN,
};
-static void mce_log(struct mce *mce)
+void mce_log(struct mce *mce)
{
unsigned next, entry;
mce->finished = 0;
smp_wmb();
mcelog.entry[entry].finished = 1;
smp_wmb();
+
+ if (!test_and_set_bit(0, &console_logged))
+ notify_user = 1;
}
static void print_mce(struct mce *m)
panicm = m;
panicm_found = 1;
}
+
+ tainted |= TAINT_MACHINE_CHECK;
}
/* Never do anything final in the polling timer */
{
on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
schedule_delayed_work(&mcheck_work, check_interval * HZ);
+
+ /*
+ * It's ok to read stale data here for notify_user and
+ * console_logged as we'll simply get the updated versions
+ * on the next mcheck_timer execution and atomic operations
+ * on console_logged act as synchronization for notify_user
+ * writes.
+ */
+ if (notify_user && console_logged) {
+ notify_user = 0;
+ clear_bit(0, &console_logged);
+ printk(KERN_INFO "Machine check events logged\n");
+ }
}
}
}
+/*
+ * Vendor-specific machine-check setup, called once per CPU from
+ * mcheck_init() after the generic mce_init() path has run.  Only
+ * Intel has extended MCE features wired up here (thermal/extended
+ * MCE init via mce_intel_feature_init()); all other vendors fall
+ * through with no extra work.
+ */
+static void __init mce_cpu_features(struct cpuinfo_x86 *c)
+{
+ switch (c->x86_vendor) {
+ case X86_VENDOR_INTEL:
+ mce_intel_feature_init(c);
+ break;
+ default:
+ break;
+ }
+}
+
/*
* Called for each booted CPU to set up machine checks.
* Must be called with preempt off.
*/
void __init mcheck_init(struct cpuinfo_x86 *c)
{
+ /*
+ * Track which CPUs have already been initialised.  cpumask_t
+ * scales beyond BITS_PER_LONG CPUs, unlike the old bare
+ * unsigned long bitmap, and cpu_test_and_set() is the matching
+ * accessor.  __initdata: the mask is only needed during boot.
+ */
- static unsigned long mce_cpus __initdata = 0;
+ static cpumask_t mce_cpus __initdata = CPU_MASK_NONE;
 mce_cpu_quirks(c);
 if (mce_dont_init ||
- test_and_set_bit(smp_processor_id(), &mce_cpus) ||
+ cpu_test_and_set(smp_processor_id(), mce_cpus) ||
 !mce_available(c))
 return;
 mce_init(NULL);
+ /* Hook in vendor-specific extras (currently Intel only). */
+ mce_cpu_features(c);
}
/*
memset(mcelog.entry, 0, next * sizeof(struct mce));
mcelog.next = 0;
- smp_wmb();
-
+
synchronize_kernel();
/* Collect entries that were still getting written before the synchronize. */