/*
- * (c) 2005, 2006 Advanced Micro Devices, Inc.
+ * (c) 2005 Advanced Micro Devices, Inc.
* Your use of this code is subject to the terms and conditions of the
* GNU general public license version 2. See "COPYING" or
* http://www.gnu.org/licenses/gpl.html
*
* Support : jacob.shin@amd.com
*
- * April 2006
- * - added support for AMD Family 0x10 processors
+ * MC4_MISC0 DRAM ECC Error Threshold is available under AMD K8 Rev F.
+ * MC4_MISC0 exists per physical processor.
*
- * All MC4_MISCi registers are shared between multi-cores
*/
#include <linux/cpu.h>
#include <asm/percpu.h>
#include <asm/idle.h>
-#define PFX "mce_threshold: "
-#define VERSION "version 1.1.1"
-#define NR_BANKS 6
-#define NR_BLOCKS 9
-#define THRESHOLD_MAX 0xFFF
-#define INT_TYPE_APIC 0x00020000
-#define MASK_VALID_HI 0x80000000
-#define MASK_LVTOFF_HI 0x00F00000
-#define MASK_COUNT_EN_HI 0x00080000
-#define MASK_INT_TYPE_HI 0x00060000
-#define MASK_OVERFLOW_HI 0x00010000
+#define PFX "mce_threshold: "
+#define VERSION "version 1.00.9"
+#define NR_BANKS 5
+#define THRESHOLD_MAX 0xFFF
+#define INT_TYPE_APIC 0x00020000
+#define MASK_VALID_HI 0x80000000
+#define MASK_LVTOFF_HI 0x00F00000
+#define MASK_COUNT_EN_HI 0x00080000
+#define MASK_INT_TYPE_HI 0x00060000
+#define MASK_OVERFLOW_HI 0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
-#define MASK_BLKPTR_LO 0xFF000000
-#define MCG_XBLK_ADDR 0xC0000400
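+/* error-counter overflow flag: bit 48 of the 64-bit MCi_MISC value (MASK_OVERFLOW_HI in the high word) */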
+#define MASK_OVERFLOW 0x0001000000000000L
-struct threshold_block {
- unsigned int block;
- unsigned int bank;
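+/* per-bank state exposed through sysfs; shared banks reuse the first core's instance via symlinks */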
+struct threshold_bank {
unsigned int cpu;
- u32 address;
- u16 interrupt_enable;
+ u8 bank;
+ u8 interrupt_enable;
u16 threshold_limit;
struct kobject kobj;
- struct list_head miscj;
};
-/* defaults used early on boot */
-static struct threshold_block threshold_defaults = {
+static struct threshold_bank threshold_defaults = {
.interrupt_enable = 0,
.threshold_limit = THRESHOLD_MAX,
};
-struct threshold_bank {
- struct kobject kobj;
- struct threshold_block *blocks;
- cpumask_t cpus;
-};
-static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
-
#ifdef CONFIG_SMP
static unsigned char shared_bank[NR_BANKS] = {
0, 0, 0, 0, 1
*/
/* must be called with correct cpu affinity */
-static void threshold_restart_bank(struct threshold_block *b,
+static void threshold_restart_bank(struct threshold_bank *b,
int reset, u16 old_limit)
{
u32 mci_misc_hi, mci_misc_lo;
- rdmsr(b->address, mci_misc_lo, mci_misc_hi);
+ rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi);
if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
reset = 1; /* limit cannot be lower than err count */
(mci_misc_hi &= ~MASK_INT_TYPE_HI);
mci_misc_hi |= MASK_COUNT_EN_HI;
- wrmsr(b->address, mci_misc_lo, mci_misc_hi);
+ wrmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi);
}
-/* cpu init entry point, called from mce.c with preempt off */
void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
{
- unsigned int bank, block;
+ int bank;
+ u32 mci_misc_lo, mci_misc_hi;
unsigned int cpu = smp_processor_id();
- u32 low = 0, high = 0, address = 0;
for (bank = 0; bank < NR_BANKS; ++bank) {
- for (block = 0; block < NR_BLOCKS; ++block) {
- if (block == 0)
- address = MSR_IA32_MC0_MISC + bank * 4;
- else if (block == 1)
- address = MCG_XBLK_ADDR
- + ((low & MASK_BLKPTR_LO) >> 21);
- else
- ++address;
-
- if (rdmsr_safe(address, &low, &high))
- continue;
+ rdmsr(MSR_IA32_MC0_MISC + bank * 4, mci_misc_lo, mci_misc_hi);
- if (!(high & MASK_VALID_HI)) {
- if (block)
- continue;
- else
- break;
- }
+ /* skip if not valid, counter not present, or locked by BIOS */
+ if (!(mci_misc_hi & MASK_VALID_HI) ||
+ !(mci_misc_hi & MASK_VALID_HI >> 1) ||
+ (mci_misc_hi & MASK_VALID_HI >> 2))
+ continue;
- if (!(high & MASK_VALID_HI >> 1) ||
- (high & MASK_VALID_HI >> 2))
- continue;
+ per_cpu(bank_map, cpu) |= (1 << bank);
- if (!block)
- per_cpu(bank_map, cpu) |= (1 << bank);
#ifdef CONFIG_SMP
- if (shared_bank[bank] && c->cpu_core_id)
- break;
+ if (shared_bank[bank] && cpu_core_id[cpu])
+ continue;
#endif
- high &= ~MASK_LVTOFF_HI;
- high |= K8_APIC_EXT_LVT_ENTRY_THRESHOLD << 20;
- wrmsr(address, low, high);
- setup_APIC_extened_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD,
- THRESHOLD_APIC_VECTOR,
- K8_APIC_EXT_INT_MSG_FIX, 0);
-
- threshold_defaults.address = address;
- threshold_restart_bank(&threshold_defaults, 0, 0);
- }
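+ /* set up the APIC extended LVT entry indicated by this bank's LVT offset field */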
+ setup_threshold_lvt((mci_misc_hi & MASK_LVTOFF_HI) >> 20);
+ threshold_defaults.cpu = cpu;
+ threshold_defaults.bank = bank;
+ threshold_restart_bank(&threshold_defaults, 0, 0);
}
}
*/
asmlinkage void mce_threshold_interrupt(void)
{
- unsigned int bank, block;
+ int bank;
struct mce m;
- u32 low = 0, high = 0, address = 0;
ack_APIC_irq();
exit_idle();
/* assume first bank caused it */
for (bank = 0; bank < NR_BANKS; ++bank) {
- for (block = 0; block < NR_BLOCKS; ++block) {
- if (block == 0)
- address = MSR_IA32_MC0_MISC + bank * 4;
- else if (block == 1)
- address = MCG_XBLK_ADDR
- + ((low & MASK_BLKPTR_LO) >> 21);
- else
- ++address;
-
- if (rdmsr_safe(address, &low, &high))
- continue;
+ m.bank = MCE_THRESHOLD_BASE + bank;
+ rdmsrl(MSR_IA32_MC0_MISC + bank * 4, m.misc);
- if (!(high & MASK_VALID_HI)) {
- if (block)
- continue;
- else
- break;
- }
-
- if (!(high & MASK_VALID_HI >> 1) ||
- (high & MASK_VALID_HI >> 2))
- continue;
-
- if (high & MASK_OVERFLOW_HI) {
- rdmsrl(address, m.misc);
- rdmsrl(MSR_IA32_MC0_STATUS + bank * 4,
- m.status);
- m.bank = K8_MCE_THRESHOLD_BASE
- + bank * NR_BLOCKS
- + block;
- mce_log(&m);
- goto out;
- }
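+ /* log and stop at the first bank whose error counter has overflowed */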
+ if (m.misc & MASK_OVERFLOW) {
+ mce_log(&m);
+ goto out;
}
}
-out:
+ out:
irq_exit();
}
* Sysfs Interface
*/
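+/* one "threshold" sysdev per cpu; per-bank kobjects are parented to it */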
+static struct sysdev_class threshold_sysclass = {
+ set_kset_name("threshold"),
+};
+
+static DEFINE_PER_CPU(struct sys_device, device_threshold);
+
struct threshold_attr {
- struct attribute attr;
- ssize_t(*show) (struct threshold_block *, char *);
- ssize_t(*store) (struct threshold_block *, const char *, size_t count);
+ struct attribute attr;
+ ssize_t(*show) (struct threshold_bank *, char *);
+ ssize_t(*store) (struct threshold_bank *, const char *, size_t count);
};
+static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
+
static cpumask_t affinity_set(unsigned int cpu)
{
cpumask_t oldmask = current->cpus_allowed;
set_cpus_allowed(current, oldmask);
}
-#define SHOW_FIELDS(name) \
-static ssize_t show_ ## name(struct threshold_block * b, char *buf) \
-{ \
- return sprintf(buf, "%lx\n", (unsigned long) b->name); \
-}
+#define SHOW_FIELDS(name) \
+ static ssize_t show_ ## name(struct threshold_bank * b, char *buf) \
+ { \
+ return sprintf(buf, "%lx\n", (unsigned long) b->name); \
+ }
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)
-static ssize_t store_interrupt_enable(struct threshold_block *b,
+static ssize_t store_interrupt_enable(struct threshold_bank *b,
const char *buf, size_t count)
{
char *end;
return end - buf;
}
-static ssize_t store_threshold_limit(struct threshold_block *b,
+static ssize_t store_threshold_limit(struct threshold_bank *b,
const char *buf, size_t count)
{
char *end;
return end - buf;
}
-static ssize_t show_error_count(struct threshold_block *b, char *buf)
+static ssize_t show_error_count(struct threshold_bank *b, char *buf)
{
u32 high, low;
cpumask_t oldmask;
oldmask = affinity_set(b->cpu);
- rdmsr(b->address, low, high);
+ rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, low, high); /* ignore low 32 */
affinity_restore(oldmask);
return sprintf(buf, "%x\n",
(high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
}
-static ssize_t store_error_count(struct threshold_block *b,
+static ssize_t store_error_count(struct threshold_bank *b,
const char *buf, size_t count)
{
cpumask_t oldmask;
.store = _store, \
};
-#define RW_ATTR(name) \
-static struct threshold_attr name = \
+#define ATTR_FIELDS(name) \
+ static struct threshold_attr name = \
THRESHOLD_ATTR(name, 0644, show_## name, store_## name)
-RW_ATTR(interrupt_enable);
-RW_ATTR(threshold_limit);
-RW_ATTR(error_count);
+ATTR_FIELDS(interrupt_enable);
+ATTR_FIELDS(threshold_limit);
+ATTR_FIELDS(error_count);
static struct attribute *default_attrs[] = {
&interrupt_enable.attr,
NULL
};
-#define to_block(k) container_of(k, struct threshold_block, kobj)
-#define to_attr(a) container_of(a, struct threshold_attr, attr)
+#define to_bank(k) container_of(k,struct threshold_bank,kobj)
+#define to_attr(a) container_of(a,struct threshold_attr,attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
- struct threshold_block *b = to_block(kobj);
+ struct threshold_bank *b = to_bank(kobj);
struct threshold_attr *a = to_attr(attr);
ssize_t ret;
ret = a->show ? a->show(b, buf) : -EIO;
static ssize_t store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
- struct threshold_block *b = to_block(kobj);
+ struct threshold_bank *b = to_bank(kobj);
struct threshold_attr *a = to_attr(attr);
ssize_t ret;
ret = a->store ? a->store(b, buf, count) : -EIO;
.default_attrs = default_attrs,
};
-static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
- unsigned int bank,
- unsigned int block,
- u32 address)
-{
- int err;
- u32 low, high;
- struct threshold_block *b = NULL;
-
- if ((bank >= NR_BANKS) || (block >= NR_BLOCKS))
- return 0;
-
- if (rdmsr_safe(address, &low, &high))
- goto recurse;
-
- if (!(high & MASK_VALID_HI)) {
- if (block)
- goto recurse;
- else
- return 0;
- }
-
- if (!(high & MASK_VALID_HI >> 1) ||
- (high & MASK_VALID_HI >> 2))
- goto recurse;
-
- b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
- if (!b)
- return -ENOMEM;
- memset(b, 0, sizeof(struct threshold_block));
-
- b->block = block;
- b->bank = bank;
- b->cpu = cpu;
- b->address = address;
- b->interrupt_enable = 0;
- b->threshold_limit = THRESHOLD_MAX;
-
- INIT_LIST_HEAD(&b->miscj);
-
- if (per_cpu(threshold_banks, cpu)[bank]->blocks)
- list_add(&b->miscj,
- &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
- else
- per_cpu(threshold_banks, cpu)[bank]->blocks = b;
-
- kobject_set_name(&b->kobj, "misc%i", block);
- b->kobj.parent = &per_cpu(threshold_banks, cpu)[bank]->kobj;
- b->kobj.ktype = &threshold_ktype;
- err = kobject_register(&b->kobj);
- if (err)
- goto out_free;
-recurse:
- if (!block) {
- address = (low & MASK_BLKPTR_LO) >> 21;
- if (!address)
- return 0;
- address += MCG_XBLK_ADDR;
- } else
- ++address;
-
- err = allocate_threshold_blocks(cpu, bank, ++block, address);
- if (err)
- goto out_free;
-
- return err;
-
-out_free:
- if (b) {
- kobject_unregister(&b->kobj);
- kfree(b);
- }
- return err;
-}
-
/* shared banks: the first core owns the sysfs dir/files; sibling cores are symlinked to it. */
-static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
+static __cpuinit int threshold_create_bank(unsigned int cpu, int bank)
{
- int i, err = 0;
+ int err = 0;
struct threshold_bank *b = NULL;
- cpumask_t oldmask = CPU_MASK_NONE;
- char name[32];
-
- sprintf(name, "threshold_bank%i", bank);
#ifdef CONFIG_SMP
- if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) { /* symlink */
- i = first_cpu(cpu_core_map[cpu]);
-
- /* first core not up yet */
- if (cpu_data[i].cpu_core_id)
- goto out;
-
- /* already linked */
- if (per_cpu(threshold_banks, cpu)[bank])
- goto out;
-
- b = per_cpu(threshold_banks, i)[bank];
+ if (cpu_core_id[cpu] && shared_bank[bank]) { /* symlink */
+ char name[16];
+ unsigned lcpu = first_cpu(cpu_core_map[cpu]);
+ if (cpu_core_id[lcpu])
+ goto out; /* first core not up yet */
+ b = per_cpu(threshold_banks, lcpu)[bank];
if (!b)
goto out;
-
- err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj,
+ sprintf(name, "bank%i", bank);
+ err = sysfs_create_link(&per_cpu(device_threshold, cpu).kobj,
&b->kobj, name);
if (err)
goto out;
-
- b->cpus = cpu_core_map[cpu];
per_cpu(threshold_banks, cpu)[bank] = b;
goto out;
}
#endif
- b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
+ b = kmalloc(sizeof(struct threshold_bank), GFP_KERNEL);
if (!b) {
err = -ENOMEM;
goto out;
}
memset(b, 0, sizeof(struct threshold_bank));
- kobject_set_name(&b->kobj, "threshold_bank%i", bank);
- b->kobj.parent = &per_cpu(device_mce, cpu).kobj;
-#ifndef CONFIG_SMP
- b->cpus = CPU_MASK_ALL;
-#else
- b->cpus = cpu_core_map[cpu];
-#endif
- err = kobject_register(&b->kobj);
- if (err)
- goto out_free;
-
- per_cpu(threshold_banks, cpu)[bank] = b;
-
- oldmask = affinity_set(cpu);
- err = allocate_threshold_blocks(cpu, bank, 0,
- MSR_IA32_MC0_MISC + bank * 4);
- affinity_restore(oldmask);
-
- if (err)
- goto out_free;
-
- for_each_cpu_mask(i, b->cpus) {
- if (i == cpu)
- continue;
-
- err = sysfs_create_link(&per_cpu(device_mce, i).kobj,
- &b->kobj, name);
- if (err)
- goto out;
+ b->cpu = cpu;
+ b->bank = bank;
+ b->interrupt_enable = 0;
+ b->threshold_limit = THRESHOLD_MAX;
+ kobject_set_name(&b->kobj, "bank%i", bank);
+ b->kobj.parent = &per_cpu(device_threshold, cpu).kobj;
+ b->kobj.ktype = &threshold_ktype;
- per_cpu(threshold_banks, i)[bank] = b;
+ err = kobject_register(&b->kobj);
+ if (err) {
+ kfree(b);
+ goto out;
}
-
- goto out;
-
-out_free:
- per_cpu(threshold_banks, cpu)[bank] = NULL;
- kfree(b);
-out:
+ per_cpu(threshold_banks, cpu)[bank] = b;
+ out:
return err;
}
/* create dir/files for all valid threshold banks */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
- unsigned int bank;
+ int bank;
int err = 0;
+ per_cpu(device_threshold, cpu).id = cpu;
+ per_cpu(device_threshold, cpu).cls = &threshold_sysclass;
+ err = sysdev_register(&per_cpu(device_threshold, cpu));
+ if (err)
+ goto out;
+
for (bank = 0; bank < NR_BANKS; ++bank) {
if (!(per_cpu(bank_map, cpu) & 1 << bank))
continue;
if (err)
goto out;
}
-out:
+ out:
return err;
}
* of shared sysfs dir/files, and rest of the cores will be symlinked to it.
*/
-static void deallocate_threshold_block(unsigned int cpu,
- unsigned int bank)
+/* cpu hotplug call removes all symlinks before first core dies */
+static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank)
{
- struct threshold_block *pos = NULL;
- struct threshold_block *tmp = NULL;
- struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];
-
- if (!head)
- return;
-
- list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
- kobject_unregister(&pos->kobj);
- list_del(&pos->miscj);
- kfree(pos);
- }
-
- kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
- per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
-}
-
-static void threshold_remove_bank(unsigned int cpu, int bank)
-{
- int i = 0;
struct threshold_bank *b;
- char name[32];
+ char name[16];
b = per_cpu(threshold_banks, cpu)[bank];
-
if (!b)
return;
-
- if (!b->blocks)
- goto free_out;
-
- sprintf(name, "threshold_bank%i", bank);
-
- /* sibling symlink */
- if (shared_bank[bank] && b->blocks->cpu != cpu) {
- sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name);
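+ /* still referenced by sibling cores: drop only this cpu's symlink; the owning core frees it last */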
+ if (shared_bank[bank] && atomic_read(&b->kobj.kref.refcount) > 2) {
+ sprintf(name, "bank%i", bank);
+ sysfs_remove_link(&per_cpu(device_threshold, cpu).kobj, name);
per_cpu(threshold_banks, cpu)[bank] = NULL;
- return;
+ } else {
+ kobject_unregister(&b->kobj);
+ kfree(per_cpu(threshold_banks, cpu)[bank]);
}
+}
- /* remove all sibling symlinks before unregistering */
- for_each_cpu_mask(i, b->cpus) {
- if (i == cpu)
- continue;
+static __cpuinit void threshold_remove_device(unsigned int cpu)
+{
+ int bank;
- sysfs_remove_link(&per_cpu(device_mce, i).kobj, name);
- per_cpu(threshold_banks, i)[bank] = NULL;
+ for (bank = 0; bank < NR_BANKS; ++bank) {
+ if (!(per_cpu(bank_map, cpu) & 1 << bank))
+ continue;
+ threshold_remove_bank(cpu, bank);
}
+ sysdev_unregister(&per_cpu(device_threshold, cpu));
+}
- deallocate_threshold_block(cpu, bank);
+/* link all existing siblings when first core comes up */
+static __cpuinit int threshold_create_symlinks(unsigned int cpu)
+{
+ int bank, err = 0;
+ unsigned int lcpu = 0;
-free_out:
- kobject_unregister(&b->kobj);
- kfree(b);
- per_cpu(threshold_banks, cpu)[bank] = NULL;
+ if (cpu_core_id[cpu])
+ return 0;
+ for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
+ if (lcpu == cpu)
+ continue;
+ for (bank = 0; bank < NR_BANKS; ++bank) {
+ if (!(per_cpu(bank_map, cpu) & 1 << bank))
+ continue;
+ if (!shared_bank[bank])
+ continue;
+ err = threshold_create_bank(lcpu, bank);
+ }
+ }
+ return err;
}
-static void threshold_remove_device(unsigned int cpu)
+/* remove all symlinks before first core dies. */
+static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
{
- unsigned int bank;
-
- for (bank = 0; bank < NR_BANKS; ++bank) {
- if (!(per_cpu(bank_map, cpu) & 1 << bank))
+ int bank;
+ unsigned int lcpu = 0;
+ if (cpu_core_id[cpu])
+ return;
+ for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
+ if (lcpu == cpu)
continue;
- threshold_remove_bank(cpu, bank);
+ for (bank = 0; bank < NR_BANKS; ++bank) {
+ if (!(per_cpu(bank_map, cpu) & 1 << bank))
+ continue;
+ if (!shared_bank[bank])
+ continue;
+ threshold_remove_bank(lcpu, bank);
+ }
}
}
+#else /* !CONFIG_HOTPLUG_CPU */
+static __cpuinit void threshold_create_symlinks(unsigned int cpu)
+{
+}
+static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
+{
+}
+static void threshold_remove_device(unsigned int cpu)
+{
+}
+#endif
/* get notified when a cpu comes on/off */
static int threshold_cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_ONLINE:
threshold_create_device(cpu);
+ threshold_create_symlinks(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ threshold_remove_symlinks(cpu);
+ break;
+ case CPU_DOWN_FAILED:
+ threshold_create_symlinks(cpu);
break;
case CPU_DEAD:
threshold_remove_device(cpu);
static struct notifier_block threshold_cpu_notifier = {
.notifier_call = threshold_cpu_callback,
};
-#endif /* CONFIG_HOTPLUG_CPU */
static __init int threshold_init_device(void)
{
- unsigned lcpu = 0;
+ int err;
+ int lcpu = 0;
+
+ err = sysdev_class_register(&threshold_sysclass);
+ if (err)
+ goto out;
/* to hit CPUs online before the notifier is up */
for_each_online_cpu(lcpu) {
- int err = threshold_create_device(lcpu);
+ err = threshold_create_device(lcpu);
if (err)
- return err;
+ goto out;
}
- register_hotcpu_notifier(&threshold_cpu_notifier);
- return 0;
+ register_cpu_notifier(&threshold_cpu_notifier);
+
+ out:
+ return err;
}
device_initcall(threshold_init_device);