/* This only handles 32-bit MTRR on 32-bit hosts. This is strictly wrong
   because MTRRs can span up to 40 bits (36 bits on most modern x86) */
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include "mtrr.h"
struct mtrr_state {
	struct mtrr_var_range *var_ranges;
	mtrr_type fixed_ranges[NUM_FIXED_RANGES];
	unsigned char enabled;
	mtrr_type def_type;
};

static unsigned long smp_changes_mask;
struct mtrr_state mtrr_state = {};

/* Get the MSR pair relating to a var range */
static void __init
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

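/*
 * MSR layout (architectural, per the IA-32 SDM): MTRRphysBase keeps the
 * memory type in bits 0-7 and the base address from bit 12 up;
 * MTRRphysMask keeps the "valid" bit in bit 11 and the address mask
 * from bit 12 up. generic_get_mtrr() below decodes exactly this layout.
 */
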
static void __init
get_fixed_ranges(mtrr_type * frs)
{
	unsigned int *p = (unsigned int *) frs;
	int i;

	rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
}

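/*
 * Index arithmetic above: the 1 + 2 + 8 = 11 fixed-range MSRs are read
 * as two 32-bit halves each, so p[] receives 22 words; each MSR packs
 * eight one-byte range types, giving the 88 entries (NUM_FIXED_RANGES)
 * of the fixed_ranges array.
 */
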
/* Grab all of the MTRR state for this CPU into mtrr_state */
void __init get_mtrr_state(void)
{
	unsigned int i;
	struct mtrr_var_range *vrs;
	unsigned lo, dummy;

	if (!mtrr_state.var_ranges) {
		mtrr_state.var_ranges = kmalloc(num_var_ranges * sizeof (struct mtrr_var_range),
						GFP_KERNEL);
		if (!mtrr_state.var_ranges)
			return;
	}
	vrs = mtrr_state.var_ranges;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MTRRdefType_MSR, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;
}

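/*
 * MTRRdefType layout (architectural): bits 0-7 hold the default memory
 * type, bit 10 the fixed-range enable (FE) and bit 11 the global MTRR
 * enable (E), so (lo & 0xc00) >> 10 packs E:FE into the two-bit
 * 'enabled' field.
 */
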
/* Free resources associated with a struct mtrr_state */
void __init finalize_mtrr_state(void)
{
	if (mtrr_state.var_ranges)
		kfree(mtrr_state.var_ranges);
	mtrr_state.var_ranges = NULL;
}

/* Some BIOSes are broken and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n");
	printk(KERN_INFO "mtrr: probably your BIOS does not set up all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

int generic_get_free_region(unsigned long base, unsigned long size)
/* [SUMMARY] Get a free MTRR.
   <base> The starting (base) address of the region.
   <size> The size (in bytes) of the region.
   [RETURNS] The index of the region on success, else a negative error code.
*/
{
	unsigned long lbase;
	unsigned lsize;
	mtrr_type ltype;
	int i, max = num_var_ranges;

	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}
	return -ENOSPC;
}

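/*
 * A range with lsize == 0 is free: generic_set_mtrr() below disables a
 * range by clearing its mask register, and generic_get_mtrr() reports
 * such a range with *size == 0.
 */
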
void generic_get_mtrr(unsigned int reg, unsigned long *base,
		      unsigned int *size, mtrr_type * type)
{
	unsigned int mask_lo, mask_hi, base_lo, base_hi;

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
	if ((mask_lo & 0x800) == 0) {
		/* Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		return;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask. */
	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
	    | mask_lo >> PAGE_SHIFT;

	/* This works correctly if size is a power of two, i.e. a
	   contiguous range. */
	*size = -mask_lo;
	*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;
}

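/*
 * Worked example (illustrative, assuming PAGE_SHIFT == 12 and a 36-bit
 * address space where size_or_mask == 0xff000000): for a 128 MiB range
 * the mask MSR holds mask_hi = 0xf, mask_lo = 0xf8000800, so the
 * shifted mask is 0xff000000 | 0x00f00000 | 0x000f8000 = 0xffff8000
 * and *size = -0xffff8000 = 0x8000 pages = 128 MiB.
 */
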
static int set_fixed_ranges(mtrr_type * frs)
{
	unsigned int *p = (unsigned int *) frs;
	int changed = 0, i;
	unsigned int lo, hi;

	rdmsr(MTRRfix64K_00000_MSR, lo, hi);
	if (p[0] != lo || p[1] != hi) {
		wrmsr(MTRRfix64K_00000_MSR, p[0], p[1]);
		changed = 1;
	}
	for (i = 0; i < 2; i++) {
		rdmsr(MTRRfix16K_80000_MSR + i, lo, hi);
		if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
			wrmsr(MTRRfix16K_80000_MSR + i, p[2 + i * 2],
			      p[3 + i * 2]);
			changed = 1;
		}
	}
	for (i = 0; i < 8; i++) {
		rdmsr(MTRRfix4K_C0000_MSR + i, lo, hi);
		if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
			wrmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
			      p[7 + i * 2]);
			changed = 1;
		}
	}
	return changed;
}

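/*
 * The read-compare-write pattern above is deliberate: wrmsr is a
 * serializing (slow) instruction, so only MSRs that actually differ
 * are rewritten; the return value feeds MTRR_CHANGE_MASK_FIXED in
 * set_mtrr_state().
 */
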
/* Set the MSR pair relating to a var range. Returns 1 if
   changes are made */
static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	int changed = 0;

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & 0xfUL) != (hi & 0xfUL)) {
		wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
		changed = 1;
	}
	rdmsr(MTRRphysMask_MSR(index), lo, hi);
	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) {
		wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = 1;
	}
	return changed;
}

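/*
 * The comparison masks skip reserved bits: 0xfffff0ff covers the type
 * byte and base address of MTRRphysBase (bits 8-11 are reserved),
 * 0xfffff800 covers the valid bit (11) and address mask of
 * MTRRphysMask, and the 0xf on the high words limits the compare to a
 * 36-bit physical address space.
 */
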
/* Shared by set_mtrr_state()/prepare_set()/post_set() below; the lock
   serializes the cache-disabled MTRR update sequence across CPUs. */
static unsigned long cr4 = 0;
static u32 deftype_lo, deftype_hi;
static DEFINE_SPINLOCK(set_atomicity_lock);

static unsigned long set_mtrr_state(void)
/* [SUMMARY] Set the MTRR state for this CPU.
   [NOTE] The CPU must already be in a safe state for MTRR changes.
   [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
	unsigned int i;
	unsigned long change_mask = 0;

	for (i = 0; i < num_var_ranges; i++)
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;

	if (set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/* post_set() restores the saved value of MTRRdefType,
	   so to set it we fiddle with the saved value; clear the old
	   type and enable bits before merging in the wanted ones */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
			     (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}

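/*
 * The adjusted deftype_lo is not written back here: prepare_set() has
 * already saved the MSR contents and disabled the MTRRs, and post_set()
 * is what finally writes deftype_lo/deftype_hi - and with it the
 * corrected default type and enable bits - back to MTRRdefType_MSR.
 */
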
/*
 * Since we are disabling the cache don't allow any interrupts - they
 * would run extremely slow and would only increase the pain. The caller must
 * ensure that local interrupts are disabled and are reenabled after post_set()
 * has been called.
 */

static void prepare_set(void)
{
	unsigned long cr0;

	/* Note that this is not ideal, since the cache is only flushed/disabled
	   for this CPU while the MTRRs are changed, but changing this requires
	   more invasive changes to the way the kernel boots */

	spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | 0x40000000;	/* set CD flag */
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi);
}

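/*
 * Magic numbers above: 0x40000000 is CR0.CD (bit 30); setting CD while
 * leaving NW (bit 29) clear selects the no-fill cache mode. The
 * 0xf300UL mask clears the default-type byte (making the default
 * uncached) and the E/FE enable bits (11, 10) of MTRRdefType while
 * preserving its reserved bits.
 */
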
static void post_set(void)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	wrmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & 0xbfffffff);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		write_cr4(cr4);
	spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}
}

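/*
 * set_bit() rather than a plain OR: generic_set_all() runs on every
 * CPU during bring-up, and all of them fold their local change mask
 * into the single global smp_changes_mask read by mtrr_state_warn().
 */
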
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
/* [SUMMARY] Set variable MTRR register on the local CPU.
   <reg> The register to set.
   <base> The base address of the region.
   <size> The size of the region. If this is 0 the region is disabled.
   <type> The type of the region.
   [RETURNS] Nothing.
*/
{
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/* The invalid bit is kept in the mask, so we simply clear the
		   relevant mask register to disable a range. */
		wrmsr(MTRRphysMask_MSR(reg), 0, 0);
	} else {
		wrmsr(MTRRphysBase_MSR(reg), base << PAGE_SHIFT | type,
		      (base & size_and_mask) >> (32 - PAGE_SHIFT));
		wrmsr(MTRRphysMask_MSR(reg), -size << PAGE_SHIFT | 0x800,
		      (-size & size_and_mask) >> (32 - PAGE_SHIFT));
	}

	post_set();
	local_irq_restore(flags);
}

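/*
 * Note that base and size are in pages here. Continuing the 128 MiB
 * example from generic_get_mtrr() (illustrative: PAGE_SHIFT == 12,
 * 36-bit CPU with size_and_mask == 0x00f00000): size = 0x8000 pages,
 * so -size << PAGE_SHIFT | 0x800 = 0xf8000800 and
 * (-size & size_and_mask) >> (32 - PAGE_SHIFT) = 0xf, reproducing the
 * mask_hi:mask_lo pair decoded there.
 */
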
int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
{
	unsigned long lbase, last;

	/* For Intel PPro stepping <= 7, must be 4 MiB aligned
	   and not touch 0x70000000->0x7003FFFF */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		/* base and size are in pages, so scale the byte addresses */
		if (!(base + size < (0x70000000 >> PAGE_SHIFT) ||
		      base > (0x7003FFFF >> PAGE_SHIFT)) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	if (base + size < 0x100) {
		printk(KERN_WARNING "mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
		       base, size);
		return -EINVAL;
	}
	/* Check upper bits of base and last are equal and lower bits are 0
	   for base and 1 for last */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
		       base, size);
		return -EINVAL;
	}
	return 0;
}

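/*
 * Worked example for the alignment loop (values in pages): base =
 * 0x40000, size = 0x40000 gives last = 0x7ffff; lbase and last shift
 * down to 1 together, lbase == last, and the region is accepted.
 * base = 0x40000, size = 0x30000 gives last = 0x6ffff; the loop stops
 * with lbase = 4, last = 6, and the request is rejected as misaligned.
 */
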
int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MTRRcap_MSR, config, dummy);
	return (config & (1 << 10));	/* bit 10 is the WC (write-combining) flag */
}

int positive_have_wrcomb(void)
{
	return 1;
}

/* Generic (Intel-compatible) MTRR operations */
struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if      = 1,
	.set_all           = generic_set_all,
	.get               = generic_get_mtrr,
	.get_free_region   = generic_get_free_region,
	.set               = generic_set_mtrr,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = generic_have_wrcomb,
};