/*  Generic MTRR (Memory Type Range Register) driver.

    Copyright (C) 1997-2000  Richard Gooch
    Copyright (c) 2002       Patrick Mochel

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Library General Public
    License as published by the Free Software Foundation; either
    version 2 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Library General Public License for more details.

    You should have received a copy of the GNU Library General Public
    License along with this library; if not, write to the Free
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    Richard Gooch may be reached by email at rgooch@atnf.csiro.au
    The postal address is:
      Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.

    Source: "Pentium Pro Family Developer's Manual, Volume 3:
    Operating System Writer's Guide" (Intel document number 242692),
    section 11.11.7

    This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
    on 6-7 March 2002.
    Source: Intel Architecture Software Developers Manual, Volume 3:
    System Programming Guide; Section 9.11. (1997 edition - PPro).
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/mtrr.h>

#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include "mtrr.h"
#define MTRR_VERSION	"2.0 (20020519)"

u32 num_var_ranges = 0;

unsigned int *usage_table;
static DECLARE_MUTEX(main_lock);

u32 size_or_mask, size_and_mask;

static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};

struct mtrr_ops * mtrr_if = NULL;

__initdata char *mtrr_if_name[] = {
	"none", "Intel", "AMD K6", "Cyrix ARR", "Centaur MCR"
};

static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type);

extern int arr3_protected;
void set_mtrr_ops(struct mtrr_ops * ops)
{
	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
		mtrr_ops[ops->vendor] = ops;
}
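
/*
 * Registration sketch (illustrative only, not part of this file): a
 * vendor backend such as amd.c registers itself from its init routine.
 * The "sample_" names are hypothetical; the struct fields shown are the
 * ones this file actually calls through mtrr_if:
 *
 *	static struct mtrr_ops sample_mtrr_ops = {
 *		.vendor            = X86_VENDOR_AMD,
 *		.set               = sample_set_mtrr,
 *		.get               = sample_get_mtrr,
 *		.get_free_region   = sample_get_free_region,
 *		.validate_add_page = sample_validate_add_page,
 *		.have_wrcomb       = sample_have_wrcomb,
 *	};
 *
 *	void __init sample_init_mtrr(void)
 *	{
 *		set_mtrr_ops(&sample_mtrr_ops);
 *	}
 */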
/*  Returns non-zero if we have the write-combining memory type  */
static int have_wrcomb(void)
{
	struct pci_dev *dev;

	if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
		/* ServerWorks LE chipsets have problems with write-combining.
		   Don't allow it and leave room for other chipsets to be tagged */
		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
			printk(KERN_INFO "mtrr: Serverworks LE detected. Write-combining disabled.\n");
			return 0;
		}
		/* Intel 450NX errata #23. Non-ascending cacheline evictions to
		   write-combining memory may result in data corruption */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
			printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
			return 0;
		}
	}
	return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
}
/*  This function determines the number of variable MTRRs and records it
    in num_var_ranges  */
void __init set_num_var_ranges(void)
{
	unsigned long config = 0, dummy;

	if (use_intel()) {
		rdmsr(MTRRcap_MSR, config, dummy);
	} else if (is_cpu(AMD))
		config = 2;
	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
		config = 8;
	num_var_ranges = config & 0xff;
}
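
/*
 * Worked example (assumed register value): on a typical P6-family CPU,
 * rdmsr(MTRRcap_MSR, ...) yields 0x508 in the low word: bits 7:0 = 8
 * variable ranges, bit 8 = fixed-range MTRRs present, bit 10 = the
 * write-combining type is supported.  "config & 0xff" then leaves
 * num_var_ranges == 8; the AMD K6 (2) and Cyrix/Centaur (8) constants
 * above stand in for the same count on CPUs without an MTRRcap MSR.
 */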
static void __init init_table(void)
{
	int i, max;

	max = num_var_ranges;
	if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
	    == NULL) {
		printk(KERN_ERR "mtrr: could not allocate\n");
		return;
	}
	for (i = 0; i < max; i++)
		usage_table[i] = 1;
}
struct set_mtrr_data {
	atomic_t	count;
	atomic_t	gate;
	unsigned long	smp_base;
	unsigned long	smp_size;
	unsigned int	smp_reg;
	mtrr_type	smp_type;
};
static void ipi_handler(void *info)
/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
    [RETURNS] Nothing.
*/
{
	struct set_mtrr_data *data = info;
	unsigned long flags;

	local_irq_save(flags);

	atomic_dec(&data->count);
	while (!atomic_read(&data->gate)) {
		cpu_relax();
	}

	/*  The master has cleared me to execute  */
	if (data->smp_reg != ~0U)
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);

	atomic_dec(&data->count);
	while (atomic_read(&data->gate)) {
		cpu_relax();
	}

	atomic_dec(&data->count);
	local_irq_restore(flags);
}
/**
 * set_mtrr - update mtrrs on all processors
 * @reg:	mtrr in question
 * @base:	mtrr base
 * @size:	mtrr size
 * @type:	mtrr type
 *
 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
 *
 * 1. Send IPI to do the following:
 * 2. Disable interrupts
 * 3. Wait for all procs to do so
 * 4. Enter no-fill cache mode
 * 5. Flush caches
 * 6. Clear PGE bit
 * 7. Flush all TLBs
 * 8. Disable all range registers
 * 9. Update the MTRRs
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU disables interrupts, it decrements it once. We wait
 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
 * Meanwhile, the other CPUs are waiting for that flag to be set. Once it is,
 * each CPU goes through the transition of updating MTRRs. The CPU vendors may
 * each do it differently, so we call the mtrr_if->set() callback and let them
 * take care of it. When they're done, they again decrement data->count and
 * wait for data.gate to be cleared.
 * When we finish, we wait for data.count to hit 0 and toggle the data.gate
 * flag. Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * is stripped out.
 */
static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type)
{
	struct set_mtrr_data data;
	unsigned long flags;

	data.smp_reg = reg;
	data.smp_base = base;
	data.smp_size = size;
	data.smp_type = type;
	atomic_set(&data.count, num_booting_cpus() - 1);
	atomic_set(&data.gate, 0);

	/*  Start the ball rolling on other CPUs  */
	if (smp_call_function(ipi_handler, &data, 1, 0) != 0)
		panic("mtrr: timed out waiting for other CPUs\n");

	local_irq_save(flags);

	while (atomic_read(&data.count)) {
		cpu_relax();
	}
	/* ok, reset count and toggle gate */
	atomic_set(&data.count, num_booting_cpus() - 1);
	atomic_set(&data.gate, 1);

	/* do our MTRR business */

	/* HACK!
	 * We use this same function to initialize the MTRRs on boot.
	 * The state of the boot CPU's MTRRs has been saved, and we want
	 * to replicate that across all the APs.
	 * If we're doing that, @reg is set to something special...
	 */
	if (reg != ~0U)
		mtrr_if->set(reg, base, size, type);

	/* wait for the others */
	while (atomic_read(&data.count)) {
		cpu_relax();
	}
	atomic_set(&data.count, num_booting_cpus() - 1);
	atomic_set(&data.gate, 0);

	/*
	 * Wait here for everyone to have seen the gate change,
	 * so we're the last ones to touch 'data'.
	 */
	while (atomic_read(&data.count)) {
		cpu_relax();
	}
	local_irq_restore(flags);
}
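
/*
 * Illustrative timeline of the rendezvous above (assumes
 * num_booting_cpus() == 4, i.e. one master and three others; the
 * columns are data.count/data.gate):
 *
 *	3/0  master waits; each of the others disables IRQs, count--
 *	0/0  master resets count to 3 and opens the gate
 *	3/1  everyone (master included) programs its MTRRs, count--
 *	0/1  master resets count to 3 and closes the gate
 *	3/0  the others see the gate close and do a final count--
 *	0/0  master is provably the last CPU to touch 'data', which
 *	     lives on its stack, so it may now safely return
 */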
/**
 * mtrr_add_page - Add a memory type region
 * @base: Physical base address of region in pages (4 KB)
 * @size: Physical size of region in pages (4 KB)
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but the caller should
 * nevertheless expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added, either because all regions are in use
 * or the CPU cannot support it, a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as opaque: it is only meaningful as an argument to mtrr_del_page().
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add_page(unsigned long base, unsigned long size,
		  unsigned int type, char increment)
{
	int i, error;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	if (!mtrr_if)
		return -ENXIO;
	if ((error = mtrr_if->validate_add_page(base, size, type)))
		return error;
	if (type >= MTRR_NUM_TYPES) {
		printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
		return -EINVAL;
	}
	/*  If the type is WC, check that this processor supports it  */
	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
		printk(KERN_WARNING
		       "mtrr: your processor doesn't support write-combining\n");
		return -ENOSYS;
	}
	if (base & size_or_mask || size & size_or_mask) {
		printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
		return -EINVAL;
	}
	error = -EINVAL;

	/*  Search for existing MTRR  */
	down(&main_lock);
	for (i = 0; i < num_var_ranges; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (base >= lbase + lsize)
			continue;
		if ((base < lbase) && (base + size <= lbase))
			continue;
		/*  At this point we know there is some kind of overlap/enclosure  */
		if ((base < lbase) || (base + size > lbase + lsize)) {
			printk(KERN_WARNING
			       "mtrr: 0x%lx000,0x%lx000 overlaps existing"
			       " 0x%lx000,0x%lx000\n", base, size, lbase,
			       lsize);
			goto out;
		}
		/*  New region is enclosed by an existing region  */
		if (ltype != type) {
			if (type == MTRR_TYPE_UNCACHABLE)
				continue;
			printk(KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
			       base, size, mtrr_attrib_to_str(ltype),
			       mtrr_attrib_to_str(type));
			goto out;
		}
		if (increment)
			++usage_table[i];
		error = i;
		goto out;
	}
	/*  Search for an empty MTRR  */
	i = mtrr_if->get_free_region(base, size);
	if (i >= 0) {
		set_mtrr(i, base, size, type);
		usage_table[i] = 1;
	} else
		printk(KERN_INFO "mtrr: no more MTRRs available\n");
	error = i;
 out:
	up(&main_lock);
	return error;
}
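
/*
 * Illustrative call (hypothetical addresses): a driver mapping a 4 MB
 * linear framebuffer at 0xf8000000 would pass page units here, i.e.
 * base 0xf8000 and size 0x400 (both values shifted right by PAGE_SHIFT):
 *
 *	int reg = mtrr_add_page(0xf8000, 0x400, MTRR_TYPE_WRCOMB, 1);
 *
 * Most drivers use the byte-based mtrr_add() wrapper below instead.
 */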
/**
 * mtrr_add - Add a memory type region
 * @base: Physical base address of region
 * @size: Physical size of region
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request that an
 * MTRR be added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but the caller should
 * nevertheless expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added, either because all regions are in use
 * or the CPU cannot support it, a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as opaque: it is only meaningful as an argument to mtrr_del().
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 *
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 *
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 *
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int
mtrr_add(unsigned long base, unsigned long size, unsigned int type,
	 char increment)
{
	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
		printk(KERN_WARNING "mtrr: size and base must be multiples of 4 kiB\n");
		printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
		return -EINVAL;
	}
	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
			     increment);
}
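
/*
 * Typical driver usage (sketch; the aperture address and length are made
 * up for illustration): request write-combining for a memory-mapped
 * aperture and tolerate failure, since WC is an optimization rather than
 * a requirement:
 *
 *	int wc_cookie;
 *
 *	wc_cookie = mtrr_add(aperture_base, aperture_len,
 *			     MTRR_TYPE_WRCOMB, 1);
 *	if (wc_cookie < 0)
 *		printk(KERN_DEBUG "sample: no write-combining available\n");
 *
 * Both aperture_base and aperture_len must be multiples of 4 kiB, and
 * the length a power of two on an equal boundary, per the rules above.
 */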
/**
 * mtrr_del_page - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */
int mtrr_del_page(int reg, unsigned long base, unsigned long size)
{
	int i, max, error = -EINVAL;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	if (!mtrr_if)
		return -ENXIO;

	max = num_var_ranges;
	down(&main_lock);
	if (reg < 0) {
		/*  Search for existing MTRR  */
		for (i = 0; i < max; ++i) {
			mtrr_if->get(i, &lbase, &lsize, &ltype);
			if (lbase == base && lsize == size) {
				reg = i;
				break;
			}
		}
		if (reg < 0) {
			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
			       size);
			goto out;
		}
	}
	if (reg >= max) {
		printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
		goto out;
	}
	if (is_cpu(CYRIX) && !use_intel()) {
		if ((reg == 3) && arr3_protected) {
			printk(KERN_WARNING "mtrr: ARR3 cannot be changed\n");
			goto out;
		}
	}
	mtrr_if->get(reg, &lbase, &lsize, &ltype);
	if (lsize < 1) {
		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
		goto out;
	}
	if (usage_table[reg] < 1) {
		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
		goto out;
	}
	if (--usage_table[reg] < 1)
		set_mtrr(reg, 0, 0, 0);
	error = reg;
 out:
	up(&main_lock);
	return error;
}
/**
 * mtrr_del - delete a memory type region
 * @reg: Register returned by mtrr_add
 * @base: Physical base address
 * @size: Size of region
 *
 * If register is supplied then base and size are ignored. This is
 * how drivers should call it.
 *
 * Releases an MTRR region. If the usage count drops to zero the
 * register is freed and the region returns to default state.
 * On success the register is returned, on failure a negative error
 * code is returned.
 */
int
mtrr_del(int reg, unsigned long base, unsigned long size)
{
	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
		printk(KERN_INFO "mtrr: size and base must be multiples of 4 kiB\n");
		printk(KERN_DEBUG "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
		return -EINVAL;
	}
	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}
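
/*
 * Illustrative teardown (values hypothetical, matching the mtrr_add
 * sketch above): a driver that kept the returned cookie passes it, and
 * base and size are then ignored; one that did not passes reg == -1
 * plus the exact original range so the entry is found by base and size:
 *
 *	mtrr_del(wc_cookie, 0, 0);
 *	mtrr_del(-1, aperture_base, aperture_len);
 */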
EXPORT_SYMBOL(mtrr_add);
EXPORT_SYMBOL(mtrr_del);

/*
 * These should be called implicitly, but we can't do that until all the
 * initcall ordering issues are sorted out.
 */
extern void amd_init_mtrr(void);
extern void cyrix_init_mtrr(void);
extern void centaur_init_mtrr(void);

static void __init init_ifs(void)
{
	amd_init_mtrr();
	cyrix_init_mtrr();
	centaur_init_mtrr();
}

static void __init init_other_cpus(void)
{
	if (use_intel())
		get_mtrr_state();

	/* bring up the other processors */
	set_mtrr(~0U, 0, 0, 0);

	if (use_intel()) {
		finalize_mtrr_state();
		mtrr_state_warn();
	}
}
struct mtrr_value {
	mtrr_type	ltype;
	unsigned long	lbase;
	unsigned long	lsize;
};

static struct mtrr_value * mtrr_state;

static int mtrr_save(struct sys_device * sysdev, u32 state)
{
	int i;
	int size = num_var_ranges * sizeof(struct mtrr_value);

	mtrr_state = kmalloc(size, GFP_ATOMIC);
	if (mtrr_state)
		memset(mtrr_state, 0, size);
	else
		return -ENOMEM;

	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i,
			     &mtrr_state[i].lbase,
			     &mtrr_state[i].lsize,
			     &mtrr_state[i].ltype);
	}
	return 0;
}

static int mtrr_restore(struct sys_device * sysdev)
{
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (mtrr_state[i].lsize)
			set_mtrr(i,
				 mtrr_state[i].lbase,
				 mtrr_state[i].lsize,
				 mtrr_state[i].ltype);
	}
	kfree(mtrr_state);
	return 0;
}
static struct sysdev_driver mtrr_sysdev_driver = {
	.suspend	= mtrr_save,
	.resume		= mtrr_restore,
};
/**
 * mtrr_init - initialize mtrrs on the boot CPU
 *
 * This needs to be called early; before any of the other CPUs are
 * initialized (i.e. before smp_init()).
 */
static int __init mtrr_init(void)
{
	init_ifs();

	if (cpu_has_mtrr) {
		mtrr_if = &generic_mtrr_ops;
		size_or_mask = 0xff000000;	/* 36 bits */
		size_and_mask = 0x00f00000;

		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			/* The original Athlon docs said that
			   total addressable memory is 44 bits wide.
			   It was not really clear whether its MTRRs
			   follow this or not. (Read: 44 or 36 bits).
			   However, "x86-64_overview.pdf" explicitly
			   states that "previous implementations support
			   36 bit MTRRs" and also provides a way to
			   query the width (in bits) of the physical
			   addressable memory on the Hammer family.
			 */
			if (boot_cpu_data.x86 == 15
			    && (cpuid_eax(0x80000000) >= 0x80000008)) {
				u32 phys_addr;
				phys_addr = cpuid_eax(0x80000008) & 0xff;
				size_or_mask =
				    ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
				size_and_mask = ~size_or_mask & 0xfff00000;
			}
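			/*
			 * Worked example (assumed CPUID value): a Hammer
			 * reporting a 40-bit physical address width gives
			 * phys_addr = 40, so with PAGE_SHIFT = 12:
			 *
			 *   size_or_mask  = ~((1 << 28) - 1)
			 *                 = 0xf0000000
			 *   size_and_mask = ~0xf0000000 & 0xfff00000
			 *                 = 0x0ff00000
			 *
			 * Any page-frame number with bits 28 and up set
			 * then exceeds the 40-bit MTRR width and is
			 * rejected by the checks in mtrr_add_page().
			 */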
			/* Athlon MTRRs use an Intel-compatible interface
			 * for getting and setting */
			break;
		case X86_VENDOR_CENTAUR:
			if (boot_cpu_data.x86 == 6) {
				/* The VIA Cyrix family has Intel-style
				   MTRRs, but doesn't support PAE */
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		default:
			break;
		}
	} else {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (cpu_has_k6_mtrr) {
				/* Pre-Athlon (K6) AMD CPU MTRRs */
				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CENTAUR:
			if (cpu_has_centaur_mcr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CYRIX:
			if (cpu_has_cyrix_arr) {
				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
				size_or_mask = 0xfff00000;	/* 32 bits */
				size_and_mask = 0;
			}
			break;
		default:
			break;
		}
	}
	printk(KERN_INFO "mtrr: v%s\n", MTRR_VERSION);

	if (mtrr_if) {
		set_num_var_ranges();
		init_table();
		init_other_cpus();
		return sysdev_driver_register(&cpu_sysdev_class,
					      &mtrr_sysdev_driver);
	}
	return -ENXIO;
}

subsys_initcall(mtrr_init);