#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/thread_info.h>

#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/uaccess.h>

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <mach_apic.h>
#endif

extern int trap_init_f00f_bug(void);

#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask;
#endif
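
/*
 * How the mask is consumed (a sketch of the check in
 * arch/i386/lib/usercopy.c): for larger copies, rep movsl is skipped
 * when source and destination are not mutually aligned, roughly
 *
 *	if (n >= 64 && ((src ^ dst) & movsl_mask.mask))
 *		... fall back to the unrolled copy ...
 *
 * so mask = 7 means the two pointers must agree in their low three bits.
 */
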
void __init early_intel_workaround(struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return;
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
}

/*
 * Early probe support logic for ppro memory erratum #50.
 *
 * This is called before we do the CPU ident work.
 */
int __init ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table
{
	unsigned char descriptor;
	char cache_type;
	short size;
};
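
/*
 * Each entry maps one CPUID leaf-2 descriptor byte to the cache it
 * denotes and its size in KB; the all-zero entry terminates the table
 * and is what the lookup loop in init_intel() stops on.
 */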
/* all the cache descriptor types we care about (no TLB entries) */
static struct _cache_table cache_table[] __initdata =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },		/* 4-way set assoc, 32 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },		/* 8-way set assoc, 64 byte line size */
	{ 0x00, 0, 0 }			/* sentinel */
};

/*
 * P4 Xeon errata 037 workaround.
 * Hardware prefetcher may cause stale data to be loaded into the cache.
 */
static void __init Intel_errata_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & (1 << 9)) == 0) {
			printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk(KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= (1 << 9);	/* Disable hw prefetching */
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}
}

static void __init init_intel(struct cpuinfo_x86 *c)
{
	char *p = NULL;
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */

#ifdef CONFIG_X86_F00F_BUG
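	/*
	 * The workaround (trap_init_f00f_bug) remaps the IDT to a read-only
	 * page, so the locked IDT access triggered by the F0 0F sequence
	 * faults instead of deadlocking the bus, and the page fault handler
	 * can redirect it to the invalid-opcode handler.
	 */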
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Note that the workaround should be initialized only once.
	 */
	c->f00f_bug = 0;
	if (c->x86 == 5) {
		static int f00f_workaround_enabled = 0;

		c->f00f_bug = 1;
		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	select_idle_routine(c);
	if (c->cpuid_level > 1) {
		/* supports eax=2 call */
		int i, j, n;
		int regs[4];
		unsigned char *dp = (unsigned char *)regs;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

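		/*
		 * CPUID leaf 2 convention: the low byte of EAX is the number
		 * of times CPUID(2) must be executed to obtain all descriptor
		 * bytes; each pass packs up to 15 one-byte descriptors into
		 * EAX (bytes 1..3), EBX, ECX and EDX, and a register with bit
		 * 31 set carries no valid descriptors.
		 */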
		for (i = 0; i < n; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, the register holds no valid descriptors */
			for (j = 0; j < 4; j++) {
				if (regs[j] < 0)
					regs[j] = 0;
			}

			/* Byte 0 of EAX is the repeat count, not a descriptor */
			for (j = 1; j < 16; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}
						break;
					}
					k++;
				}
			}
		}

		if (trace)
			printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
		else if (l1i)
			printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);
		if (l1d)
			printk(", L1 D cache: %dK\n", l1d);
		else
			printk("\n");
		if (l2)
			printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
		if (l3)
			printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

		/*
		 * This assumes the L3 cache is shared; it typically lives in
		 * the northbridge. The L1 caches are included by the L2
		 * cache, and so should not be included for the purpose of
		 * SMP switching weights.
		 */
		c->x86_cache_size = l2 ? l2 : (l1i + l1d);
	}
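
	/*
	 * The test below packs the signature one nibble per field:
	 * (family << 8 | model << 4 | stepping), so 0x633 decodes as
	 * family 6, model 3, stepping 3; older parts advertise SEP
	 * without a usable SYSENTER.
	 */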
	/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_bit(X86_FEATURE_SEP, c->x86_capability);

	/* Names for the Pentium II/Celeron processors
	   detectable only by also checking the cache size.
	   Dixon is NOT a Celeron. */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 5:
			if (c->x86_mask == 0) {
				if (l2 == 0)
					p = "Celeron (Covington)";
				else if (l2 == 256)
					p = "Mobile Pentium II (Dixon)";
			}
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}
	}

	if (p)
		strcpy(c->x86_model_id, p);

#ifdef CONFIG_X86_HT
	if (cpu_has(c, X86_FEATURE_HT)) {
		extern int phys_proc_id[NR_CPUS];

		u32 eax, ebx, ecx, edx;
		int index_lsb, index_msb, tmp;
		int cpu = smp_processor_id();

		cpuid(1, &eax, &ebx, &ecx, &edx);
		smp_num_siblings = (ebx & 0xff0000) >> 16;
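
		/*
		 * CPUID(1) EBX[23:16] is the number of logical processors per
		 * physical package; EBX[31:24] is the initial APIC ID, used
		 * below to derive the physical package ID.
		 */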
		if (smp_num_siblings == 1) {
			printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
		} else if (smp_num_siblings > 1) {
			index_lsb = 0;
			index_msb = 31;

			if (smp_num_siblings > NR_CPUS) {
				printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
				smp_num_siblings = 1;
				goto too_many_siblings;
			}
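
			/*
			 * Compute ceil(log2(smp_num_siblings)): index_lsb
			 * counts trailing zero bits, index_msb finds the
			 * highest set bit, and a non-power-of-two count
			 * rounds index_msb up. That many low bits of the
			 * initial APIC ID number the siblings within a
			 * package; the remaining bits identify the package.
			 */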
			tmp = smp_num_siblings;
			while ((tmp & 1) == 0) {
				tmp >>= 1;
				index_lsb++;
			}
			tmp = smp_num_siblings;
			while ((tmp & 0x80000000) == 0) {
				tmp <<= 1;
				index_msb--;
			}
			if (index_lsb != index_msb)
				index_msb++;
			phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);

			printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
			       phys_proc_id[cpu]);
		}
	}
too_many_siblings:
#endif

	/* Work around errata */
	Intel_errata_workarounds(c);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves.
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	if (c->x86 == 15)
		set_bit(X86_FEATURE_P4, c->x86_capability);
	if (c->x86 == 6)
		set_bit(X86_FEATURE_P3, c->x86_capability);
}

static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boot-time override
	 * for the 512kb model, and assume 256 otherwise.
	 */
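	/*
	 * The override in question is the "cachesize=" kernel parameter
	 * (cachesize_override in common.c), which the generic code applies
	 * after this callback has produced its guess.
	 */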
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;
	return size;
}

static struct cpu_dev intel_cpu_dev __initdata = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_init		= init_intel,
	.c_identify	= generic_identify,
	.c_size_cache	= intel_size_cache,
};

__init int intel_cpu_init(void)
{
	cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
	return 0;
}

// arch_initcall(intel_cpu_init);
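
/*
 * No initcall on purpose: CPU identification runs long before the
 * initcall machinery, so early_cpu_init() in common.c calls
 * intel_cpu_init() directly to get cpu_devs[] populated in time.
 */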