#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
+#include <linux/module.h>
#include <asm/processor.h>
#include <asm/msr.h>
/*
* Alignment at which movsl is preferred for bulk memory copies.
*/
-struct movsl_mask movsl_mask;
+struct movsl_mask movsl_mask __read_mostly;
#endif
+void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
+{
+ if (c->x86_vendor != X86_VENDOR_INTEL)
+ return;
+	/* Netburst reports a 64 byte clflush size, but does IO in 128 byte chunks */
+ if (c->x86 == 15 && c->x86_cache_alignment == 64)
+ c->x86_cache_alignment = 128;
+}
+
/*
* Early probe support logic for ppro memory erratum #50
*
* This is called before we do cpu ident work
*/
-int __init ppro_with_ram_bug(void)
+int __devinit ppro_with_ram_bug(void)
{
- char vendor_id[16];
- int ident;
-
- /* Must have CPUID */
- if(!have_cpuid_p())
- return 0;
- if(cpuid_eax(0)<1)
- return 0;
-
- /* Must be Intel */
- cpuid(0, &ident,
- (int *)&vendor_id[0],
- (int *)&vendor_id[8],
- (int *)&vendor_id[4]);
-
-	if(memcmp(vendor_id, "GenuineIntel", 12))
- return 0;
-
- ident = cpuid_eax(1);
-
- /* Model 6 */
-
- if(((ident>>8)&15)!=6)
- return 0;
-
- /* Pentium Pro */
-
- if(((ident>>4)&15)!=1)
- return 0;
-
- if((ident&15) < 8)
- {
+ /* Uses data from early_cpu_detect now */
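+	/* x86_mask is the stepping: only steppings below 8 are affected */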
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model == 1 &&
+ boot_cpu_data.x86_mask < 8) {
printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
return 1;
}
- printk(KERN_INFO "Your Pentium Pro seems ok.\n");
return 0;
}
-#define LVL_1_INST 1
-#define LVL_1_DATA 2
-#define LVL_2 3
-#define LVL_3 4
-#define LVL_TRACE 5
-
-struct _cache_table
-{
- unsigned char descriptor;
- char cache_type;
- short size;
-};
-
-/* all the cache descriptor types we care about (no TLB or trace cache entries) */
-static struct _cache_table cache_table[] __initdata =
-{
- { 0x06, LVL_1_INST, 8 },
- { 0x08, LVL_1_INST, 16 },
- { 0x0a, LVL_1_DATA, 8 },
- { 0x0c, LVL_1_DATA, 16 },
- { 0x22, LVL_3, 512 },
- { 0x23, LVL_3, 1024 },
- { 0x25, LVL_3, 2048 },
- { 0x29, LVL_3, 4096 },
- { 0x2c, LVL_1_DATA, 32 },
- { 0x30, LVL_1_INST, 32 },
- { 0x39, LVL_2, 128 },
- { 0x3b, LVL_2, 128 },
- { 0x3c, LVL_2, 256 },
- { 0x41, LVL_2, 128 },
- { 0x42, LVL_2, 256 },
- { 0x43, LVL_2, 512 },
- { 0x44, LVL_2, 1024 },
- { 0x45, LVL_2, 2048 },
- { 0x66, LVL_1_DATA, 8 },
- { 0x67, LVL_1_DATA, 16 },
- { 0x68, LVL_1_DATA, 32 },
- { 0x70, LVL_TRACE, 12 },
- { 0x71, LVL_TRACE, 16 },
- { 0x72, LVL_TRACE, 32 },
- { 0x79, LVL_2, 128 },
- { 0x7a, LVL_2, 256 },
- { 0x7b, LVL_2, 512 },
- { 0x7c, LVL_2, 1024 },
- { 0x82, LVL_2, 256 },
- { 0x83, LVL_2, 512 },
- { 0x84, LVL_2, 1024 },
- { 0x85, LVL_2, 2048 },
- { 0x86, LVL_2, 512 },
- { 0x87, LVL_2, 1024 },
- { 0x00, 0, 0}
-};
/*
* P4 Xeon errata 037 workaround.
* Hardware prefetcher may cause stale data to be loaded into the cache.
*/
-static void __init Intel_errata_workarounds(struct cpuinfo_x86 *c)
+static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
{
unsigned long lo, hi;
}
-static void __init init_intel(struct cpuinfo_x86 *c)
+/*
+ * find out the number of processor cores on the die
+ */
+static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
{
+ unsigned int eax, ebx, ecx, edx;
+
+ if (c->cpuid_level < 4)
+ return 1;
+
+ /* Intel has a non-standard dependency on %ecx for this CPUID level. */
+ cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
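+	/* EAX[4:0] is the cache type; zero means leaf 4 reports nothing.
+	 * Otherwise EAX[31:26] holds (cores per physical package) - 1. */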
+ if (eax & 0x1f)
+ return ((eax >> 26) + 1);
+ else
+ return 1;
+}
+
+static void __devinit init_intel(struct cpuinfo_x86 *c)
+{
+ unsigned int l2 = 0;
char *p = NULL;
- unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
#ifdef CONFIG_X86_F00F_BUG
/*
#endif
select_idle_routine(c);
- if (c->cpuid_level > 1) {
- /* supports eax=2 call */
- int i, j, n;
- int regs[4];
- unsigned char *dp = (unsigned char *)regs;
-
- /* Number of times to iterate */
- n = cpuid_eax(2) & 0xFF;
-
- for ( i = 0 ; i < n ; i++ ) {
-		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
-
- /* If bit 31 is set, this is an unknown format */
- for ( j = 0 ; j < 3 ; j++ ) {
- if ( regs[j] < 0 ) regs[j] = 0;
- }
-
- /* Byte 0 is level count, not a descriptor */
- for ( j = 1 ; j < 16 ; j++ ) {
- unsigned char des = dp[j];
- unsigned char k = 0;
-
- /* look up this descriptor in the table */
- while (cache_table[k].descriptor != 0)
- {
- if (cache_table[k].descriptor == des) {
- switch (cache_table[k].cache_type) {
- case LVL_1_INST:
- l1i += cache_table[k].size;
- break;
- case LVL_1_DATA:
- l1d += cache_table[k].size;
- break;
- case LVL_2:
- l2 += cache_table[k].size;
- break;
- case LVL_3:
- l3 += cache_table[k].size;
- break;
- case LVL_TRACE:
- trace += cache_table[k].size;
- break;
- }
-
- break;
- }
-
- k++;
- }
- }
- }
-
- if ( trace )
- printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
- else if ( l1i )
- printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
- if ( l1d )
- printk(", L1 D cache: %dK\n", l1d);
- else
- printk("\n");
- if ( l2 )
- printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
- if ( l3 )
- printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
-
- /*
- * This assumes the L3 cache is shared; it typically lives in
- * the northbridge. The L1 caches are included by the L2
- * cache, and so should not be included for the purpose of
- * SMP switching weights.
- */
- c->x86_cache_size = l2 ? l2 : (l1i+l1d);
- }
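+	/* Cache descriptor decoding now lives in init_intel_cacheinfo(),
+	 * which returns the L2 size in KB. */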
+ l2 = init_intel_cacheinfo(c);
/* SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until model 3 mask 3 */
if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
if ( p )
strcpy(c->x86_model_id, p);
-#ifdef CONFIG_X86_HT
- if (cpu_has(c, X86_FEATURE_HT)) {
- extern int phys_proc_id[NR_CPUS];
-
- u32 eax, ebx, ecx, edx;
- int index_lsb, index_msb, tmp;
- int cpu = smp_processor_id();
-
- cpuid(1, &eax, &ebx, &ecx, &edx);
- smp_num_siblings = (ebx & 0xff0000) >> 16;
-
- if (smp_num_siblings == 1) {
- printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
- } else if (smp_num_siblings > 1 ) {
- index_lsb = 0;
- index_msb = 31;
-
- if (smp_num_siblings > NR_CPUS) {
- printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
- smp_num_siblings = 1;
- goto too_many_siblings;
- }
- tmp = smp_num_siblings;
- while ((tmp & 1) == 0) {
- tmp >>=1 ;
- index_lsb++;
- }
- tmp = smp_num_siblings;
- while ((tmp & 0x80000000 ) == 0) {
- tmp <<=1 ;
- index_msb--;
- }
- if (index_lsb != index_msb )
- index_msb++;
- phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
-
- printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
- phys_proc_id[cpu]);
- }
+ c->x86_max_cores = num_cpu_cores(c);
- }
-too_many_siblings:
-
-#endif
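+	/* Hyper-Threading sibling setup is now handled by common code */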
+ detect_ht(c);
/* Work around errata */
Intel_errata_workarounds(c);
}
#endif
- if (c->x86 == 15)
+ if (c->x86 == 15)
set_bit(X86_FEATURE_P4, c->x86_capability);
if (c->x86 == 6)
set_bit(X86_FEATURE_P3, c->x86_capability);
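+	/*
+	 * Family 0xf from model 3 (Prescott) and family 6 from model 0xe
+	 * (Yonah) onwards keep the TSC rate constant across frequency
+	 * changes.
+	 */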
+ if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+ (c->x86 == 0x6 && c->x86_model >= 0x0e))
+ set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
}
return size;
}
-static struct cpu_dev intel_cpu_dev __initdata = {
+static struct cpu_dev intel_cpu_dev __devinitdata = {
.c_vendor = "Intel",
.c_ident = { "GenuineIntel" },
.c_models = {
return 0;
}
+#ifndef CONFIG_X86_CMPXCHG
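+/*
+ * The 386 has no cmpxchg instruction, so emulate it by disabling
+ * interrupts around a load-compare-store sequence.  This is only
+ * safe on UP, but a 386 cannot be anything else.
+ */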
+unsigned long cmpxchg_386_u8(volatile void *ptr, u8 old, u8 new)
+{
+ u8 prev;
+ unsigned long flags;
+
+ /* Poor man's cmpxchg for 386. Unsuitable for SMP */
+ local_irq_save(flags);
+ prev = *(u8 *)ptr;
+ if (prev == old)
+ *(u8 *)ptr = new;
+ local_irq_restore(flags);
+ return prev;
+}
+EXPORT_SYMBOL(cmpxchg_386_u8);
+
+unsigned long cmpxchg_386_u16(volatile void *ptr, u16 old, u16 new)
+{
+ u16 prev;
+ unsigned long flags;
+
+ /* Poor man's cmpxchg for 386. Unsuitable for SMP */
+ local_irq_save(flags);
+ prev = *(u16 *)ptr;
+ if (prev == old)
+ *(u16 *)ptr = new;
+ local_irq_restore(flags);
+ return prev;
+}
+EXPORT_SYMBOL(cmpxchg_386_u16);
+
+unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
+{
+ u32 prev;
+ unsigned long flags;
+
+ /* Poor man's cmpxchg for 386. Unsuitable for SMP */
+ local_irq_save(flags);
+ prev = *(u32 *)ptr;
+ if (prev == old)
+ *(u32 *)ptr = new;
+ local_irq_restore(flags);
+ return prev;
+}
+EXPORT_SYMBOL(cmpxchg_386_u32);
+#endif
+
// arch_initcall(intel_cpu_init);