#include <linux/string.h>
#include <linux/delay.h>
#include <linux/smp.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include "cpu.h"
+DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
+EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);
+
static int cachesize_override __initdata = -1;
static int disable_x86_fxsr __initdata = 0;
static int disable_x86_serial_nr __initdata = 1;
}
-
-void __init get_cpu_vendor(struct cpuinfo_x86 *c)
+void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
char *v = c->x86_vendor_id;
int i;
(cpu_devs[i]->c_ident[1] &&
!strcmp(v,cpu_devs[i]->c_ident[1]))) {
c->x86_vendor = i;
- this_cpu = cpu_devs[i];
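+ /* Vendor probe only: defer installing this_cpu until the full identify pass. */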
+ if (!early)
+ this_cpu = cpu_devs[i];
break;
}
}
return flag_is_changeable_p(X86_EFLAGS_ID);
}
+/*
+ * Do minimum CPU detection early.
+ * Fields really needed: vendor, cpuid_level, family, model, mask,
+ * cache alignment.  The others are not touched to avoid unwanted
+ * side effects.
+ */
+void __init early_cpu_detect(void)
+{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+
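+ /* Conservative default; refined below if CPUID reports a CLFLUSH line size. */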
+ c->x86_cache_alignment = 32;
+
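+ /* Without CPUID nothing else can be probed this early. */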
+ if (!have_cpuid_p())
+ return;
+
+ /* Get vendor name */
+ cpuid(0x00000000, &c->cpuid_level,
+ (int *)&c->x86_vendor_id[0],
+ (int *)&c->x86_vendor_id[8],
+ (int *)&c->x86_vendor_id[4]);
+
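+ /* early=1: resolve c->x86_vendor only, leave this_cpu alone. */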
+ get_cpu_vendor(c, 1);
+
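+ /* Assume a 486 unless CPUID leaf 1 tells us otherwise. */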
+ c->x86 = 4;
+ if (c->cpuid_level >= 0x00000001) {
+ u32 junk, tfms, cap0, misc;
+ cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
+ c->x86 = (tfms >> 8) & 15;
+ c->x86_model = (tfms >> 4) & 15;
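+ /* Family 0xf CPUs carry the real family/model in the extended bits. */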
+ if (c->x86 == 0xf) {
+ c->x86 += (tfms >> 20) & 0xff;
+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
+ }
+ c->x86_mask = tfms & 15;
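+ /* EDX bit 19 (CLFSH) set: EBX[15:8] is the CLFLUSH line size in 8-byte units. */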
+ if (cap0 & (1<<19))
+ c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
+ }
+
+ early_intel_workaround(c);
+}
+
void __init generic_identify(struct cpuinfo_x86 * c)
{
u32 tfms, xlvl;
(int *)&c->x86_vendor_id[8],
(int *)&c->x86_vendor_id[4]);
- get_cpu_vendor(c);
+ get_cpu_vendor(c, 0);
/* Initialize the standard set of capabilities */
/* Note that the vendor-specific code below might override */
generic_identify(c);
printk(KERN_DEBUG "CPU: After generic identify, caps: %08lx %08lx %08lx %08lx\n",
c->x86_capability[0],
c->x86_capability[1],
c->x86_capability[2],
if (this_cpu->c_identify) {
this_cpu->c_identify(c);
printk(KERN_DEBUG "CPU: After vendor identify, caps: %08lx %08lx %08lx %08lx\n",
c->x86_capability[0],
c->x86_capability[1],
c->x86_capability[2],
/* Now the feature flags better reflect actual CPU features! */
printk(KERN_DEBUG "CPU: After all inits, caps: %08lx %08lx %08lx %08lx\n",
c->x86_capability[0],
c->x86_capability[1],
c->x86_capability[2],
void __init dodgy_tsc(void)
{
- get_cpu_vendor(&boot_cpu_data);
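+ /* Vendor is already set by early_cpu_detect(). */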
if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
( boot_cpu_data.x86_vendor == X86_VENDOR_NSC ))
cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
extern int rise_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);
+void early_cpu_detect(void);
void __init early_cpu_init(void)
{
rise_init_cpu();
nexgen_init_cpu();
umc_init_cpu();
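+ /* Last: get_cpu_vendor() can now match against every registered cpu_devs[] entry. */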
+ early_cpu_detect();
#ifdef CONFIG_DEBUG_PAGEALLOC
/* pse is not compatible with on-the-fly unmapping,
void __init cpu_init (void)
{
int cpu = smp_processor_id();
- struct tss_struct * t = init_tss + cpu;
+ struct tss_struct * t = &per_cpu(init_tss, cpu);
struct thread_struct *thread = &current->thread;
if (test_and_set_bit(cpu, &cpu_initialized)) {
* Initialize the per-CPU GDT with the boot GDT,
* and set up the GDT descriptor:
*/
- if (cpu) {
- memcpy(cpu_gdt_table[cpu], cpu_gdt_table[0], GDT_SIZE);
- cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
- cpu_gdt_descr[cpu].address = (unsigned long)cpu_gdt_table[cpu];
- }
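+ /* Every CPU, boot CPU included, now switches to its own per-CPU GDT copy. */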
+ memcpy(&per_cpu(cpu_gdt_table, cpu), cpu_gdt_table,
+ GDT_SIZE);
+ cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
+ cpu_gdt_descr[cpu].address =
+ (unsigned long)&per_cpu(cpu_gdt_table, cpu);
+
/*
* Set up the per-thread TLS descriptor cache:
*/
- memcpy(thread->tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8);
+ memcpy(thread->tls_array, &per_cpu(cpu_gdt_table, cpu),
+ GDT_ENTRY_TLS_ENTRIES * 8);
__asm__ __volatile__("lgdt %0" : : "m" (cpu_gdt_descr[cpu]));
__asm__ __volatile__("lidt %0" : : "m" (idt_descr));
load_esp0(t, thread);
set_tss_desc(cpu,t);
- cpu_gdt_table[cpu][GDT_ENTRY_TSS].b &= 0xfffffdff;
load_TR_desc();
load_LDT(&init_mm.context);
/* Set up doublefault TSS pointer in the GDT */
__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
- cpu_gdt_table[cpu][GDT_ENTRY_DOUBLEFAULT_TSS].b &= 0xfffffdff;
/* Clear %fs and %gs. */
asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");