/*
 * X86-64 specific CPU setup.
 * Copyright (C) 1995 Linus Torvalds
 * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
 * See setup.c for older changelog.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <asm/bootsetup.h>
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/i387.h>
#include <asm/percpu.h>
#include <asm/proto.h>
#include <asm/sections.h>
char x86_boot_params[BOOT_PARAM_SIZE] __initdata;

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(_cpu_pda);
struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;

struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };

char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));

unsigned long __supported_pte_mask __read_mostly = ~0UL;
EXPORT_SYMBOL(__supported_pte_mask);
static int do_not_nx __cpuinitdata = 0;
/*
 * Great future plan:
 * Declare the PDA itself and its support data (irqstack, tss, pgd) as
 * per-CPU data. Always point %gs at its beginning.
 */
void __init setup_per_cpu_areas(void)
{
	int i;
	unsigned long size;

#ifdef CONFIG_HOTPLUG_CPU
	prefill_possible_map();
#endif

	/* Copy the section for each CPU (the original is discarded) */
	size = PERCPU_ENOUGH_ROOM;

	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", size);
	for_each_cpu_mask (i, cpu_possible_map) {
		char *ptr;

		if (!NODE_DATA(cpu_to_node(i))) {
			printk("cpu with no node %d, num_online_nodes %d\n",
			       i, num_online_nodes());
			ptr = alloc_bootmem(size);
		} else {
			ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
		}
		if (!ptr)
			panic("Cannot allocate cpu data for CPU %d\n", i);
		cpu_pda(i)->data_offset = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}
}
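/*
 * Illustrative sketch (not part of the original file): how a per-CPU
 * variable is reached once setup_per_cpu_areas() has copied the
 * section. The helper name example_per_cpu_ptr() is hypothetical; the
 * real accessor is the per_cpu() macro from <asm/percpu.h>, which on
 * x86-64 applies exactly this data_offset relocation.
 */
static inline void *example_per_cpu_ptr(void *linker_addr, int cpu)
{
	/* Each CPU's copy lives data_offset bytes away from the
	 * linker-assigned address inside __per_cpu_start..__per_cpu_end. */
	return (char *)linker_addr + cpu_pda(cpu)->data_offset;
}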
void pda_init(int cpu)
{
	struct x8664_pda *pda = cpu_pda(cpu);

	/* Set up data that may be needed in __get_free_pages early */
	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
	/* Memory clobbers used to order the PDA accesses */
	mb();
	wrmsrl(MSR_GS_BASE, pda);
	mb();

	pda->cpunumber = cpu;
	pda->irqcount = -1;
	pda->kernelstack =
		(unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
	pda->active_mm = &init_mm;
	pda->mmu_state = 0;

	if (cpu == 0) {
		/* others are initialized in smpboot.c */
		pda->pcurrent = &init_task;
		pda->irqstackptr = boot_cpu_stack;
	} else {
		pda->irqstackptr = (char *)
			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
		if (!pda->irqstackptr)
			panic("cannot allocate irqstack for cpu %d", cpu);
	}

	pda->irqstackptr += IRQSTACKSIZE-64;
}
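/*
 * Illustrative sketch (not part of the original file): once pda_init()
 * has written MSR_GS_BASE, PDA fields are reachable as %gs-relative
 * loads. example_read_cpunumber() is hypothetical; the real accessors
 * are the read_pda()/write_pda() macros in <asm/pda.h>.
 */
static inline int example_read_cpunumber(void)
{
	int ret;

	/* Load pda->cpunumber through the %gs segment base. */
	asm("movl %%gs:%c1,%0"
	    : "=r" (ret)
	    : "i" (offsetof(struct x8664_pda, cpunumber)));
	return ret;
}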
char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
__attribute__((section(".bss.page_aligned")));
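/*
 * Illustrative sketch (not part of the original file): the stack
 * carving that the estacks loop in cpu_init() below performs for
 * CPU 0. example_estack_top() is a hypothetical helper; each IST slot
 * records the *top* of its stack, which is why cpu_init() advances the
 * pointer before storing it.
 */
static inline char *example_estack_top(unsigned int v)
{
	char *p = boot_exception_stacks;
	unsigned int i;

	/* Stacks are laid out back to back; the debug stack is the
	 * only one with a different size. */
	for (i = 0; i <= v; i++)
		p += (i == DEBUG_STACK - 1) ? DEBUG_STKSZ : EXCEPTION_STKSZ;
	return p;
}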
/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a somewhat strange symbiosis: both
	 * write to the same internal register. STAR sets the CS/SS
	 * selectors but only a 32-bit target; LSTAR sets the 64-bit rip.
	 */
	wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);

#ifdef CONFIG_IA32_EMULATION
	syscall32_cpu_init();
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
}
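/*
 * Illustrative sketch (not part of the original file): the MSR_STAR
 * layout that syscall_init() relies on. Only the bit positions are
 * architectural; the helper and its parameter names are hypothetical.
 */
static inline u64 example_star_value(u16 sysret_base, u16 syscall_base)
{
	/* Bits 63..48: selector base used by SYSRET (user CS/SS pair);
	 * bits 47..32: selector base used by SYSCALL (kernel CS/SS);
	 * bits 31..0:  legacy 32-bit SYSCALL EIP, unused in long mode. */
	return ((u64)sysret_base << 48) | ((u64)syscall_base << 32);
}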
void __cpuinit check_efer(void)
{
	unsigned long efer;

	rdmsrl(MSR_EFER, efer);
	if (!(efer & EFER_NX) || do_not_nx)
		__supported_pte_mask &= ~_PAGE_NX;
}
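/*
 * Illustrative sketch (not part of the original file): how the mask
 * trimmed by check_efer() takes effect. example_mask_pte_flags() is
 * hypothetical; in the real code the filtering happens in the PTE
 * construction helpers of <asm/pgtable.h>.
 */
static inline unsigned long example_mask_pte_flags(unsigned long flags)
{
	/* Once _PAGE_NX has been cleared from __supported_pte_mask,
	 * no PTE built through this mask can carry the NX bit. */
	return flags & __supported_pte_mask;
}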
unsigned long kernel_eflags;
/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless; this function acts as a
 * 'CPU state barrier': nothing should get across.
 * A lot of state is already set up in pda_init().
 */
void __cpuinit cpu_init (void)
{
	int cpu = stack_smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
	unsigned long v;
	char *estacks = NULL;
	struct task_struct *me;
	int i;

	/* CPU 0 is initialised in head64.c */
	if (cpu != 0) {
		pda_init(cpu);
		zap_low_mappings(cpu);
	} else
		estacks = boot_exception_stacks;

	me = current;

	if (cpu_test_and_set(cpu, cpu_initialized))
		panic("CPU#%d already initialized!\n", cpu);

	printk("Initializing CPU#%d\n", cpu);
	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	if (cpu)
		memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);

	cpu_gdt_descr[cpu].size = GDT_SIZE;
	asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu]));
	asm volatile("lidt %0" :: "m" (idt_descr));

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();
	check_efer();
	/*
	 * set up and load the per-CPU TSS
	 */
	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
		static const unsigned int order[N_EXCEPTION_STACKS] = {
			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
			[DEBUG_STACK - 1] = DEBUG_STACK_ORDER
		};
		if (cpu) {
			estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
			if (!estacks)
				panic("Cannot allocate exception stack %ld %d\n",
				      v, cpu);
		}
		estacks += PAGE_SIZE << order[v];
		orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
	}
	t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;
	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	if (me->mm)
		BUG();
	enter_lazy_tlb(&init_mm, me);

	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);
	/*
	 * Clear all 6 debug registers:
	 */
	set_debugreg(0UL, 0);
	set_debugreg(0UL, 1);
	set_debugreg(0UL, 2);
	set_debugreg(0UL, 3);
	set_debugreg(0UL, 6);
	set_debugreg(0UL, 7);

	fpu_init();

	raw_local_save_flags(kernel_eflags);
}
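/*
 * Illustrative sketch (not part of the original file): the check the
 * CPU itself performs against the IO bitmap that cpu_init() fills with
 * all ones above. example_ioport_allowed() is hypothetical; hardware
 * does this on in/out instructions when CPL > IOPL.
 */
static inline int example_ioport_allowed(struct tss_struct *t, unsigned int port)
{
	/* A set bit denies access; cpu_init() sets every bit, so all
	 * ports trap until ioperm()/iopl() clears some of them. */
	return !(t->io_bitmap[port / (8 * sizeof(long))] &
		 (1UL << (port % (8 * sizeof(long)))));
}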