/*
 *  linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *
 *  $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/segment.h>
#include <asm/cache.h>
/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages on setup, so define __START_KERNEL
 * to 0x100000 for this stage.
 */
/* %bx: 1 if coming from smp trampoline on secondary cpu */

	/*
	 * At this point the CPU runs in 32bit protected mode (CS.D = 1) with
	 * paging disabled and the point of this file is to switch to 64bit
	 * long mode with a kernel mapping for the kernel, and to jump into
	 * the kernel virtual addresses.
	 * There is no stack until we set one up.
	 */
	movl	%ebx,%ebp		/* Save trampoline flag */

	movl	$__KERNEL_DS,%eax

	/* If the CPU doesn't support CPUID this will double fault.
	 * Unfortunately it is hard to check for CPUID without a stack.
	 */

	/* Check if extended functions are implemented */
	movl	$0x80000000, %eax
	cmpl	$0x80000000, %eax
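	/* CPUID leaf 0x80000000 returns the highest supported extended leaf
	 * in %eax; if it is not above 0x80000000 the extended leaves (and
	 * with them the long mode flag) are not available on this CPU. */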
	/* Check if long mode is implemented */
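	/* Long mode support is the LM flag: CPUID leaf 0x80000001, %edx
	 * bit 29. Without it the CPU cannot leave 32bit mode. */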
	/*
	 * Prepare for entering 64bit mode
	 */
	/* Enable PAE mode and PGE */
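	/* PAE is CR4 bit 5 and PGE is CR4 bit 7. Long mode requires PAE
	 * paging; PGE keeps global TLB entries alive across CR3 reloads. */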
	/* Setup early boot stage 4 level pagetables */
	movl	$(init_level4_pgt - __START_KERNEL_map), %eax
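	/* Subtracting __START_KERNEL_map turns the link-time virtual address
	 * of init_level4_pgt into its physical address, which is what %cr3
	 * expects (paging is still off at this point). */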
	/* Setup EFER (Extended Feature Enable Register) */

	/* Enable Long Mode */

	/* Enable System Call */

	/* No Execute supported? */
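	/* EFER is MSR 0xc0000080 and is accessed with rdmsr/wrmsr. LME is
	 * bit 8, SCE (syscall enable) is bit 0 and NX is bit 11; NX is only
	 * set when CPUID reported the feature. */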
	/* Make changes effective */

	btsl	$31, %eax		/* Enable paging and in turn activate Long Mode */
	btsl	$0, %eax		/* Enable protected mode */
	btsl	$1, %eax		/* Enable MP */
	btsl	$4, %eax		/* Enable ET */
	btsl	$5, %eax		/* Enable NE */
	btsl	$16, %eax		/* Enable WP */
	btsl	$18, %eax		/* Enable AM */
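	/* CR0 bits set above: PE (0), MP (1), ET (4), NE (5), WP (16),
	 * AM (18) and PG (31). Writing this value to %cr0 while EFER.LME is
	 * set activates long mode, which the CPU reports via EFER.LMA. */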
	/* Make changes effective */

	jmp	reach_compatibility_mode
reach_compatibility_mode:

	/*
	 * At this point we're in long mode but in 32bit compatibility mode
	 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
	 * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we load
	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 */
	testw	%bp,%bp			/* secondary CPU? */

	/* Load new GDT with the 64bit segment using 32bit descriptor */
	movl	$(pGDT32 - __START_KERNEL_map), %eax

	movl	$(ljumpvector - __START_KERNEL_map), %eax
	/* Finally jump in 64bit mode */
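	/* The far jump goes through ljumpvector (defined below), which holds
	 * the 32bit offset of reach_long64 paired with the __KERNEL_CS
	 * selector. That descriptor has L = 1, so from reach_long64 on the
	 * CPU executes 64bit code. */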
	movq	init_rsp(%rip),%rsp
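	/* init_rsp is defined below next to initial_code and holds
	 * init_thread_union+THREAD_SIZE-8, the top of the first task's
	 * kernel stack. */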
	/* zero EFLAGS after setting rsp */
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	/* Set up a dummy PDA. This is just for some early bootup code
	 * that does in_interrupt().
	 */
	movl	$MSR_GS_BASE,%ecx
	movq	$empty_zero_page,%rax
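	/* wrmsr takes the 64bit value split across %edx:%eax, so the address
	 * of empty_zero_page becomes the GS base and %gs-relative accesses
	 * (the PDA fields) read from a page of zeroes. */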
	/* set up data segments. actually 0 would do too */
	movl	$__KERNEL_DS,%eax

	/* esi is pointer to real mode structure with interesting info. */
	/* Finally jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible as an indirect
	 * jump through a register.
	 */
	movq	initial_code(%rip),%rax
	/* SMP bootup changes these two */
	.quad	x86_64_start_kernel
	.quad	init_thread_union+THREAD_SIZE-8

ENTRY(early_idt_handler)
	movq	8(%rsp),%rsi		# get rip
	leaq	early_idt_msg(%rip),%rdi

	.asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
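	/* Arguments follow the 64bit C calling convention: %rdi carries the
	 * format string and %rsi the faulting rip; the error code and %cr2
	 * fill the remaining %lx slots. There is no recovery from an
	 * exception this early, so the CPU is simply halted afterwards. */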
	/* This isn't an x86-64 CPU so hang */

	.word	gdt32_end-gdt_table32
	.long	gdt_table32-__START_KERNEL_map

	.long	reach_long64-__START_KERNEL_map
/*
 * This default setting generates an ident mapping at address 0x100000
 * and a mapping for the kernel that precisely maps virtual address
 * 0xffffffff80000000 to physical address 0x000000. (always using
 * 2Mbyte large pages provided by PAE mode)
 */
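/*
 * Entry encoding used below: the low 12 bits are flags and the rest is a
 * physical address. 0x007 = Present | Writable | User for entries that
 * point at a next-level table, 0x183 = Present | Writable | PS (2Mbyte
 * page) | Global for the leaf mappings.
 */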
ENTRY(init_level4_pgt)
	.quad	0x0000000000102007		/* -> level3_ident_pgt */
	.quad	0x000000000010a007
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	0x0000000000103007		/* -> level3_kernel_pgt */
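	/* Slot 511 is the last PML4 entry and covers the top 2GB of the
	 * canonical address space, i.e. the __START_KERNEL_map region at
	 * 0xffffffff80000000. */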
ENTRY(level3_ident_pgt)
	.quad	0x0000000000104007

ENTRY(level3_kernel_pgt)
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	0x0000000000105007		/* -> level2_kernel_pgt */
ENTRY(level2_ident_pgt)
	/* 40MB for bootup. */
	.quad	0x0000000000000283
	.quad	0x0000000000200183
	.quad	0x0000000000400183
	.quad	0x0000000000600183
	.quad	0x0000000000800183
	.quad	0x0000000000A00183
	.quad	0x0000000000C00183
	.quad	0x0000000000E00183
	.quad	0x0000000001000183
	.quad	0x0000000001200183
	.quad	0x0000000001400183
	.quad	0x0000000001600183
	.quad	0x0000000001800183
	.quad	0x0000000001A00183
	.quad	0x0000000001C00183
	.quad	0x0000000001E00183
	.quad	0x0000000002000183
	.quad	0x0000000002200183
	.quad	0x0000000002400183
	.quad	0x0000000002600183
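	/* The 20 large-page entries above identity-map physical 0-40MB,
	 * matching the "40MB for bootup" note at the top of this table. */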
	/* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */
	.globl temp_boot_pmds

ENTRY(level2_kernel_pgt)
	/* 40MB kernel mapping. The kernel code cannot be bigger than that.
	   When you change this change KERNEL_TEXT_SIZE in page.h too. */
	/* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
	.quad	0x0000000000000183
	.quad	0x0000000000200183
	.quad	0x0000000000400183
	.quad	0x0000000000600183
	.quad	0x0000000000800183
	.quad	0x0000000000A00183
	.quad	0x0000000000C00183
	.quad	0x0000000000E00183
	.quad	0x0000000001000183
	.quad	0x0000000001200183
	.quad	0x0000000001400183
	.quad	0x0000000001600183
	.quad	0x0000000001800183
	.quad	0x0000000001A00183
	.quad	0x0000000001C00183
	.quad	0x0000000001E00183
	.quad	0x0000000002000183
	.quad	0x0000000002200183
	.quad	0x0000000002400183
	.quad	0x0000000002600183
	/* Module mapping starts here */
ENTRY(empty_zero_page)

ENTRY(empty_bad_page)

ENTRY(empty_bad_pte_table)

ENTRY(empty_bad_pmd_table)

ENTRY(level3_physmem_pgt)
	.quad	0x0000000000105007	/* -> level2_kernel_pgt (so that __va works even before pagetable_init) */

#ifdef CONFIG_ACPI_SLEEP
ENTRY(wakeup_level4_pgt)
	.quad	0x0000000000102007		/* -> level3_ident_pgt */
	.quad	0x000000000010a007
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	0x0000000000103007		/* -> level3_kernel_pgt */
	.word	gdt_end-cpu_gdt_table

	.quad	0x0000000000000000	/* This one is magic */
	.quad	0x0000000000000000	/* unused */
	.quad	0x00af9a000000ffff	/* __KERNEL_CS */

/* We need valid kernel segments for data and code in long mode too
 * IRET will check the segment types  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout
 */

	.align L1_CACHE_BYTES

/* The TLS descriptors are currently at a different place compared to i386.
   Hopefully nobody expects them at a fixed place (Wine?) */
	.quad	0x0000000000000000	/* NULL descriptor */
	.quad	0x008f9a000000ffff	/* __KERNEL_COMPAT32_CS */
	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
	.quad	0x00cffa000000ffff	/* __USER32_CS */
	.quad	0x00cff2000000ffff	/* __USER_DS, __USER32_DS */
	.quad	0x00affa000000ffff	/* __USER_CS */
	.quad	0x00cf9a000000ffff	/* __KERNEL32_CS */

	.quad	0,0,0			/* three TLS descriptors */
	.quad	0			/* unused now */
	.quad	0x00009a000000ffff	/* __KERNEL16_CS - 16bit PM for S3 wakeup. */
					/* base must be patched for real base address. */
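	/* Descriptor layout reminder: 0x00af9a000000ffff is base 0, limit
	 * 0xfffff, access byte 0x9a (present, DPL 0, readable code) and
	 * flags 0xa (G = 1, L = 1, D = 0), i.e. a 64bit code segment;
	 * 0x00cf92000000ffff is the matching writable data segment with
	 * D = 1 instead of L. */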
	/* asm/segment.h:GDT_ENTRIES must match this */
	/* This should be a multiple of the cache line size */
	/* GDTs of other CPUs: */
	.fill (GDT_SIZE * NR_CPUS) - (gdt_end - cpu_gdt_table)

	.align L1_CACHE_BYTES