/*
 * include/asm-x86_64/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_X86_64_PROCESSOR_H
#define __ASM_X86_64_PROCESSOR_H
#include <asm/segment.h>
#include <asm/page.h>		/* PAGE_SIZE, PAGE_ALIGN, PAGE_SHARED used below */
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <linux/config.h>
#include <linux/threads.h>
#include <asm/current.h>
#include <asm/system.h>
#include <asm/mmsegment.h>
#include <linux/personality.h>
#define TF_MASK		0x00000100
#define IF_MASK		0x00000200
#define IOPL_MASK	0x00003000
#define NT_MASK		0x00004000
#define VM_MASK		0x00020000
#define AC_MASK		0x00040000
#define VIF_MASK	0x00080000	/* virtual interrupt flag */
#define VIP_MASK	0x00100000	/* virtual interrupt pending */
#define ID_MASK		0x00200000
#define desc_empty(desc) \
		(!((desc)->a + (desc)->b))

#define desc_equal(desc1, desc2) \
		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
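
/*
 * Illustrative use only (not part of the original header): a debug
 * helper could record roughly where it was called from, e.g.
 *
 *	printk("reached %p\n", current_text_addr());
 *
 * The leaq 1f(%rip) form computes the address of the local label
 * placed right after the instruction, so no relocation against an
 * absolute symbol is needed.
 */
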
/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor */
	__u8	x86_model;
	__u8	x86_mask;
	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	__u32	x86_capability[NCAPINTS];
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int	x86_cache_size;	/* in KB */
	int	x86_clflush_size;
	int	x86_cache_alignment;
	int	x86_tlbsize;	/* number of 4K pages in DTLB/ITLB combined */
	__u8	x86_virt_bits, x86_phys_bits;
	__u32	x86_power;
	unsigned long loops_per_jiffy;
} ____cacheline_aligned;

#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_RISE 6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NUM 8
#define X86_VENDOR_UNKNOWN 0xff

extern struct cpuinfo_x86 boot_cpu_data;
extern struct tss_struct init_tss[NR_CPUS];

#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif

extern char ignore_irq13;

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void dodgy_tsc(void);

/*
 * EFLAGS bits
 */
#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
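
/*
 * Illustrative sketch (not from the original header): code holding a
 * struct pt_regs snapshot can test these bits directly, e.g. to see
 * whether the interrupted context had interrupts enabled:
 *
 *	if (regs->eflags & X86_EFLAGS_IF)
 *		local_irq_enable();
 */
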
/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
#define X86_CR4_DE		0x0008	/* enable debugging extensions */
#define X86_CR4_PSE		0x0010	/* enable page size extensions */
#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
#define X86_CR4_MCE		0x0040	/* Machine check enable */
#define X86_CR4_PGE		0x0080	/* enable global pages */
#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */

/*
 * Save the cr4 feature set we're using (ie
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	mmu_cr4_features |= mask;
	__asm__("movq %%cr4,%%rax\n\t"
		"orq %0,%%rax\n\t"
		"movq %%rax,%%cr4\n"
		: : "irg" (mask)
		: "ax");
}

static inline void clear_in_cr4(unsigned long mask)
{
	mmu_cr4_features &= ~mask;
	__asm__("movq %%cr4,%%rax\n\t"
		"andq %0,%%rax\n\t"
		"movq %%rax,%%cr4\n"
		: : "irg" (~mask)
		: "ax");
}
/*
 * Bus types
 */
#define EISA_bus 0
#define MCA_bus 0
#define MCA_bus__is_a_macro

/*
 * User space process size: 512GB - 1GB (default).
 */
#define TASK_SIZE	(0x0000007fc0000000)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap().
 */
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
#define TASK_UNMAPPED_32 PAGE_ALIGN(IA32_PAGE_OFFSET/3)
#define TASK_UNMAPPED_64 PAGE_ALIGN(TASK_SIZE/3)
#define TASK_UNMAPPED_BASE	\
	(test_thread_flag(TIF_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)

/*
 * Size of io_bitmap, covering ports 0 to 0x3ff.
 */
#define IO_BITMAP_BITS	1024
#define IO_BITMAP_BYTES	(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS	(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
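
/*
 * Worked sizing, for illustration (not in the original header):
 * 1024 bits cover ports 0..0x3ff, so IO_BITMAP_BYTES = 1024/8 = 128
 * and IO_BITMAP_LONGS = 128/8 = 16 longs on x86-64. A TSS whose
 * io_map_base holds INVALID_IO_BITMAP_OFFSET (0x8000) points past the
 * TSS limit, so any CPL-3 port access faults instead of consulting a
 * bitmap.
 */
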
struct i387_fxsave_struct {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
	u32	padding[24];
} __attribute__ ((aligned (16)));

union i387_union {
	struct i387_fxsave_struct	fxsave;
};

struct tss_struct {
	u32 reserved1;
	u64 rsp0;
	u64 rsp1;
	u64 rsp2;
	u64 reserved2;
	u64 ist[7];
	u32 reserved3;
	u32 reserved4;
	u16 reserved5;
	u16 io_map_base;
	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit. Thus we have:
	 *
	 * 128 bytes, the bitmap itself, for ports 0..0x3ff
	 * 8 bytes, for an extra "long" of ~0UL
	 */
	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
} __attribute__((packed)) ____cacheline_aligned;
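
/*
 * Sketch, for illustration (not code from this header): whoever
 * installs a per-task bitmap must provide the trailing all-ones long
 * the comment above requires, roughly
 *
 *	memset(bitmap, 0xff, IO_BITMAP_BYTES);
 *	bitmap[IO_BITMAP_LONGS] = ~0UL;
 *
 * A 1 bit denies access to a port, so a fully set bitmap grants
 * nothing until ioperm() clears individual bits.
 */
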
struct thread_struct {
	unsigned long	rsp0;
	unsigned long	rsp;
	unsigned long	userrsp;	/* Copy from PDA */
	unsigned long	fs;
	unsigned long	gs;
	unsigned short	es, ds, fsindex, gsindex;
	/* Hardware debugging registers */
	unsigned long	debugreg0;
	unsigned long	debugreg1;
	unsigned long	debugreg2;
	unsigned long	debugreg3;
	unsigned long	debugreg6;
	unsigned long	debugreg7;
	/* fault info */
	unsigned long	cr2, trap_no, error_code;
	/* floating point info */
	union i387_union	i387;
	/* IO permissions. The bitmap could be moved into the GDT, which would
	   make context switch faster for a limited number of ioperm-using
	   tasks. -AK */
	int		ioperm;
	unsigned long	*io_bitmap_ptr;
	/* cached TLS descriptors. */
	u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
};

#define INIT_THREAD {}

#define INIT_MMAP \
{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }

#define STACKFAULT_STACK 1
#define DOUBLEFAULT_STACK 2
#define NMI_STACK 3
#define DEBUG_STACK 4
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5	/* hw limit: 7 */
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
#define EXCEPTION_STACK_ORDER 0

#define start_thread(regs,new_rip,new_rsp) do { \
	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0));	\
	load_gs_index(0);							\
	(regs)->rip = (new_rip);						\
	(regs)->rsp = (new_rsp);						\
	write_pda(oldrsp, (new_rsp));						\
	(regs)->cs = __USER_CS;							\
	(regs)->ss = __USER_DS;							\
	(regs)->eflags = 0x200;	/* X86_EFLAGS_IF */				\
	set_fs(USER_DS);							\
} while (0)
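
/*
 * Illustrative context (not part of this header): binfmt loaders call
 * this once the new image is mapped, e.g. ELF setup does roughly
 *
 *	start_thread(regs, entry_point, stack_pointer);
 *
 * (entry_point/stack_pointer are placeholder names here). The return
 * to user space then begins executing at new_rip on the fresh stack,
 * with flat user segments and only IF set in eflags.
 */
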
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * create a kernel thread without removing it from tasklists
 */
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))

extern unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk) \
	(((struct pt_regs *)(tsk->thread.rsp0 - sizeof(struct pt_regs)))->rip)
#define KSTK_ESP(tsk) -1	/* sorry, doesn't work for syscalls */

struct microcode_header {
	unsigned int hdrver;
	unsigned int rev;
	unsigned int date;
	unsigned int sig;
	unsigned int cksum;
	unsigned int ldrver;
	unsigned int pf;
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from Prescott processors */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

struct extended_sigtable {
	unsigned int count;
	unsigned int cksum;
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};

/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
#define MICROCODE_IOCFREE	_IO('6',0)

#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8

/* Opteron nops: a 0x66 prefix stretches a one-byte nop; longer pads
   chain several prefixed nops. */
#define K8_NOP1 ".byte 0x90\n"
#define K8_NOP2 ".byte 0x66,0x90\n"
#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5 K8_NOP3 K8_NOP2
#define K8_NOP6 K8_NOP3 K8_NOP3
#define K8_NOP7 K8_NOP4 K8_NOP3
#define K8_NOP8 K8_NOP4 K8_NOP4

#define ASM_NOP_MAX 8

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
extern inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop": : :"memory");
}

/* Stop speculative execution */
extern inline void sync_core(void)
{
	int tmp;
	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
}

#define cpu_has_fpu 1

#define ARCH_HAS_PREFETCH
static inline void prefetch(void *x)
{
	asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}

#define ARCH_HAS_PREFETCHW 1
static inline void prefetchw(void *x)
{
	alternative_input(ASM_NOP5,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

#define ARCH_HAS_SPINLOCK_PREFETCH 1

#define spin_lock_prefetch(x)	prefetchw(x)

#define cpu_relax()	rep_nop()
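
/*
 * Illustrative busy-wait pattern (not code from this header): callers
 * poll with cpu_relax() so the PAUSE hint eases pipeline and
 * hyperthread pressure inside the loop:
 *
 *	while (!*flag)
 *		cpu_relax();
 *
 * (flag is a placeholder for whatever condition is being polled.)
 */
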
/*
 * NSC/Cyrix CPU configuration register indexes
 */
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc

/*
 * NSC/Cyrix CPU indexed register access macros
 */

#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })

#define setCx86(reg, data) do { \
	outb((reg), 0x22); \
	outb((data), 0x23); \
} while (0)
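
/*
 * Illustrative read-modify-write (not code from this header): 0x22 is
 * the index port and 0x23 the data port, so updating a Cyrix
 * configuration register looks like
 *
 *	setCx86(CX86_CCR3, getCx86(CX86_CCR3) | 0x10);
 *
 * where 0x10 is just an example bit. Every access must rewrite the
 * index byte first, which is why both macros start with outb to 0x22.
 */
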
static inline void __monitor(const void *eax, unsigned long ecx,
		unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc8;"
		: :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}
#define stack_current() \
({								\
	struct thread_info *ti;					\
	asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));	\
	ti->task;						\
})

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

#endif /* __ASM_X86_64_PROCESSOR_H */