/*
 * include/asm-i386/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_I386_PROCESSOR_H
#define __ASM_I386_PROCESSOR_H
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <linux/cache.h>
#include <linux/config.h>
#include <linux/threads.h>
/* flag for disabling the tsc */
extern int tsc_disable;
struct desc_struct {
	unsigned long a,b;
};

#define desc_empty(desc) \
		(!((desc)->a + (desc)->b))

#define desc_equal(desc1, desc2) \
		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
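
/*
 * Illustrative sketch, not part of the original header: desc_empty() is
 * typically used to scan a descriptor array for an unused slot, e.g. a
 * task's TLS descriptors.  find_free_tls_slot() is a hypothetical helper;
 * GDT_ENTRY_TLS_ENTRIES comes from <asm/segment.h>.
 */
static inline int find_free_tls_slot(const struct desc_struct *tls_array)
{
	int i;

	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
		if (desc_empty(&tls_array[i]))
			return i;	/* both 32-bit halves zero => unused */
	return -1;			/* no free slot */
}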
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8	x86;			/* CPU family */
	__u8	x86_vendor;		/* CPU vendor */
	char	wp_works_ok;		/* It doesn't on 386's */
	char	hlt_works_ok;		/* Problems on some 486Dx4's and old 386's */
	int	cpuid_level;		/* Maximum supported CPUID level, -1=no CPUID */
	unsigned long	x86_capability[NCAPINTS];
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int	x86_cache_size;		/* in KB - valid for CPUs which support this call */
	int	x86_cache_alignment;	/* In bytes */
	unsigned long loops_per_jiffy;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_NEXGEN	4
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_RISE		6
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9
#define X86_VENDOR_UNKNOWN	0xff
/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct init_tss[NR_CPUS];
extern struct tss_struct doublefault_tss;

#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif
extern char ignore_fpu_irq;

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void dodgy_tsc(void);
#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
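
/*
 * Illustrative sketch, not part of the original header: the masks above are
 * meant to be tested against a saved EFLAGS word, e.g. one captured with
 * local_save_flags() or taken from struct pt_regs.
 */
static inline int eflags_irqs_enabled(unsigned long flags)
{
	return (flags & X86_EFLAGS_IF) != 0;	/* IF set => maskable interrupts on */
}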
/*
 * Generic CPUID function
 */
static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)

static inline unsigned int cpuid_ebx(unsigned int op)
	unsigned int eax, ebx;
		: "=a" (eax), "=b" (ebx)

static inline unsigned int cpuid_ecx(unsigned int op)
	unsigned int eax, ecx;
		: "=a" (eax), "=c" (ecx)

static inline unsigned int cpuid_edx(unsigned int op)
	unsigned int eax, edx;
		: "=a" (eax), "=d" (edx)
#define load_cr3(pgdir) \
	asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)))
/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
#define X86_CR4_DE		0x0008	/* enable debugging extensions */
#define X86_CR4_PSE		0x0010	/* enable page size extensions */
#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
#define X86_CR4_MCE		0x0040	/* Machine check enable */
#define X86_CR4_PGE		0x0080	/* enable global pages */
#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us can
 * get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4 (unsigned long mask)
	mmu_cr4_features |= mask;
	__asm__("movl %%cr4,%%eax\n\t"

static inline void clear_in_cr4 (unsigned long mask)
	mmu_cr4_features &= ~mask;
	__asm__("movl %%cr4,%%eax\n\t"
/*
 * NSC/Cyrix CPU configuration register indexes
 */
#define CX86_PCR0 0x20
#define CX86_GCR  0xb8
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_PCR1 0xf0
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc
/*
 * NSC/Cyrix CPU indexed register access macros
 */
#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })

#define setCx86(reg, data) do { \
	outb((reg), 0x22); \
	outb((data), 0x23); \
} while (0)
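
/*
 * Illustrative sketch, not part of the original header: the higher Cyrix
 * configuration registers are only visible while the MAPEN bits in CCR3 are
 * set, so the usual pattern is save CCR3, enable MAPEN, touch the target
 * register, then restore CCR3.  cx86_set_ccr4_bits() is a hypothetical
 * helper.
 */
static inline void cx86_set_ccr4_bits(unsigned char bits)
{
	unsigned char ccr3 = getCx86(CX86_CCR3);

	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* set MAPEN */
	setCx86(CX86_CCR4, getCx86(CX86_CCR4) | bits);
	setCx86(CX86_CCR3, ccr3);			/* restore CCR3, clear MAPEN */
}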
/*
 * Bus types (default is ISA, but people can check others with these..)
 * pc98 indicates PC98 systems (CBUS)
 */
#ifdef CONFIG_X86_PC9800
static inline void __monitor(const void *eax, unsigned long ecx,
		unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	__asm__ __volatile__(
		".byte 0x0f,0x01,0xc8;"
		: :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	__asm__ __volatile__(
		".byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}
/* from system description table in BIOS. Mostly for MCA use, but
   others may find it useful. */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
extern unsigned int mca_pentium_flag;
/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE/3)

#define SHLIB_BASE		0x00111000

#define __HAVE_ARCH_ALIGN_STACK
extern unsigned long arch_align_stack(unsigned long sp);

#define __HAVE_ARCH_MMAP_TOP
extern unsigned long mmap_top(void);
/*
 * Size of io_bitmap, covering ports 0 to 0x3ff.
 */
#define IO_BITMAP_BITS	1024
#define IO_BITMAP_BYTES	(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS	(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
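
/*
 * Illustrative sketch, not part of the original header: the I/O permission
 * bitmap holds one bit per port, and a *clear* bit grants access.
 * ioport_allowed() is a hypothetical helper showing how such a bitmap would
 * be consulted.
 */
static inline int ioport_allowed(const unsigned long *bitmap, unsigned int port)
{
	if (port >= IO_BITMAP_BITS)
		return 0;				/* outside the covered range */
	return !(bitmap[port / (8 * sizeof(long))] &
		 (1UL << (port % (8 * sizeof(long)))));
}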
struct i387_fsave_struct {
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	long	status;		/* software status information */
};

struct i387_fxsave_struct {
	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
} __attribute__ ((aligned (16)));

struct i387_soft_struct {
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	unsigned char	ftop, changed, lookahead, no_update, rm, alimit;
	unsigned long	entry_eip;
};

union i387_union {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
};
struct tss_struct {
	unsigned short	back_link,__blh;
	unsigned short	ss0,__ss0h;
	unsigned short	ss1,__ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
	unsigned short	ss2,__ss2h;
	unsigned long	eflags;
	unsigned long	eax,ecx,edx,ebx;
	unsigned short	es, __esh;
	unsigned short	cs, __csh;
	unsigned short	ss, __ssh;
	unsigned short	ds, __dsh;
	unsigned short	fs, __fsh;
	unsigned short	gs, __gsh;
	unsigned short	ldt, __ldth;
	unsigned short	trace, io_bitmap_base;
	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * pads the TSS to be cacheline-aligned (size is 0x100)
	 */
	unsigned long __cacheline_filler[5];
	/*
	 * .. and then another 0x100 bytes for emergency kernel stack
	 */
	unsigned long stack[64];
} __attribute__((packed));
#define ARCH_MIN_TASKALIGN	16

#define STACK_PAGE_COUNT	(4096/PAGE_SIZE)
struct thread_struct {
/* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
	void *stack_page[STACK_PAGE_COUNT];
	unsigned long	sysenter_cs;
/* Hardware debugging registers */
	unsigned long	debugreg[8];	/* %%db0-7 debug registers */
	unsigned long	cr2, trap_no, error_code;
/* floating point info */
	union i387_union	i387;
/* virtual 86 mode info */
	struct vm86_struct __user * vm86_info;
	unsigned long	screen_bitmap;
	unsigned long	v86flags, v86mask, saved_esp0;
	unsigned int	saved_fs, saved_gs;
	unsigned long	*io_bitmap_ptr;
};
#define INIT_THREAD  {							\
	.sysenter_cs	= __KERNEL_CS,					\
	.io_bitmap_ptr	= NULL,						\
}
/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {							\
	.esp0		= sizeof(init_stack) + (long)&init_stack,	\
	.ss0		= __KERNEL_DS,					\
	.esp1		= sizeof(init_tss[0]) + (long)&init_tss[0],	\
	.ss1		= __KERNEL_CS,					\
	.ldt		= GDT_ENTRY_LDT,				\
	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,			\
	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },		\
}
static inline void
load_esp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->esp0 = thread->esp0;
	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
		tss->ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
}
#define start_thread(regs, new_eip, new_esp) do {		\
	__asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));	\
	regs->xds = __USER_DS;					\
	regs->xes = __USER_DS;					\
	regs->xss = __USER_DS;					\
	regs->xcs = __USER_CS;					\
	regs->eip = new_eip;					\
	regs->esp = new_esp;					\
	load_user_cs_desc(smp_processor_id(), current->mm);	\
} while (0)
/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
#ifdef CONFIG_X86_HIGH_ENTRY
#define virtual_esp0(tsk) \
	((unsigned long)(tsk)->thread_info->virtual_stack + ((tsk)->thread.esp0 - (unsigned long)(tsk)->thread_info->real_stack))
#else
# define virtual_esp0(tsk) ((tsk)->thread.esp0)
#endif

#define load_virtual_esp0(tss, task)					\
	do {								\
		tss->esp0 = virtual_esp0(task);				\
		if (likely(cpu_has_sep) && unlikely(tss->ss1 != task->thread.sysenter_cs)) {	\
			tss->ss1 = task->thread.sysenter_cs;		\
			wrmsr(MSR_IA32_SYSENTER_CS,			\
			      task->thread.sysenter_cs, 0);		\
		}							\
	} while (0)
extern unsigned long thread_saved_pc(struct task_struct *tsk);
void show_trace(struct task_struct *task, unsigned long *stack);

unsigned long get_wchan(struct task_struct *p);
#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)							\
({									\
	unsigned long *__ptr = (unsigned long *)(info);			\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);			\
})

#define task_pt_regs(task)						\
({									\
	struct pt_regs *__regs__;					\
	__regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info);	\
	__regs__ - 1;							\
})

#define KSTK_EIP(task) (task_pt_regs(task)->eip)
#define KSTK_ESP(task) (task_pt_regs(task)->esp)
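
/*
 * Illustrative sketch, not part of the original header: a task's saved
 * user-mode registers live in a struct pt_regs at the top of its kernel
 * stack, so sampling another task's user EIP/ESP reduces to the macros
 * above.  task_user_pc_sp() is a hypothetical helper and assumes the full
 * definition of struct task_struct (i.e. <linux/sched.h>) at the point of
 * use.
 */
static inline void task_user_pc_sp(struct task_struct *tsk,
				   unsigned long *eip, unsigned long *esp)
{
	*eip = KSTK_EIP(tsk);
	*esp = KSTK_ESP(tsk);
}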
struct microcode_header {
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;
/* microcode format is extended from prescott processors */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

struct extended_sigtable {
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};

/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
#define MICROCODE_IOCFREE	_IO('6',0)
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop": : :"memory");
}

#define cpu_relax()	rep_nop()
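
/*
 * Illustrative sketch, not part of the original header: cpu_relax() is
 * intended for tight polling loops; the PAUSE hint reduces power and frees
 * execution resources for a hyperthreaded sibling while we spin.
 * spin_until_nonzero() is a hypothetical helper.
 */
static inline void spin_until_nonzero(volatile int *flag)
{
	while (!*flag)
		cpu_relax();
}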
/* generic versions from gas */
#define GENERIC_NOP1	".byte 0x90\n"
#define GENERIC_NOP2	".byte 0x89,0xf6\n"
#define GENERIC_NOP3	".byte 0x8d,0x76,0x00\n"
#define GENERIC_NOP4	".byte 0x8d,0x74,0x26,0x00\n"
#define GENERIC_NOP5	GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6	".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP7	".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP8	GENERIC_NOP1 GENERIC_NOP7

#define K8_NOP1	GENERIC_NOP1
#define K8_NOP2	".byte 0x66,0x90\n"
#define K8_NOP3	".byte 0x66,0x66,0x90\n"
#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5	K8_NOP3 K8_NOP2
#define K8_NOP6	K8_NOP3 K8_NOP3
#define K8_NOP7	K8_NOP4 K8_NOP3
#define K8_NOP8	K8_NOP4 K8_NOP4

/* uses eax dependencies (arbitrary choice) */
#define K7_NOP1	GENERIC_NOP1
#define K7_NOP2	".byte 0x8b,0xc0\n"
#define K7_NOP3	".byte 0x8d,0x04,0x20\n"
#define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n"
#define K7_NOP5	K7_NOP4 ASM_NOP1
#define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n"
#define K7_NOP7	".byte 0x8D,0x04,0x05,0,0,0,0\n"
#define K7_NOP8	K7_NOP7 ASM_NOP1
#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif

#define ASM_NOP_MAX 8
/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6
   because they are microcoded there and very slow.
   However, we don't currently do prefetches for pre-XP Athlons.
   That should be fixed. */
#define ARCH_HAS_PREFETCH
extern inline void prefetch(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency protocol. */
extern inline void prefetchw(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

#define spin_lock_prefetch(x)	prefetchw(x)
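
/*
 * Illustrative sketch, not part of the original header: the classic use of
 * prefetch() is to pull the next node of a linked structure into the cache
 * while the current node is being processed.  struct demo_node and
 * demo_sum() are hypothetical.
 */
struct demo_node {
	struct demo_node *next;
	int value;
};

static inline int demo_sum(struct demo_node *p)
{
	int sum = 0;

	for (; p; p = p->next) {
		prefetch(p->next);	/* prefetching a NULL pointer is harmless */
		sum += p->value;
	}
	return sum;
}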
extern void select_idle_routine(const struct cpuinfo_x86 *c);

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

#ifdef CONFIG_SCHED_SMT
#define ARCH_HAS_SCHED_DOMAIN
#define ARCH_HAS_SCHED_WAKE_IDLE
#endif

#endif /* __ASM_I386_PROCESSOR_H */