/*
 * include/asm-i386/processor.h
 * Copyright (C) 1994 Linus Torvalds
 */
#ifndef __ASM_I386_PROCESSOR_H
#define __ASM_I386_PROCESSOR_H

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/system.h>
#include <linux/cache.h>
#include <linux/config.h>
#include <linux/threads.h>

/* flag for disabling the tsc */
extern int tsc_disable;

struct desc_struct {
	unsigned long a,b;
};

#define desc_empty(desc) \
		(!((desc)->a + (desc)->b))

#define desc_equal(desc1, desc2) \
		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })

/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor */
	__u8	x86_model;
	__u8	x86_mask;
	char	wp_works_ok;	/* It doesn't on 386's */
	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	unsigned long	x86_capability[NCAPINTS];
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int	x86_cache_size;		/* in KB - valid for CPUs which support this call */
	int	x86_cache_alignment;	/* In bytes */
	unsigned long loops_per_jiffy;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_NEXGEN	4
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_RISE		6
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9
#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct init_tss[NR_CPUS];
extern struct tss_struct doublefault_tss;

#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif

extern char ignore_fpu_irq;

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void dodgy_tsc(void);

/*
 * EFLAGS bits
 */
#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
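
/*
 * Illustrative sketch (not part of the original header): the X86_EFLAGS_*
 * masks above are meant to be tested against a saved EFLAGS image such as
 * regs->eflags.  The helper name below is hypothetical.
 */
static inline int example_interrupts_were_enabled(unsigned long eflags)
{
	return (eflags & X86_EFLAGS_IF) != 0;	/* IF set => interrupts were on */
}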

/*
 * Generic CPUID function
 */
static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (op));
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax;
	__asm__("cpuid" : "=a" (eax) : "0" (op) : "bx", "cx", "dx");
	return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx;
	__asm__("cpuid"
		: "=a" (eax), "=b" (ebx)
		: "0" (op) : "cx", "dx");
	return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ecx;
	__asm__("cpuid"
		: "=a" (eax), "=c" (ecx)
		: "0" (op) : "bx", "dx");
	return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, edx;
	__asm__("cpuid"
		: "=a" (eax), "=d" (edx)
		: "0" (op) : "bx", "cx");
	return edx;
}

#define load_cr3(pgdir) \
	asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)))

/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
#define X86_CR4_DE		0x0008	/* enable debugging extensions */
#define X86_CR4_PSE		0x0010	/* enable page size extensions */
#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
#define X86_CR4_MCE		0x0040	/* Machine check enable */
#define X86_CR4_PGE		0x0080	/* enable global pages */
#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */

/*
 * Save the cr4 feature set we're using (ie
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPU's that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4 (unsigned long mask)
{
	mmu_cr4_features |= mask;
	__asm__("movl %%cr4,%%eax\n\t"
		"orl %0,%%eax\n\t"
		"movl %%eax,%%cr4\n"
		: : "irg" (mask)
		: "ax");
}

static inline void clear_in_cr4 (unsigned long mask)
{
	mmu_cr4_features &= ~mask;
	__asm__("movl %%cr4,%%eax\n\t"
		"andl %0,%%eax\n\t"
		"movl %%eax,%%cr4\n"
		: : "irg" (~mask)
		: "ax");
}

/*
 * NSC/Cyrix CPU configuration register indexes
 */
#define CX86_PCR0 0x20
#define CX86_GCR  0xb8
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_PCR1 0xf0
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc

/*
 * NSC/Cyrix CPU indexed register access macros
 */
#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })

#define setCx86(reg, data) do { \
	outb((reg), 0x22); \
	outb((data), 0x23); \
} while (0)
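
/*
 * Illustrative sketch (not part of the original header): the indexed-access
 * macros above read or write a Cyrix/NSC configuration register through
 * ports 0x22/0x23, e.g. fetching the device ID from DIR0.  The helper name
 * is hypothetical.
 */
static inline unsigned char example_read_cyrix_device_id(void)
{
	return getCx86(CX86_DIR0);	/* DIR0 identifies the Cyrix/NSC part */
}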

/*
 * Bus types (default is ISA, but people can check others with these..)
 * pc98 indicates PC98 systems (CBUS)
 */
#ifdef CONFIG_X86_PC9800
#define pc98 1
#else
#define pc98 0
#endif

static inline void __monitor(const void *eax, unsigned long ecx,
		unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc8;"
		: :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}

/* from system description table in BIOS. Mostly for MCA use, but
   others may find it useful. */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
extern unsigned int mca_pentium_flag;

/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE	(PAGE_OFFSET)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

/*
 * Size of io_bitmap, covering ports 0 to 0x3ff.
 */
#define IO_BITMAP_BITS	1024
#define IO_BITMAP_BYTES	(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS	(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
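
/*
 * Illustrative sketch (not part of the original header): how a port number
 * maps into the io_bitmap.  A cleared bit grants access to that port; ports
 * at or above IO_BITMAP_BITS are always denied.  The helper name is
 * hypothetical.
 */
static inline int example_ioport_allowed(const unsigned long *bitmap, unsigned int port)
{
	if (port >= IO_BITMAP_BITS)
		return 0;
	return !(bitmap[port / (8 * sizeof(long))] &
		 (1UL << (port % (8 * sizeof(long)))));
}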

struct i387_fsave_struct {
	long	cwd, swd, twd;
	long	fip, fcs, foo, fos;
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	long	status;		/* software status information */
};

struct i387_fxsave_struct {
	unsigned short	cwd, swd, twd, fop;
	long	fip, fcs, foo, fos;
	long	mxcsr;
	long	reserved;
	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
	long	padding[56];
} __attribute__ ((aligned (16)));

struct i387_soft_struct {
	long	cwd, swd, twd;
	long	fip, fcs, foo, fos;
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	unsigned char	ftop, changed, lookahead, no_update, rm, alimit;
	struct info	*info;
	unsigned long	entry_eip;
};

union i387_union {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
};

struct tss_struct {
	unsigned short	back_link,__blh;
	unsigned long	esp0;
	unsigned short	ss0,__ss0h;
	unsigned long	esp1;
	unsigned short	ss1,__ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
	unsigned long	esp2;
	unsigned short	ss2,__ss2h;
	unsigned long	__cr3;
	unsigned long	eip;
	unsigned long	eflags;
	unsigned long	eax,ecx,edx,ebx;
	unsigned long	esp;
	unsigned long	ebp;
	unsigned long	esi;
	unsigned long	edi;
	unsigned short	es, __esh;
	unsigned short	cs, __csh;
	unsigned short	ss, __ssh;
	unsigned short	ds, __dsh;
	unsigned short	fs, __fsh;
	unsigned short	gs, __gsh;
	unsigned short	ldt, __ldth;
	unsigned short	trace, io_bitmap_base;
	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * pads the TSS to be cacheline-aligned (size is 0x100)
	 */
	unsigned long __cacheline_filler[5];
	/*
	 * .. and then another 0x100 bytes for emergency kernel stack
	 */
	unsigned long stack[64];
} __attribute__((packed));

#define ARCH_MIN_TASKALIGN	16

struct thread_struct {
/* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long	esp0;
	unsigned long	sysenter_cs;
	unsigned long	eip;
	unsigned long	esp;
	unsigned long	fs;
	unsigned long	gs;
/* Hardware debugging registers */
	unsigned long	debugreg[8];	/* %%db0-7 debug registers */
/* fault info */
	unsigned long	cr2, trap_no, error_code;
/* floating point info */
	union i387_union	i387;
/* virtual 86 mode info */
	struct vm86_struct __user * vm86_info;
	unsigned long	screen_bitmap;
	unsigned long	v86flags, v86mask, saved_esp0;
	unsigned int	saved_fs, saved_gs;
/* IO permissions */
	unsigned long	*io_bitmap_ptr;
};

#define INIT_THREAD  {							\
	.vm86_info	= NULL,						\
	.sysenter_cs	= __KERNEL_CS,					\
	.io_bitmap_ptr	= NULL,						\
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {							\
	.esp0		= sizeof(init_stack) + (long)&init_stack,	\
	.ss0		= __KERNEL_DS,					\
	.esp1		= sizeof(init_tss[0]) + (long)&init_tss[0],	\
	.ss1		= __KERNEL_CS,					\
	.ldt		= GDT_ENTRY_LDT,				\
	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,			\
	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },		\
}

static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->esp0 = thread->esp0;
	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
		tss->ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
}

#define start_thread(regs, new_eip, new_esp) do {		\
	__asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));	\
	set_fs(USER_DS);					\
	regs->xds = __USER_DS;					\
	regs->xes = __USER_DS;					\
	regs->xss = __USER_DS;					\
	regs->xcs = __USER_CS;					\
	regs->eip = new_eip;					\
	regs->esp = new_esp;					\
} while (0)

/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

extern unsigned long thread_saved_pc(struct task_struct *tsk);
void show_trace(struct task_struct *task, unsigned long *stack);

unsigned long get_wchan(struct task_struct *p);

#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)						\
({								\
	unsigned long *__ptr = (unsigned long *)(info);		\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);		\
})

#define task_pt_regs(task)					\
({								\
	struct pt_regs *__regs__;				\
	__regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info); \
	__regs__ - 1;						\
})

#define KSTK_EIP(task) (task_pt_regs(task)->eip)
#define KSTK_ESP(task) (task_pt_regs(task)->esp)

struct microcode_header {
	unsigned int hdrver, rev, date, sig, cksum, ldrver, pf;
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from Prescott processors */
struct extended_signature {
	unsigned int sig, pf, cksum;
};

struct extended_sigtable {
	unsigned int count, cksum;
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};

/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
#define MICROCODE_IOCFREE	_IO('6',0)

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop": : :"memory");
}

#define cpu_relax()	rep_nop()
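
/*
 * Illustrative sketch (not part of the original header): cpu_relax() dropped
 * into a busy-wait loop, as the comment above recommends.  The flag and
 * helper name are hypothetical.
 */
static inline void example_spin_until_set(volatile int *flag)
{
	while (!*flag)
		cpu_relax();	/* PAUSE eases pressure on the sibling thread */
}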

/* generic versions from gas */
#define GENERIC_NOP1	".byte 0x90\n"
#define GENERIC_NOP2	".byte 0x89,0xf6\n"
#define GENERIC_NOP3	".byte 0x8d,0x76,0x00\n"
#define GENERIC_NOP4	".byte 0x8d,0x74,0x26,0x00\n"
#define GENERIC_NOP5	GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6	".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP7	".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP8	GENERIC_NOP1 GENERIC_NOP7

#define K8_NOP1	GENERIC_NOP1
#define K8_NOP2	".byte 0x66,0x90\n"
#define K8_NOP3	".byte 0x66,0x66,0x90\n"
#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5	K8_NOP3 K8_NOP2
#define K8_NOP6	K8_NOP3 K8_NOP3
#define K8_NOP7	K8_NOP4 K8_NOP3
#define K8_NOP8	K8_NOP4 K8_NOP4

/* K7 nops: uses eax dependencies (arbitrary choice) */
#define K7_NOP1	GENERIC_NOP1
#define K7_NOP2	".byte 0x8b,0xc0\n"
#define K7_NOP3	".byte 0x8d,0x04,0x20\n"
#define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n"
#define K7_NOP5	K7_NOP4 ASM_NOP1
#define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n"
#define K7_NOP7	".byte 0x8D,0x04,0x05,0,0,0,0\n"
#define K7_NOP8	K7_NOP7 ASM_NOP1

#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif

#define ASM_NOP_MAX 8

/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6
   because they are microcoded there and very slow.
   However we don't do prefetches for pre-XP Athlons currently.
   That should be fixed. */
#define ARCH_HAS_PREFETCH
extern inline void prefetch(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency protocol. */
extern inline void prefetchw(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
#define spin_lock_prefetch(x)	prefetchw(x)
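
/*
 * Illustrative sketch (not part of the original header): overlapping the
 * next cache miss with current work during a linked-list walk by issuing
 * prefetch() on the next node.  The node type and helper are hypothetical.
 */
struct example_node { struct example_node *next; int payload; };

static inline int example_sum_list(struct example_node *p)
{
	int sum = 0;
	for (; p; p = p->next) {
		prefetch(p->next);	/* prefetch of NULL is harmless */
		sum += p->payload;
	}
	return sum;
}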

extern void select_idle_routine(const struct cpuinfo_x86 *c);

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

#ifdef CONFIG_SCHED_SMT
#define ARCH_HAS_SCHED_DOMAIN
#define ARCH_HAS_SCHED_WAKE_IDLE
#endif

#endif /* __ASM_I386_PROCESSOR_H */