/*
 * include/asm-i386/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */
#ifndef __ASM_I386_PROCESSOR_H
#define __ASM_I386_PROCESSOR_H

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <linux/cache.h>
#include <linux/config.h>
#include <linux/threads.h>
#include <asm/percpu.h>

/* flag for disabling the tsc */
extern int tsc_disable;
struct desc_struct {
	unsigned long a, b;
};

#define desc_empty(desc) \
		(!((desc)->a + (desc)->b))

#define desc_equal(desc1, desc2) \
		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor */
	char	wp_works_ok;	/* It doesn't on 386's */
	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	unsigned long	x86_capability[NCAPINTS];
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int	x86_cache_size;		/* in KB - valid for CPUs which support this call */
	int	x86_cache_alignment;	/* In bytes */
	unsigned long loops_per_jiffy;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_RISE 6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_NUM 9
#define X86_VENDOR_UNKNOWN 0xff
/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct doublefault_tss;
DECLARE_PER_CPU(struct tss_struct, init_tss);
#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif
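
/*
 * Illustrative sketch (not part of the original header): callers are
 * expected to consult these pre-filled descriptors rather than re-issue
 * CPUID themselves, e.g. to key a vendor/family-specific quirk. The
 * helper name below is hypothetical.
 */
static inline int cpu_is_intel_p6_class(void)
{
	return boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	       boot_cpu_data.x86 == 6;
}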
extern char ignore_fpu_irq;

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void dodgy_tsc(void);
/*
 * EFLAGS bits
 */
#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
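
/*
 * Illustrative sketch (not part of the original header): on i386 the
 * presence of the CPUID instruction is detected by checking whether the
 * EFLAGS.ID bit can be toggled, as the cpu setup code does. The helper
 * name below is hypothetical.
 */
static inline int eflags_id_is_changeable(void)
{
	unsigned long f1, f2;

	__asm__ __volatile__("pushfl\n\t"
			     "pushfl\n\t"
			     "popl %0\n\t"
			     "movl %0,%1\n\t"
			     "xorl %2,%0\n\t"
			     "pushl %0\n\t"
			     "popfl\n\t"
			     "pushfl\n\t"
			     "popl %0\n\t"
			     "popfl\n\t"
			     : "=&r" (f1), "=&r" (f2)
			     : "ir" (X86_EFLAGS_ID));

	return ((f1 ^ f2) & X86_EFLAGS_ID) != 0;
}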
/*
 * Generic CPUID function
 */
static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (op));
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax;

	__asm__("cpuid" : "=a" (eax) : "0" (op) : "bx", "cx", "dx");
	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx;

	__asm__("cpuid"
		: "=a" (eax), "=b" (ebx)
		: "0" (op)
		: "cx", "dx");
	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ecx;

	__asm__("cpuid"
		: "=a" (eax), "=c" (ecx)
		: "0" (op)
		: "bx", "dx");
	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, edx;

	__asm__("cpuid"
		: "=a" (eax), "=d" (edx)
		: "0" (op)
		: "bx", "cx");
	return edx;
}
#define load_cr3(pgdir) \
	asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)))
/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
#define X86_CR4_DE		0x0008	/* enable debugging extensions */
#define X86_CR4_PSE		0x0010	/* enable page size extensions */
#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
#define X86_CR4_MCE		0x0040	/* Machine check enable */
#define X86_CR4_PGE		0x0080	/* enable global pages */
#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us can
 * get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4 (unsigned long mask)
{
	mmu_cr4_features |= mask;
	__asm__("movl %%cr4,%%eax\n\t"
		"orl %0,%%eax\n\t"
		"movl %%eax,%%cr4\n"
		: : "irg" (mask)
		: "ax");
}

static inline void clear_in_cr4 (unsigned long mask)
{
	mmu_cr4_features &= ~mask;
	__asm__("movl %%cr4,%%eax\n\t"
		"andl %0,%%eax\n\t"
		"movl %%eax,%%cr4\n"
		: : "irg" (~mask)
		: "ax");
}
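
/*
 * Usage sketch (not part of the original header): the "notsc" boot
 * option is honoured by raising CR4.TSD so that RDTSC faults outside
 * ring 0, roughly:
 *
 *	if (tsc_disable)
 *		set_in_cr4(X86_CR4_TSD);
 */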
/*
 * NSC/Cyrix CPU configuration register indexes
 */
#define CX86_PCR0 0x20
#define CX86_GCR  0xb8
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_PCR1 0xf0
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc
/*
 * NSC/Cyrix CPU indexed register access macros
 */
#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })

#define setCx86(reg, data) do { \
	outb((reg), 0x22); \
	outb((data), 0x23); \
} while (0)
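
/*
 * Usage sketch (not part of the original header; it assumes outb()/inb()
 * from <asm/io.h> are visible at the point of use): Cyrix/NSC setup code
 * first sets the MAPEN field in CCR3 to unlock the remaining
 * configuration registers, and later writes the saved value back.
 */
static inline unsigned char cyrix_enable_mapen(void)
{
	unsigned char ccr3 = getCx86(CX86_CCR3);

	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* set MAPEN = 1 */
	return ccr3;	/* caller restores this to CCR3 to re-lock */
}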
/*
 * Bus types (default is ISA, but people can check others with these..)
 */

static inline void __monitor(const void *eax, unsigned long ecx,
		unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc8;"
		: :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}
/* from system description table in BIOS. Mostly for MCA use, but
   others may find it useful. */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
extern unsigned int mca_pentium_flag;
/*
 * User space process size: 3GB (default).
 */
#define __TASK_SIZE		(__PAGE_OFFSET)
#define TASK_SIZE		((unsigned long)__TASK_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define HAVE_ARCH_PICK_MMAP_LAYOUT
#define IO_BITMAP_BITS  65536
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
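
/*
 * Note (added for clarity, not in the original header): 65536 bits cover
 * the whole I/O port space; 65536/8 = 8192 bytes, i.e. 2048 32-bit longs.
 * The INVALID_IO_BITMAP_OFFSET values point past the TSS limit, so a task
 * without an I/O bitmap takes a fault on any ring-3 port access.
 */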
struct i387_fsave_struct {
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	long	status;		/* software status information */
};

struct i387_fxsave_struct {
	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
} __attribute__ ((aligned (16)));

struct i387_soft_struct {
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	unsigned char	ftop, changed, lookahead, no_update, rm, alimit;
	unsigned long	entry_eip;
};

union i387_union {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
};
struct thread_struct;

struct tss_struct {
	unsigned short	back_link,__blh;
	unsigned long	esp0;
	unsigned short	ss0,__ss0h;
	unsigned short	ss1,__ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
	unsigned short	ss2,__ss2h;
	unsigned long	eflags;
	unsigned long	eax,ecx,edx,ebx;
	unsigned short	es, __esh;
	unsigned short	cs, __csh;
	unsigned short	ss, __ssh;
	unsigned short	ds, __dsh;
	unsigned short	fs, __fsh;
	unsigned short	gs, __gsh;
	unsigned short	ldt, __ldth;
	unsigned short	trace, io_bitmap_base;
	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long	io_bitmap_max;
	struct thread_struct *io_bitmap_owner;
	/*
	 * pads the TSS to be cacheline-aligned (size is 0x100)
	 */
	unsigned long	__cacheline_filler[35];
	/*
	 * .. and then another 0x100 bytes for emergency kernel stack
	 */
	unsigned long	stack[64];
} __attribute__((packed));
#define ARCH_MIN_TASKALIGN	16

struct thread_struct {
/* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long	esp0;
	unsigned long	sysenter_cs;
/* Hardware debugging registers */
	unsigned long	debugreg[8];	/* %%db0-7 debug registers */
/* fault info */
	unsigned long	cr2, trap_no, error_code;
/* floating point info */
	union i387_union	i387;
/* virtual 86 mode info */
	struct vm86_struct __user * vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags, v86mask, saved_esp0;
	unsigned int		saved_fs, saved_gs;
/* IO permissions */
	unsigned long	*io_bitmap_ptr;
/* max allowed port in the bitmap, in bytes: */
	unsigned long	io_bitmap_max;
};
#define INIT_THREAD  {						\
	.sysenter_cs = __KERNEL_CS,				\
	.io_bitmap_ptr = NULL,					\
}

/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {						\
	.esp0		= sizeof(init_stack) + (long)&init_stack, \
	.ss0		= __KERNEL_DS,				\
	.ss1		= __KERNEL_CS,				\
	.ldt		= GDT_ENTRY_LDT,			\
	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		\
	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },	\
}
static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->esp0 = thread->esp0;
	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
		tss->ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
}
#define start_thread(regs, new_eip, new_esp) do {		\
	__asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));	\
	regs->xds = __USER_DS;					\
	regs->xes = __USER_DS;					\
	regs->xss = __USER_DS;					\
	regs->xcs = __USER_CS;					\
	regs->eip = new_eip;					\
	regs->esp = new_esp;					\
} while (0)
/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

extern unsigned long thread_saved_pc(struct task_struct *tsk);
void show_trace(struct task_struct *task, unsigned long *stack);

unsigned long get_wchan(struct task_struct *p);
#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)						\
({								\
	unsigned long *__ptr = (unsigned long *)(info);		\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);		\
})

#define task_pt_regs(task)					\
({								\
	struct pt_regs *__regs__;				\
	__regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info); \
	__regs__ - 1;						\
})

#define KSTK_EIP(task) (task_pt_regs(task)->eip)
#define KSTK_ESP(task) (task_pt_regs(task)->esp)
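
/*
 * Note (added for clarity, not in the original header): the saved
 * user-mode register frame (struct pt_regs) sits at the very top of the
 * task's kernel stack, so task_pt_regs() is KSTK_TOP() minus one
 * struct pt_regs; KSTK_EIP()/KSTK_ESP() read the user eip/esp from it.
 */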
struct microcode_header {
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;

/* microcode format is extended from prescott processors */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};

struct extended_sigtable {
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};

/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
#define MICROCODE_IOCFREE	_IO('6',0)
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop": : :"memory");
}

#define cpu_relax()	rep_nop()
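
/*
 * Usage sketch (not part of the original header): busy-wait loops put
 * cpu_relax() in the spin body, e.g.
 *
 *	while (!flag)
 *		cpu_relax();
 *
 * The PAUSE hint lets an SMT sibling use the pipeline and avoids the
 * memory-order-violation penalty when the wait finally ends.
 */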
/* generic versions from gas */
#define GENERIC_NOP1	".byte 0x90\n"
#define GENERIC_NOP2	".byte 0x89,0xf6\n"
#define GENERIC_NOP3	".byte 0x8d,0x76,0x00\n"
#define GENERIC_NOP4	".byte 0x8d,0x74,0x26,0x00\n"
#define GENERIC_NOP5	GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6	".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP7	".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP8	GENERIC_NOP1 GENERIC_NOP7
/* Opteron nops */
#define K8_NOP1	GENERIC_NOP1
#define K8_NOP2	".byte 0x66,0x90\n"
#define K8_NOP3	".byte 0x66,0x66,0x90\n"
#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5	K8_NOP3 K8_NOP2
#define K8_NOP6	K8_NOP3 K8_NOP3
#define K8_NOP7	K8_NOP4 K8_NOP3
#define K8_NOP8	K8_NOP4 K8_NOP4
/* K7 nops */
/* uses eax dependencies (arbitrary choice) */
#define K7_NOP1	GENERIC_NOP1
#define K7_NOP2	".byte 0x8b,0xc0\n"
#define K7_NOP3	".byte 0x8d,0x04,0x20\n"
#define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n"
#define K7_NOP5	K7_NOP4 ASM_NOP1
#define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n"
#define K7_NOP7	".byte 0x8D,0x04,0x05,0,0,0,0\n"
#define K7_NOP8	K7_NOP7 ASM_NOP1
#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif

#define ASM_NOP_MAX 8
/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6
   because they are microcoded there and very slow.
   However, we currently don't do prefetches for pre-XP Athlons;
   that should be fixed. */
#define ARCH_HAS_PREFETCH
extern inline void prefetch(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency protocol. */
extern inline void prefetchw(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

#define spin_lock_prefetch(x)	prefetchw(x)
extern void select_idle_routine(const struct cpuinfo_x86 *c);

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

#endif /* __ASM_I386_PROCESSOR_H */