/*
 * include/asm-i386/processor.h
 *
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_I386_PROCESSOR_H
#define __ASM_I386_PROCESSOR_H

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <linux/cache.h>
#include <linux/config.h>
#include <linux/threads.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <xen/interface/physdev.h>

/* flag for disabling the tsc */
extern int tsc_disable;

#define desc_empty(desc) \
		(!((desc)->a | (desc)->b))

#define desc_equal(desc1, desc2) \
		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })

/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head.S, so think twice
 *  before touching them. [mj]
 */
struct cpuinfo_x86 {
	__u8	x86;		/* CPU family */
	__u8	x86_vendor;	/* CPU vendor */
	char	wp_works_ok;	/* It doesn't on 386's */
	char	hlt_works_ok;	/* Problems on some 486Dx4's and old 386's */
	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
	unsigned long	x86_capability[NCAPINTS];
	char	x86_vendor_id[16];
	char	x86_model_id[64];
	int	x86_cache_size;		/* in KB - valid for CPUs which support this call */
	int	x86_cache_alignment;	/* In bytes */
	unsigned long loops_per_jiffy;
	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
	unsigned char x86_max_cores;	/* cpuid returned max cores value */
	unsigned char booted_cores;	/* number of cores as seen by OS */
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_NEXGEN	4
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_RISE		6
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9
#define X86_VENDOR_UNKNOWN	0xff

/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;

#ifndef CONFIG_X86_NO_TSS
extern struct tss_struct doublefault_tss;
DECLARE_PER_CPU(struct tss_struct, init_tss);
#endif
#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define cpu_data (&boot_cpu_data)
#define current_cpu_data boot_cpu_data
#endif
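/*
 * Usage sketch (illustrative only, not compiled): boot_cpu_data and
 * current_cpu_data are read like ordinary structures, e.g. to branch on
 * the vendor detected at boot.  The function name below is hypothetical.
 */
#if 0
static inline int example_is_amd_cpu(void)
{
	return current_cpu_data.x86_vendor == X86_VENDOR_AMD;
}
#endif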
extern int phys_proc_id[NR_CPUS];
extern int cpu_core_id[NR_CPUS];
extern int cpu_llc_id[NR_CPUS];
extern char ignore_fpu_irq;

extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
#ifdef CONFIG_X86_HT
extern void detect_ht(struct cpuinfo_x86 *c);
#else
static inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif
#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
		: "0" (op), "c" (count));
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)

static inline unsigned int cpuid_ebx(unsigned int op)
	unsigned int eax, ebx;
		: "=a" (eax), "=b" (ebx)

static inline unsigned int cpuid_ecx(unsigned int op)
	unsigned int eax, ecx;
		: "=a" (eax), "=c" (ecx)

static inline unsigned int cpuid_edx(unsigned int op)
	unsigned int eax, edx;
		: "=a" (eax), "=d" (edx)
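/*
 * Usage sketch (illustrative only, not compiled): the single-datum
 * helpers above make one-leaf queries concise.  CPUID leaf 1 reports the
 * basic feature flags in %edx; bit 4 of that word is the TSC bit.  The
 * function name below is hypothetical.
 */
#if 0
static inline int example_cpu_has_tsc(void)
{
	return (cpuid_edx(1) >> 4) & 1;	/* CPUID.01H:EDX bit 4 = TSC */
}
#endif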
#define load_cr3(pgdir) write_cr3(__pa(pgdir))

/*
 * Intel CPU features in CR4
 */
#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
#define X86_CR4_DE		0x0008	/* enable debugging extensions */
#define X86_CR4_PSE		0x0010	/* enable page size extensions */
#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
#define X86_CR4_MCE		0x0040	/* Machine check enable */
#define X86_CR4_PGE		0x0080	/* enable global pages */
#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
/*
 * Save the cr4 feature set we're using (i.e.
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPUs that boot up
 * after us can get the correct flags.
 */
extern unsigned long mmu_cr4_features;
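/*
 * Usage sketch (illustrative only, not compiled): a boot-time feature
 * enable typically goes through the set_in_cr4() helper defined just
 * below, so the bit is also recorded in mmu_cr4_features and can be
 * replayed on CPUs brought up later.  cpu_has_fxsr is assumed to come
 * from <asm/cpufeature.h>; the function name is hypothetical.
 */
#if 0
static inline void example_enable_fxsr(void)
{
	if (cpu_has_fxsr)
		set_in_cr4(X86_CR4_OSFXSR);	/* remembered for later CPUs */
}
#endif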
static inline void set_in_cr4 (unsigned long mask)
	mmu_cr4_features |= mask;

static inline void clear_in_cr4 (unsigned long mask)
	mmu_cr4_features &= ~mask;
/*
 *	NSC/Cyrix CPU configuration register indexes
 */
#define CX86_PCR0 0x20
#define CX86_GCR  0xb8
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_PCR1 0xf0
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc
/*
 *	NSC/Cyrix CPU indexed register access macros
 */
#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })

#define setCx86(reg, data) do { \
	outb((reg), 0x22); \
	outb((data), 0x23); \
} while (0)
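/*
 * Usage sketch (illustrative only, not compiled): configuration registers
 * are normally updated with a read-modify-write through the two macros
 * above.  The register and bits chosen here are examples only; consult
 * the NSC/Cyrix documentation for the real bit meanings.
 */
#if 0
static inline void example_tweak_ccr2(void)
{
	unsigned char ccr2 = getCx86(CX86_CCR2);
	setCx86(CX86_CCR2, ccr2 | 0x88);	/* example bits only */
}
#endif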
/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;
	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
}
static inline void __monitor(const void *eax, unsigned long ecx,
		unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc8;"
		: :"a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(
		".byte 0x0f,0x01,0xc9;"
		: :"a" (eax), "c" (ecx));
}
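/*
 * Usage sketch (illustrative only, not compiled): MONITOR arms the cache
 * line containing a flag word and MWAIT then idles until that line is
 * written (or an interrupt arrives).  Real callers re-check their wakeup
 * condition between the two calls; the function name is hypothetical.
 */
#if 0
static inline void example_mwait_on(void *flag_addr)
{
	__monitor(flag_addr, 0, 0);	/* arm monitoring of the line */
	__mwait(0, 0);			/* sleep until it is written */
}
#endif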
/* From the system description table in the BIOS.  Mostly for MCA use,
   but others may find it useful. */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
extern unsigned int mca_pentium_flag;

/* Boot loader type from the setup header */
extern int bootloader_type;
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE	(PAGE_OFFSET)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE/3)

#define __HAVE_ARCH_ALIGN_STACK

#define HAVE_ARCH_PICK_MMAP_LAYOUT
#define IO_BITMAP_BITS	65536
#define IO_BITMAP_BYTES	(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS	(IO_BITMAP_BYTES/sizeof(long))
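/*
 * One bit per possible I/O port: 65536 bits = 8192 bytes = 2048 longs
 * on i386, where sizeof(long) == 4.
 */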
#ifndef CONFIG_X86_NO_TSS
#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
#endif
#define INVALID_IO_BITMAP_OFFSET 0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
struct i387_fsave_struct {
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	long	status;		/* software status information */
};
struct i387_fxsave_struct {
	long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
} __attribute__ ((aligned (16)));
struct i387_soft_struct {
	long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
	unsigned char	ftop, changed, lookahead, no_update, rm, alimit;
	unsigned long	entry_eip;
};

union i387_union {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
};

struct thread_struct;
#ifndef CONFIG_X86_NO_TSS
struct tss_struct {
	unsigned short	back_link,__blh;
	unsigned short	ss0,__ss0h;
	unsigned short	ss1,__ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
	unsigned short	ss2,__ss2h;
	unsigned long	eflags;
	unsigned long	eax,ecx,edx,ebx;
	unsigned short	es, __esh;
	unsigned short	cs, __csh;
	unsigned short	ss, __ssh;
	unsigned short	ds, __dsh;
	unsigned short	fs, __fsh;
	unsigned short	gs, __gsh;
	unsigned short	ldt, __ldth;
	unsigned short	trace, io_bitmap_base;
	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long	io_bitmap_max;
	struct thread_struct *io_bitmap_owner;
	/*
	 * pads the TSS to be cacheline-aligned (size is 0x100)
	 */
	unsigned long	__cacheline_filler[35];
	/*
	 * .. and then another 0x100 bytes for emergency kernel stack
	 */
	unsigned long	stack[64];
} __attribute__((packed));
#endif

#define ARCH_MIN_TASKALIGN	16
struct thread_struct {
/* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long	sysenter_cs;
/* Hardware debugging registers */
	unsigned long	debugreg[8];	/* %%db0-7 debug registers */
	unsigned long	cr2, trap_no, error_code;
/* floating point info */
	union i387_union	i387;
/* virtual 86 mode info */
	struct vm86_struct __user * vm86_info;
	unsigned long	screen_bitmap;
	unsigned long	v86flags, v86mask, saved_esp0;
	unsigned int	saved_fs, saved_gs;
	unsigned long	*io_bitmap_ptr;
/* max allowed port in the bitmap, in bytes: */
	unsigned long	io_bitmap_max;
};
#define INIT_THREAD  {							\
	.sysenter_cs = __KERNEL_CS,					\
	.io_bitmap_ptr = NULL,						\
}
#ifndef CONFIG_X86_NO_TSS
/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {							\
	.esp0		= sizeof(init_stack) + (long)&init_stack,	\
	.ss0		= __KERNEL_DS,					\
	.ss1		= __KERNEL_CS,					\
	.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,			\
	.io_bitmap	= { [ 0 ... IO_BITMAP_LONGS] = ~0 },		\
}
#endif
#ifndef CONFIG_X86_NO_TSS
static inline void __load_esp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->esp0 = thread->esp0;
	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
		tss->ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
}
#define load_esp0(tss, thread) \
	__load_esp0(tss, thread)
#else
#define load_esp0(tss, thread) \
	HYPERVISOR_stack_switch(__KERNEL_DS, (thread)->esp0)
#endif
#define start_thread(regs, new_eip, new_esp) do {		\
	__asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));	\
	regs->xds = __USER_DS;					\
	regs->xes = __USER_DS;					\
	regs->xss = __USER_DS;					\
	regs->xcs = __USER_CS;					\
	regs->eip = new_eip;					\
	regs->esp = new_esp;					\
	load_user_cs_desc(smp_processor_id(), current->mm);	\
} while (0)
/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)				\
		(var) = HYPERVISOR_get_debugreg((register))
#define set_debugreg(value, register)				\
		HYPERVISOR_set_debugreg((register), (value))
/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void set_iopl_mask(unsigned mask)
{
	struct physdev_set_iopl set_iopl;

	/* Force the change at ring 0. */
	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}
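/*
 * The IOPL field occupies bits 12-13 of EFLAGS, so (mask >> 12) & 3 above
 * extracts the requested privilege level; e.g. a mask of X86_EFLAGS_IOPL
 * (0x3000) requests IOPL 3.  A zero mask is mapped to IOPL 1, presumably
 * so the guest kernel (which runs in ring 1 under Xen) keeps port access.
 */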
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

extern unsigned long thread_saved_pc(struct task_struct *tsk);
void show_trace(struct task_struct *task, unsigned long *stack);

unsigned long get_wchan(struct task_struct *p);
#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)						\
({								\
	unsigned long *__ptr = (unsigned long *)(info);		\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);		\
})
/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same privilege ring).
 * Therefore beware: accessing the xss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)						\
({									\
	struct pt_regs *__regs__;					\
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1;							\
})
#define KSTK_EIP(task) (task_pt_regs(task)->eip)
#define KSTK_ESP(task) (task_pt_regs(task)->esp)
struct microcode_header {
	unsigned int datasize;
	unsigned int totalsize;
	unsigned int reserved[3];
};

struct microcode {
	struct microcode_header hdr;
	unsigned int bits[0];
};

typedef struct microcode microcode_t;
typedef struct microcode_header microcode_header_t;
/* microcode format is extended from Prescott processors */
struct extended_signature {
	unsigned int sig;
	unsigned int pf;
	unsigned int cksum;
};
struct extended_sigtable {
	unsigned int reserved[3];
	struct extended_signature sigs[0];
};
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep;nop": : :"memory");
}

#define cpu_relax()	rep_nop()
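/*
 * Usage sketch (illustrative only, not compiled): cpu_relax() belongs in
 * the body of any busy-wait loop; the PAUSE hint saves power and avoids
 * the memory-order penalty on Pentium 4/Hyper-Threading parts.
 */
#if 0
static inline void example_spin_until_set(volatile int *flag)
{
	while (!*flag)
		cpu_relax();	/* PAUSE inside the polling loop */
}
#endif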
/* generic versions from gas */
#define GENERIC_NOP1	".byte 0x90\n"
#define GENERIC_NOP2	".byte 0x89,0xf6\n"
#define GENERIC_NOP3	".byte 0x8d,0x76,0x00\n"
#define GENERIC_NOP4	".byte 0x8d,0x74,0x26,0x00\n"
#define GENERIC_NOP5	GENERIC_NOP1 GENERIC_NOP4
#define GENERIC_NOP6	".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP7	".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
#define GENERIC_NOP8	GENERIC_NOP1 GENERIC_NOP7

#define K8_NOP1 GENERIC_NOP1
#define K8_NOP2	".byte 0x66,0x90\n"
#define K8_NOP3	".byte 0x66,0x66,0x90\n"
#define K8_NOP4	".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5	K8_NOP3 K8_NOP2
#define K8_NOP6	K8_NOP3 K8_NOP3
#define K8_NOP7	K8_NOP4 K8_NOP3
#define K8_NOP8	K8_NOP4 K8_NOP4
/* uses eax dependencies (arbitrary choice) */
#define K7_NOP1 GENERIC_NOP1
#define K7_NOP2	".byte 0x8b,0xc0\n"
#define K7_NOP3	".byte 0x8d,0x04,0x20\n"
#define K7_NOP4	".byte 0x8d,0x44,0x20,0x00\n"
#define K7_NOP5	K7_NOP4 ASM_NOP1
#define K7_NOP6	".byte 0x8d,0x80,0,0,0,0\n"
#define K7_NOP7	".byte 0x8D,0x04,0x05,0,0,0,0\n"
#define K7_NOP8	K7_NOP7 ASM_NOP1
#ifdef CONFIG_MK8
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#elif defined(CONFIG_MK7)
#define ASM_NOP1 K7_NOP1
#define ASM_NOP2 K7_NOP2
#define ASM_NOP3 K7_NOP3
#define ASM_NOP4 K7_NOP4
#define ASM_NOP5 K7_NOP5
#define ASM_NOP6 K7_NOP6
#define ASM_NOP7 K7_NOP7
#define ASM_NOP8 K7_NOP8
#else
#define ASM_NOP1 GENERIC_NOP1
#define ASM_NOP2 GENERIC_NOP2
#define ASM_NOP3 GENERIC_NOP3
#define ASM_NOP4 GENERIC_NOP4
#define ASM_NOP5 GENERIC_NOP5
#define ASM_NOP6 GENERIC_NOP6
#define ASM_NOP7 GENERIC_NOP7
#define ASM_NOP8 GENERIC_NOP8
#endif

#define ASM_NOP_MAX 8
/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6
   because they are microcoded there and very slow.
   However, we currently don't do prefetches for pre-XP Athlons;
   that should be fixed. */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *x)
	alternative_input(ASM_NOP4,
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency protocol. */
static inline void prefetchw(const void *x)
	alternative_input(ASM_NOP4,

#define spin_lock_prefetch(x)	prefetchw(x)
extern void select_idle_routine(const struct cpuinfo_x86 *c);

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

extern unsigned long boot_option_idle_override;
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
#ifdef CONFIG_MTRR
extern void mtrr_ap_init(void);
extern void mtrr_bp_init(void);
#else
#define mtrr_ap_init() do {} while (0)
#define mtrr_bp_init() do {} while (0)
#endif
#ifdef CONFIG_X86_MCE
extern void mcheck_init(struct cpuinfo_x86 *c);
#else
#define mcheck_init(c) do {} while(0)
#endif

#endif /* __ASM_I386_PROCESSOR_H */