X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-i386%2Fprocessor.h;h=e6712d64e3f89538f5e79ccc8960207345fb3206;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=6fcdb7d1d493d5adef2470365d54054388941473;hpb=9bf4aaab3e101692164d49b7ca357651eb691cb6;p=linux-2.6.git

diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 6fcdb7d1d..e6712d64e 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -19,6 +19,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 

 /* flag for disabling the tsc */
 extern int tsc_disable;
@@ -28,7 +30,7 @@ struct desc_struct {
 };

 #define desc_empty(desc) \
-	(!((desc)->a + (desc)->b))
+	(!((desc)->a | (desc)->b))

 #define desc_equal(desc1, desc2) \
 	(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
@@ -60,10 +62,18 @@ struct cpuinfo_x86 {
 	int	x86_cache_size;  /* in KB - valid for CPUS which support this call */
 	int	x86_cache_alignment;	/* In bytes */
-	int	fdiv_bug;
-	int	f00f_bug;
-	int	coma_bug;
+	char	fdiv_bug;
+	char	f00f_bug;
+	char	coma_bug;
+	char	pad0;
+	int	x86_power;
 	unsigned long loops_per_jiffy;
+#ifdef CONFIG_SMP
+	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
+#endif
+	unsigned char x86_max_cores;	/* cpuid returned max cores value */
+	unsigned char booted_cores;	/* number of cores as seen by OS */
+	unsigned char apicid;
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));

 #define X86_VENDOR_INTEL 0
@@ -84,8 +94,8 @@ struct cpuinfo_x86 {

 extern struct cpuinfo_x86 boot_cpu_data;
 extern struct cpuinfo_x86 new_cpu_data;
-extern struct tss_struct init_tss[NR_CPUS];
 extern struct tss_struct doublefault_tss;
+DECLARE_PER_CPU(struct tss_struct, init_tss);

 #ifdef CONFIG_SMP
 extern struct cpuinfo_x86 cpu_data[];
@@ -95,11 +105,20 @@ extern struct cpuinfo_x86 cpu_data[];
 #define current_cpu_data boot_cpu_data
 #endif

+extern int phys_proc_id[NR_CPUS];
+extern int cpu_core_id[NR_CPUS];
+extern int cpu_llc_id[NR_CPUS];
 extern char ignore_fpu_irq;

 extern void identify_cpu(struct cpuinfo_x86 *);
 extern void print_cpu_info(struct cpuinfo_x86 *);
-extern void dodgy_tsc(void);
+extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+
+#ifdef CONFIG_X86_HT
+extern void detect_ht(struct cpuinfo_x86 *c);
+#else
+static inline void detect_ht(struct cpuinfo_x86 *c) {}
+#endif

 /*
  * EFLAGS bits
@@ -124,15 +143,29 @@ extern void dodgy_tsc(void);

 /*
  * Generic CPUID function
+ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
+ * resulting in stale register contents being returned.
  */
-static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
+static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
+{
+	__asm__("cpuid"
+		: "=a" (*eax),
+		  "=b" (*ebx),
+		  "=c" (*ecx),
+		  "=d" (*edx)
+		: "0" (op), "c"(0));
+}
+
+/* Some CPUID calls want 'count' to be placed in ecx */
+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
+		int *edx)
 {
 	__asm__("cpuid"
 		: "=a" (*eax),
 		  "=b" (*ebx),
 		  "=c" (*ecx),
 		  "=d" (*edx)
-		: "0" (op));
+		: "0" (op), "c" (count));
 }

 /*
@@ -179,9 +212,7 @@ static inline unsigned int cpuid_edx(unsigned int op)
 	return edx;
 }

-#define load_cr3(pgdir) \
-	asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)))
-
+#define load_cr3(pgdir) write_cr3(__pa(pgdir))

 /*
  * Intel CPU features in CR4
@@ -208,22 +239,20 @@ extern unsigned long mmu_cr4_features;

 static inline void set_in_cr4 (unsigned long mask)
 {
+	unsigned cr4;
 	mmu_cr4_features |= mask;
-	__asm__("movl %%cr4,%%eax\n\t"
-		"orl %0,%%eax\n\t"
-		"movl %%eax,%%cr4\n"
-		: : "irg" (mask)
-		:"ax");
+	cr4 = read_cr4();
+	cr4 |= mask;
+	write_cr4(cr4);
 }

 static inline void clear_in_cr4 (unsigned long mask)
 {
+	unsigned cr4;
 	mmu_cr4_features &= ~mask;
-	__asm__("movl %%cr4,%%eax\n\t"
-		"andl %0,%%eax\n\t"
-		"movl %%eax,%%cr4\n"
-		: : "irg" (~mask)
-		:"ax");
+	cr4 = read_cr4();
+	cr4 &= ~mask;
+	write_cr4(cr4);
 }

 /*
@@ -257,10 +286,12 @@ static inline void clear_in_cr4 (unsigned long mask)
 		outb((data), 0x23); \
 } while (0)

-/*
- * Bus types (default is ISA, but people can check others with these..)
- */
-extern int MCA_bus;
+/* Stop speculative execution */
+static inline void sync_core(void)
+{
+	int tmp;
+	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
+}

 static inline void __monitor(const void *eax, unsigned long ecx,
 		unsigned long edx)
@@ -286,6 +317,9 @@ extern unsigned int machine_submodel_id;
 extern unsigned int BIOS_revision;
 extern unsigned int mca_pentium_flag;

+/* Boot loader type from the setup header */
+extern int bootloader_type;
+
 /*
  * User space process size: 3GB (default).
  */
@@ -294,7 +328,12 @@ extern unsigned int mca_pentium_flag;
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
-#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
+#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE/3)
+
+#define __HAVE_ARCH_ALIGN_STACK
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
 /*
  * Size of io_bitmap.
  */
@@ -304,6 +343,7 @@ extern unsigned int mca_pentium_flag;
 #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
 #define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
 #define INVALID_IO_BITMAP_OFFSET 0x8000
+#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000

 struct i387_fsave_struct {
 	long	cwd;
@@ -357,6 +397,8 @@ typedef struct {
 	unsigned long seg;
 } mm_segment_t;

+struct thread_struct;
+
 struct tss_struct {
 	unsigned short	back_link,__blh;
 	unsigned long	esp0;
@@ -388,10 +430,15 @@ struct tss_struct {
 	 * be within the limit.
 	 */
 	unsigned long	io_bitmap[IO_BITMAP_LONGS + 1];
+	/*
+	 * Cache the current maximum and the last task that used the bitmap:
+	 */
+	unsigned long io_bitmap_max;
+	struct thread_struct *io_bitmap_owner;
 	/*
 	 * pads the TSS to be cacheline-aligned (size is 0x100)
 	 */
-	unsigned long __cacheline_filler[37];
+	unsigned long __cacheline_filler[35];
 	/*
 	 * .. and then another 0x100 bytes for emergency kernel stack
 	 */
@@ -422,6 +469,9 @@ struct thread_struct {
 	unsigned int		saved_fs, saved_gs;
 /* IO permissions */
 	unsigned long	*io_bitmap_ptr;
+	unsigned long	iopl;
+/* max allowed port in the bitmap, in bytes: */
+	unsigned long	io_bitmap_max;
 };

 #define INIT_THREAD { \
@@ -439,9 +489,7 @@ struct thread_struct {
 #define INIT_TSS { \
 	.esp0 = sizeof(init_stack) + (long)&init_stack, \
 	.ss0 = __KERNEL_DS, \
-	.esp1 = sizeof(init_tss[0]) + (long)&init_tss[0], \
 	.ss1 = __KERNEL_CS, \
-	.ldt = GDT_ENTRY_LDT, \
 	.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
 	.io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
 }
@@ -465,8 +513,38 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *threa
 	regs->xcs = __USER_CS; \
 	regs->eip = new_eip; \
 	regs->esp = new_esp; \
+	preempt_disable(); \
+	load_user_cs_desc(smp_processor_id(), current->mm); \
+	preempt_enable(); \
 } while (0)

+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, register) \
+	__asm__("movl %%db" #register ", %0" \
+		:"=r" (var))
+#define set_debugreg(value, register) \
+	__asm__("movl %0,%%db" #register \
+		: /* no output */ \
+		:"r" (value))
+
+/*
+ * Set IOPL bits in EFLAGS from given mask
+ */
+static inline void set_iopl_mask(unsigned mask)
+{
+	unsigned int reg;
+	__asm__ __volatile__ ("pushfl;"
+			      "popl %0;"
+			      "andl %1, %0;"
+			      "orl %2, %0;"
+			      "pushl %0;"
+			      "popfl"
+				: "=&r" (reg)
+				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
+}
+
 /* Forward declaration, a strange C thing */
 struct task_struct;
 struct mm_struct;
@@ -494,10 +572,20 @@ unsigned long get_wchan(struct task_struct *p);
 	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
 })

+/*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+ * This is necessary to guarantee that the entire "struct pt_regs"
+ * is accessable even if the CPU haven't stored the SS/ESP registers
+ * on the stack (interrupt gate does not save these registers
+ * when switching to the same priv ring).
+ * Therefore beware: accessing the xss/esp fields of the
+ * "struct pt_regs" is possible, but they may contain the
+ * completely wrong values.
+ */
 #define task_pt_regs(task) \
 ({ \
 	struct pt_regs *__regs__; \
-	__regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info); \
+	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
 	__regs__ - 1; \
 })

@@ -539,8 +627,6 @@ struct extended_sigtable {
 	unsigned int reserved[3];
 	struct extended_signature sigs[0];
 };
-/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
-#define MICROCODE_IOCFREE _IO('6',0)

 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
 static inline void rep_nop(void)
@@ -618,7 +704,7 @@ static inline void rep_nop(void)
    However we don't do prefetches for pre XP Athlons currently
    That should be fixed. */
 #define ARCH_HAS_PREFETCH
-extern inline void prefetch(const void *x)
+static inline void prefetch(const void *x)
 {
 	alternative_input(ASM_NOP4,
 			  "prefetchnta (%1)",
@@ -632,7 +718,7 @@ extern inline void prefetch(const void *x)
 /* 3dnow! prefetch to get an exclusive cache line. Useful for
    spinlocks to avoid one state transition in the cache coherency
    protocol. */
-extern inline void prefetchw(const void *x)
+static inline void prefetchw(const void *x)
 {
 	alternative_input(ASM_NOP4,
 			  "prefetchw (%1)",
@@ -645,9 +731,22 @@ extern void select_idle_routine(const struct cpuinfo_x86 *c);

 #define cache_line_size() (boot_cpu_data.x86_cache_alignment)

-#ifdef CONFIG_SCHED_SMT
-#define ARCH_HAS_SCHED_DOMAIN
-#define ARCH_HAS_SCHED_WAKE_IDLE
+extern unsigned long boot_option_idle_override;
+extern void enable_sep_cpu(void);
+extern int sysenter_setup(void);
+
+#ifdef CONFIG_MTRR
+extern void mtrr_ap_init(void);
+extern void mtrr_bp_init(void);
+#else
+#define mtrr_ap_init() do {} while (0)
+#define mtrr_bp_init() do {} while (0)
+#endif
+
+#ifdef CONFIG_X86_MCE
+extern void mcheck_init(struct cpuinfo_x86 *c);
+#else
+#define mcheck_init(c) do {} while(0)
 #endif

 #endif /* __ASM_I386_PROCESSOR_H */
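Editor's note on the CPUID changes above (illustrative, not part of the patch): cpuid() now pins %ecx to 0 because some leaves, notably leaf 4 ("deterministic cache parameters"), return different data for each sub-leaf index passed in %ecx, and CPUs such as the Cyrix MII leave %ecx untouched, so a stale value would make the result unpredictable. The new cpuid_count() exposes that sub-leaf index; it is the kind of enumeration the newly declared init_intel_cacheinfo() performs. The user-space sketch below mirrors the helper to show the interface. The file name, the 32-iteration safety cap, and the decision to decode only the cache type and level fields are the editor's assumptions; it also needs a CPU that implements leaf 4 and a non-PIC 32-bit or 64-bit build, since "=b" claims %ebx just as the kernel helper does.

/* cpuid_count_demo.c -- illustrative only; mirrors the kernel's cpuid_count() */
#include <stdio.h>

/* Same constraint set as the kernel helper: %ecx carries the sub-leaf index. */
static void cpuid_count(unsigned int op, unsigned int count,
			unsigned int *eax, unsigned int *ebx,
			unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		: "0" (op), "c" (count));
}

int main(void)
{
	unsigned int index;

	/* Walk CPUID leaf 4: each %ecx value describes one cache. */
	for (index = 0; index < 32; index++) {
		unsigned int eax, ebx, ecx, edx;
		unsigned int type, level;

		cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
		type = eax & 0x1f;		/* EAX[4:0]: 0 means no more caches */
		level = (eax >> 5) & 0x7;	/* EAX[7:5]: cache level */
		if (type == 0)
			break;
		printf("cache %u: L%u %s\n", index, level,
		       type == 1 ? "data" : type == 2 ? "instruction" : "unified");
	}
	return 0;
}

With the plain cpuid() wrapper this loop cannot be written, which is exactly why the wrapper now forces %ecx to 0 and a counted variant was added alongside it.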