diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 3b9b35d92..cd1c7b6dd 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -17,9 +17,9 @@
 #include <asm/msr.h>
 #include <asm/system.h>
 #include <linux/cache.h>
-#include <linux/config.h>
 #include <linux/threads.h>
 #include <asm/percpu.h>
+#include <linux/cpumask.h>
 
 /* flag for disabling the tsc */
 extern int tsc_disable;
@@ -29,7 +29,7 @@ struct desc_struct {
 };
 
 #define desc_empty(desc) \
-		(!((desc)->a + (desc)->b))
+		(!((desc)->a | (desc)->b))
 
 #define desc_equal(desc1, desc2) \
 		(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
@@ -61,11 +61,22 @@ struct cpuinfo_x86 {
 	int	x86_cache_size;		/* in KB - valid for CPUS which support this call */
 	int	x86_cache_alignment;	/* In bytes */
-	int	fdiv_bug;
-	int	f00f_bug;
-	int	coma_bug;
+	char	fdiv_bug;
+	char	f00f_bug;
+	char	coma_bug;
+	char	pad0;
+	int	x86_power;
 	unsigned long loops_per_jiffy;
-	unsigned char x86_num_cores;
+#ifdef CONFIG_SMP
+	cpumask_t llc_shared_map;	/* cpus sharing the last level cache */
+#endif
+	unsigned char x86_max_cores;	/* cpuid returned max cores value */
+	unsigned char apicid;
+#ifdef CONFIG_SMP
+	unsigned char booted_cores;	/* number of cores as seen by OS */
+	__u8 phys_proc_id;		/* Physical processor id. */
+	__u8 cpu_core_id;		/* Core id */
+#endif
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
 #define X86_VENDOR_INTEL 0
@@ -97,13 +108,13 @@ extern struct cpuinfo_x86 cpu_data[];
 #define current_cpu_data boot_cpu_data
 #endif
 
-extern	int phys_proc_id[NR_CPUS];
+extern	int cpu_llc_id[NR_CPUS];
 extern	char ignore_fpu_irq;
 
 extern void identify_cpu(struct cpuinfo_x86 *);
 extern void print_cpu_info(struct cpuinfo_x86 *);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
-extern void dodgy_tsc(void);
+extern unsigned short num_cache_leaves;
 
 #ifdef CONFIG_X86_HT
 extern void detect_ht(struct cpuinfo_x86 *c);
@@ -137,7 +148,7 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
  * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
  * resulting in stale register contents being returned.
 */
-static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
+static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
 {
 	__asm__("cpuid"
 		: "=a" (*eax),
@@ -147,6 +158,18 @@ static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
 		: "0" (op), "c"(0));
 }
 
+/* Some CPUID calls want 'count' to be placed in ecx */
+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
+		int *edx)
+{
+	__asm__("cpuid"
+		: "=a" (*eax),
+		  "=b" (*ebx),
+		  "=c" (*ecx),
+		  "=d" (*edx)
+		: "0" (op), "c" (count));
+}
+
 /*
  * CPUID functions returning a single datum
  */
@@ -191,9 +214,7 @@ static inline unsigned int cpuid_edx(unsigned int op)
 	return edx;
 }
 
-#define load_cr3(pgdir) \
-	asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)))
-
+#define load_cr3(pgdir) write_cr3(__pa(pgdir))
 
 /*
  * Intel CPU features in CR4
@@ -220,22 +241,20 @@ extern unsigned long mmu_cr4_features;
 
 static inline void set_in_cr4 (unsigned long mask)
 {
+	unsigned cr4;
 	mmu_cr4_features |= mask;
-	__asm__("movl %%cr4,%%eax\n\t"
-		"orl %0,%%eax\n\t"
-		"movl %%eax,%%cr4\n"
-		: : "irg" (mask)
-		:"ax");
+	cr4 = read_cr4();
+	cr4 |= mask;
+	write_cr4(cr4);
 }
 
 static inline void clear_in_cr4 (unsigned long mask)
 {
+	unsigned cr4;
 	mmu_cr4_features &= ~mask;
-	__asm__("movl %%cr4,%%eax\n\t"
-		"andl %0,%%eax\n\t"
-		"movl %%eax,%%cr4\n"
-		: : "irg" (~mask)
-		:"ax");
+	cr4 = read_cr4();
+	cr4 &= ~mask;
+	write_cr4(cr4);
 }
 
 /*
@@ -269,6 +288,13 @@ static inline void clear_in_cr4 (unsigned long mask)
 		outb((data), 0x23); \
 } while (0)
 
+/* Stop speculative execution */
+static inline void sync_core(void)
+{
+	int tmp;
+	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
+}
+
 static inline void __monitor(const void *eax, unsigned long ecx,
 		unsigned long edx)
 {
@@ -297,15 +323,17 @@ extern unsigned int mca_pentium_flag;
 extern int bootloader_type;
 
 /*
- * User space process size: (3GB default).
+ * User space process size: 3GB (default).
 */
-#define __TASK_SIZE	(__PAGE_OFFSET)
-#define TASK_SIZE	((unsigned long)__TASK_SIZE)
+#define TASK_SIZE	(PAGE_OFFSET)
 
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
 */
-#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
+#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE/3)
+
+#define __HAVE_ARCH_ALIGN_STACK
+extern unsigned long arch_align_stack(unsigned long sp);
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
 
@@ -443,6 +471,7 @@ struct thread_struct {
 	unsigned int		saved_fs, saved_gs;
 /* IO permissions */
 	unsigned long	*io_bitmap_ptr;
+	unsigned long	iopl;
 /* max allowed port in the bitmap, in bytes: */
 	unsigned long	io_bitmap_max;
 };
@@ -463,7 +492,6 @@ struct thread_struct {
 	.esp0 = sizeof(init_stack) + (long)&init_stack, \
 	.ss0 = __KERNEL_DS, \
 	.ss1 = __KERNEL_CS, \
-	.ldt = GDT_ENTRY_LDT, \
 	.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
 	.io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
 }
@@ -487,8 +515,38 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
 	regs->xcs = __USER_CS; \
 	regs->eip = new_eip; \
 	regs->esp = new_esp; \
+	preempt_disable(); \
+	load_user_cs_desc(smp_processor_id(), current->mm); \
+	preempt_enable(); \
 } while (0)
 
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, register) \
+	__asm__("movl %%db" #register ", %0" \
+		:"=r" (var))
+#define set_debugreg(value, register) \
+	__asm__("movl %0,%%db" #register \
+		: /* no output */ \
+		:"r" (value))
+
+/*
+ * Set IOPL bits in EFLAGS from given mask
+ */
+static inline void set_iopl_mask(unsigned mask)
+{
+	unsigned int reg;
+	__asm__ __volatile__ ("pushfl;"
+			      "popl %0;"
+			      "andl %1, %0;"
+			      "orl %2, %0;"
+			      "pushl %0;"
+			      "popfl"
+				: "=&r" (reg)
+				: "i" (~X86_EFLAGS_IOPL), "r" (mask));
+}
+
 /* Forward declaration, a strange C thing */
 struct task_struct;
 struct mm_struct;
@@ -505,7 +563,7 @@ extern void prepare_to_copy(struct task_struct *tsk);
 extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
-void show_trace(struct task_struct *task, unsigned long *stack);
+void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);
 
 unsigned long get_wchan(struct task_struct *p);
 
@@ -516,10 +574,20 @@ unsigned long get_wchan(struct task_struct *p);
 	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
 })
 
+/*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+ * This is necessary to guarantee that the entire "struct pt_regs"
+ * is accessible even if the CPU hasn't stored the SS/ESP registers
+ * on the stack (an interrupt gate does not save these registers
+ * when switching to the same privilege ring).
+ * Therefore beware: accessing the xss/esp fields of the
+ * "struct pt_regs" is possible, but they may contain the
+ * completely wrong values.
+ */
 #define task_pt_regs(task) \
 ({ \
 	struct pt_regs *__regs__; \
-	__regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info); \
+	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
 	__regs__ - 1; \
 })
 
@@ -561,8 +629,6 @@ struct extended_sigtable {
 	unsigned int reserved[3];
 	struct extended_signature sigs[0];
 };
-/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
-#define MICROCODE_IOCFREE	_IO('6',0)
 
 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
 static inline void rep_nop(void)
@@ -640,7 +706,7 @@ static inline void rep_nop(void)
    However we don't do prefetches for pre XP Athlons currently
    That should be fixed. */
 #define ARCH_HAS_PREFETCH
-extern inline void prefetch(const void *x)
+static inline void prefetch(const void *x)
 {
 	alternative_input(ASM_NOP4,
 			  "prefetchnta (%1)",
@@ -654,7 +720,7 @@ extern inline void prefetch(const void *x)
 
 /* 3dnow! prefetch to get an exclusive cache line. Useful for
    spinlocks to avoid one state transition in the cache coherency protocol. */
-extern inline void prefetchw(const void *x)
+static inline void prefetchw(const void *x)
 {
 	alternative_input(ASM_NOP4,
 			  "prefetchw (%1)",
@@ -668,5 +734,7 @@ extern void select_idle_routine(const struct cpuinfo_x86 *c);
 #define cache_line_size() (boot_cpu_data.x86_cache_alignment)
 
 extern unsigned long boot_option_idle_override;
+extern void enable_sep_cpu(void);
+extern int sysenter_setup(void);
 
 #endif /* __ASM_I386_PROCESSOR_H */
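A note on the desc_empty() hunk above: the switch from `+` to `|` matters because unsigned addition can wrap to zero even when both descriptor words are nonzero, whereas OR is zero only if both operands are zero. A minimal standalone demo of the arithmetic (not from the kernel tree; `unsigned int` stands in for the 32-bit a/b descriptor words):

	/* desc_empty_demo.c - why '|' is safer than '+' for an emptiness test.
	 * Build: gcc -O2 desc_empty_demo.c
	 */
	#include <stdio.h>

	struct desc_struct {
		unsigned int a, b;	/* two 32-bit halves, as on i386 */
	};

	#define desc_empty_add(desc) (!((desc)->a + (desc)->b))
	#define desc_empty_or(desc)  (!((desc)->a | (desc)->b))

	int main(void)
	{
		/* Nonzero halves that cancel under 32-bit unsigned addition:
		 * 0x80000000 + 0x80000000 wraps to 0. */
		struct desc_struct d = { 0x80000000u, 0x80000000u };

		printf("'+' version says empty: %d\n", desc_empty_add(&d)); /* 1 - wrong */
		printf("'|' version says empty: %d\n", desc_empty_or(&d));  /* 0 - right */
		return 0;
	}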
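The new cpuid_count() helper exists because some CPUID leaves take a subleaf index in ECX; plain cpuid() always clears ECX. Leaf 4 (Intel deterministic cache parameters) is the motivating case, since init_intel_cacheinfo() walks it to populate num_cache_leaves. A user-space sketch of the same pattern (illustrative only: the file name and 16-iteration bound are assumptions, and it needs an Intel CPU that supports leaf 4):

	/* cpuid_count_demo.c - user-space sketch, not part of the patch.
	 * Build: gcc -O2 cpuid_count_demo.c
	 */
	#include <stdio.h>

	static inline void cpuid_count(unsigned int op, unsigned int count,
				       unsigned int *eax, unsigned int *ebx,
				       unsigned int *ecx, unsigned int *edx)
	{
		/* Same constraint pattern as the kernel helper: op in EAX,
		 * subleaf in ECX, all four registers read back. */
		__asm__("cpuid"
			: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
			: "0" (op), "c" (count));
	}

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx, i;

		/* Walk cache leaves until EAX[4:0] (cache type) reads 0,
		 * roughly the loop shape used to size num_cache_leaves. */
		for (i = 0; i < 16; i++) {
			cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
			if ((eax & 0x1f) == 0)
				break;
			printf("cache leaf %u: type %u, level %u\n",
			       i, eax & 0x1f, (eax >> 5) & 0x7);
		}
		return 0;
	}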