#include <linux/cache.h>
#include <linux/config.h>
#include <linux/threads.h>
+#include <asm/percpu.h>
+#include <linux/cpumask.h>
/* flag for disabling the tsc */
extern int tsc_disable;
};
#define desc_empty(desc) \
- (!((desc)->a + (desc)->b))
+ (!((desc)->a | (desc)->b))
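/* Note on the desc_empty() change: with '+', two nonzero halves can wrap
 * modulo 2^32 and sum to zero, making a live descriptor test as empty;
 * bitwise OR is zero only when both words are zero. */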
#define desc_equal(desc1, desc2) \
(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
int x86_cache_size; /* in KB - valid for CPUs which support this
call */
int x86_cache_alignment; /* In bytes */
- int fdiv_bug;
- int f00f_bug;
- int coma_bug;
+ char fdiv_bug;
+ char f00f_bug;
+ char coma_bug;
+ char pad0;
+ int x86_power;
unsigned long loops_per_jiffy;
+#ifdef CONFIG_SMP
+ cpumask_t llc_shared_map; /* cpus sharing the last level cache */
+#endif
+ unsigned char x86_max_cores; /* cpuid returned max cores value */
+ unsigned char booted_cores; /* number of cores as seen by OS */
+ unsigned char apicid;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
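/*
 * Illustrative sketch (not part of the original header): how the new
 * topology fields are typically consumed. The function name is
 * hypothetical; print_cpu_info() below is the real consumer.
 */
static inline void show_core_topology_sketch(struct cpuinfo_x86 *c)
{
	/* x86_max_cores/booted_cores/apicid are filled in by identify_cpu() */
	printk("apicid %d: %d of %d max cores booted\n",
	       c->apicid, c->booted_cores, c->x86_max_cores);
}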
#define X86_VENDOR_INTEL 0
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
-extern struct tss_struct init_tss[NR_CPUS];
extern struct tss_struct doublefault_tss;
+DECLARE_PER_CPU(struct tss_struct, init_tss);
#ifdef CONFIG_SMP
extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#else
#define current_cpu_data boot_cpu_data
#endif
+extern int phys_proc_id[NR_CPUS];
+extern int cpu_core_id[NR_CPUS];
+extern int cpu_llc_id[NR_CPUS];
extern char ignore_fpu_irq;
extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
-extern void dodgy_tsc(void);
+extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+
+#ifdef CONFIG_X86_HT
+extern void detect_ht(struct cpuinfo_x86 *c);
+#else
+static inline void detect_ht(struct cpuinfo_x86 *c) {}
+#endif
/*
* EFLAGS bits
/*
* Generic CPUID function
+ * clear %ecx, since some CPUs (Cyrix MII) do not set or clear %ecx,
+ * resulting in stale register contents being returned.
*/
-static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
+static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
+{
+ __asm__("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (op), "c"(0));
+}
+
+/* Some CPUID calls want 'count' to be placed in ecx */
+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
+ int *edx)
{
__asm__("cpuid"
: "=a" (*eax),
"=b" (*ebx),
"=c" (*ecx),
"=d" (*edx)
- : "0" (op));
+ : "0" (op), "c" (count));
}
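/*
 * Hedged usage sketch: CPUID leaf 4 (Intel "deterministic cache
 * parameters") is the typical cpuid_count() user; the sub-leaf index
 * goes in %ecx and EAX[4:0] == 0 marks the end of the cache list.
 * The helper name is illustrative, not part of this header.
 */
static inline int count_cache_leaves_sketch(void)
{
	int i, eax, ebx, ecx, edx;

	for (i = 0; ; i++) {
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		if ((eax & 0x1f) == 0)	/* cache type 0: no more caches */
			return i;
	}
}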
/*
return edx;
}
-#define load_cr3(pgdir) \
- asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)))
-
+#define load_cr3(pgdir) write_cr3(__pa(pgdir))
/*
* Intel CPU features in CR4
static inline void set_in_cr4 (unsigned long mask)
{
+ unsigned cr4;
mmu_cr4_features |= mask;
- __asm__("movl %%cr4,%%eax\n\t"
- "orl %0,%%eax\n\t"
- "movl %%eax,%%cr4\n"
- : : "irg" (mask)
- :"ax");
+ cr4 = read_cr4();
+ cr4 |= mask;
+ write_cr4(cr4);
}
static inline void clear_in_cr4 (unsigned long mask)
{
+ unsigned cr4;
mmu_cr4_features &= ~mask;
- __asm__("movl %%cr4,%%eax\n\t"
- "andl %0,%%eax\n\t"
- "movl %%eax,%%cr4\n"
- : : "irg" (~mask)
- :"ax");
+ cr4 = read_cr4();
+ cr4 &= ~mask;
+ write_cr4(cr4);
}
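/*
 * Minimal usage sketch, assuming X86_CR4_PGE from this header and
 * cpu_has_pge from <asm/cpufeature.h>: this mirrors how global-page
 * support gets switched on during CPU init.
 */
static inline void enable_pge_sketch(void)
{
	if (cpu_has_pge)
		set_in_cr4(X86_CR4_PGE);	/* also recorded in mmu_cr4_features */
}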
/*
outb((data), 0x23); \
} while (0)
-/*
- * Bus types (default is ISA, but people can check others with these..)
- */
-extern int MCA_bus;
+/* Stop speculative execution */
+static inline void sync_core(void)
+{
+ int tmp;
+ asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
+}
static inline void __monitor(const void *eax, unsigned long ecx,
unsigned long edx)
extern unsigned int BIOS_revision;
extern unsigned int mca_pentium_flag;
+/* Boot loader type from the setup header */
+extern int bootloader_type;
+
+/*
+ * User space process size: 3GB (default).
+ */
+#define TASK_SIZE (PAGE_OFFSET)
+
/* This decides where the kernel will search for a free chunk of vm
* space during mmaps.
*/
#define HAVE_ARCH_PICK_MMAP_LAYOUT
/*
- * Size of io_bitmap, covering ports 0 to 0x3ff.
+ * Size of io_bitmap, covering the full 0-0xffff I/O port range.
*/
-#define IO_BITMAP_BITS 1024
+#define IO_BITMAP_BITS 65536
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000
+#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
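/*
 * Both invalid offsets point past the TSS segment limit, so any port
 * access faults. The _LAZY variant additionally tells the #GP handler
 * that the task does own an I/O bitmap, which is then copied into the
 * TSS on demand rather than on every context switch.
 */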
struct i387_fsave_struct {
long cwd;
unsigned long seg;
} mm_segment_t;
+struct thread_struct;
+
struct tss_struct {
unsigned short back_link,__blh;
unsigned long esp0;
* be within the limit.
*/
unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
+ /*
+ * Cache the current maximum and the last task that used the bitmap:
+ */
+ unsigned long io_bitmap_max;
+ struct thread_struct *io_bitmap_owner;
/*
* pads the TSS to be cacheline-aligned (size is 0x100)
*/
- unsigned long __cacheline_filler[5];
+ unsigned long __cacheline_filler[35];
/*
* .. and then another 0x100 bytes for emergency kernel stack
*/
#define ARCH_MIN_TASKALIGN 16
-#if ((1<<CONFIG_STACK_SIZE_SHIFT) < PAGE_SIZE)
-#error (1<<CONFIG_STACK_SIZE_SHIFT) must be at least PAGE_SIZE
-#endif
-#define STACK_PAGE_COUNT ((1<<CONFIG_STACK_SIZE_SHIFT)/PAGE_SIZE)
-
-
struct thread_struct {
/* cached TLS descriptors. */
struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
- void *stack_page[STACK_PAGE_COUNT];
unsigned long esp0;
unsigned long sysenter_cs;
unsigned long eip;
unsigned int saved_fs, saved_gs;
/* IO permissions */
unsigned long *io_bitmap_ptr;
+ unsigned long iopl;
+/* max allowed port in the bitmap, in bytes: */
+ unsigned long io_bitmap_max;
};
#define INIT_THREAD { \
#define INIT_TSS { \
.esp0 = sizeof(init_stack) + (long)&init_stack, \
.ss0 = __KERNEL_DS, \
- .esp1 = sizeof(init_tss[0]) + (long)&init_tss[0], \
.ss1 = __KERNEL_CS, \
- .ldt = GDT_ENTRY_LDT, \
.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
.io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
}
-static inline void
-load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
{
tss->esp0 = thread->esp0;
/* This can only happen when SEP is enabled, no need to test "SEP"arately */
regs->xcs = __USER_CS; \
regs->eip = new_eip; \
regs->esp = new_esp; \
+ preempt_disable(); \
load_user_cs_desc(smp_processor_id(), current->mm); \
+ preempt_enable(); \
} while (0)
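/*
 * The preempt_disable()/preempt_enable() pair above pins the task to one
 * CPU so that smp_processor_id() still names the CPU whose GDT receives
 * the updated user CS descriptor.
 */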
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, register) \
+ __asm__("movl %%db" #register ", %0" \
+ :"=r" (var))
+#define set_debugreg(value, register) \
+ __asm__("movl %0,%%db" #register \
+ : /* no output */ \
+ :"r" (value))
+
+/*
+ * Set IOPL bits in EFLAGS from given mask
+ */
+static inline void set_iopl_mask(unsigned mask)
+{
+ unsigned int reg;
+ __asm__ __volatile__ ("pushfl;"
+ "popl %0;"
+ "andl %1, %0;"
+ "orl %2, %0;"
+ "pushl %0;"
+ "popfl"
+ : "=&r" (reg)
+ : "i" (~X86_EFLAGS_IOPL), "r" (mask));
+}
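/*
 * Note: the mask is expected to be a ready-made EFLAGS.IOPL field
 * (bits 12-13, i.e. already shifted as in X86_EFLAGS_IOPL); the sequence
 * above rewrites only those two bits and leaves the rest of EFLAGS alone.
 */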
+
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
*/
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-#ifdef CONFIG_X86_HIGH_ENTRY
-#define virtual_esp0(tsk) \
- ((unsigned long)(tsk)->thread_info->virtual_stack + ((tsk)->thread.esp0 - (unsigned long)(tsk)->thread_info->real_stack))
-#else
-# define virtual_esp0(tsk) ((tsk)->thread.esp0)
-#endif
-
-#define load_virtual_esp0(tss, task) \
- do { \
- tss->esp0 = virtual_esp0(task); \
- if (likely(cpu_has_sep) && unlikely(tss->ss1 != task->thread.sysenter_cs)) { \
- tss->ss1 = task->thread.sysenter_cs; \
- wrmsr(MSR_IA32_SYSENTER_CS, \
- task->thread.sysenter_cs, 0); \
- } \
- } while (0)
-
extern unsigned long thread_saved_pc(struct task_struct *tsk);
void show_trace(struct task_struct *task, unsigned long *stack);
(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
})
+/*
+ * The -8 below reserves 8 bytes on top of the ring0 stack.
+ * This is necessary to guarantee that the entire "struct pt_regs"
+ * is accessible even if the CPU hasn't stored the SS/ESP registers
+ * on the stack (the interrupt gate does not save these registers
+ * when switching to the same privilege ring).
+ * Therefore beware: accessing the xss/esp fields of the
+ * "struct pt_regs" is possible, but they may contain
+ * completely wrong values.
+ */
#define task_pt_regs(task) \
({ \
struct pt_regs *__regs__; \
- __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info); \
+ __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
__regs__ - 1; \
})
unsigned int reserved[3];
struct extended_signature sigs[0];
};
-/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
-#define MICROCODE_IOCFREE _IO('6',0)
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
However we don't do prefetches for pre-XP Athlons currently.
That should be fixed. */
#define ARCH_HAS_PREFETCH
-extern inline void prefetch(const void *x)
+static inline void prefetch(const void *x)
{
alternative_input(ASM_NOP4,
"prefetchnta (%1)",
/* 3dnow! prefetch to get an exclusive cache line. Useful for
spinlocks to avoid one state transition in the cache coherency protocol. */
-extern inline void prefetchw(const void *x)
+static inline void prefetchw(const void *x)
{
alternative_input(ASM_NOP4,
"prefetchw (%1)",
#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
-#ifdef CONFIG_SCHED_SMT
-#define ARCH_HAS_SCHED_DOMAIN
-#define ARCH_HAS_SCHED_WAKE_IDLE
+extern unsigned long boot_option_idle_override;
+extern void enable_sep_cpu(void);
+extern int sysenter_setup(void);
+
+#ifdef CONFIG_MTRR
+extern void mtrr_ap_init(void);
+extern void mtrr_bp_init(void);
+#else
+#define mtrr_ap_init() do {} while (0)
+#define mtrr_bp_init() do {} while (0)
+#endif
+
+#ifdef CONFIG_X86_MCE
+extern void mcheck_init(struct cpuinfo_x86 *c);
+#else
+#define mcheck_init(c) do {} while (0)
#endif
#endif /* __ASM_I386_PROCESSOR_H */