fedora core 6 1.2949 + vserver 2.2.0
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
index 2f4c8ee..4dd8284 100644
--- a/include/asm-i386/tlbflush.h
+++ b/include/asm-i386/tlbflush.h
@@ -1,11 +1,18 @@
 #ifndef _I386_TLBFLUSH_H
 #define _I386_TLBFLUSH_H
 
-#include <linux/config.h>
 #include <linux/mm.h>
 #include <asm/processor.h>
 
-#define __flush_tlb()                                                  \
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define __flush_tlb() __native_flush_tlb()
+#define __flush_tlb_global() __native_flush_tlb_global()
+#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
+#endif
+
+#define __native_flush_tlb()                                           \
        do {                                                            \
                unsigned int tmpreg;                                    \
                                                                        \
  * Global pages have to be flushed a bit differently. Not a real
  * performance problem because this does not happen often.
  */
-#define __flush_tlb_global()                                           \
+#define __native_flush_tlb_global()                                    \
        do {                                                            \
-               unsigned int tmpreg;                                    \
+               unsigned int tmpreg, cr4, cr4_orig;                     \
                                                                        \
                __asm__ __volatile__(                                   \
-                       "movl %1, %%cr4;  # turn off PGE     \n"        \
+                       "movl %%cr4, %2;  # turn off PGE     \n"        \
+                       "movl %2, %1;                        \n"        \
+                       "andl %3, %1;                        \n"        \
+                       "movl %1, %%cr4;                     \n"        \
                        "movl %%cr3, %0;                     \n"        \
                        "movl %0, %%cr3;  # flush TLB        \n"        \
                        "movl %2, %%cr4;  # turn PGE back on \n"        \
-                       : "=&r" (tmpreg)                                \
-                       : "r" (mmu_cr4_features & ~X86_CR4_PGE),        \
-                         "r" (mmu_cr4_features)                        \
+                       : "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig) \
+                       : "i" (~X86_CR4_PGE)                            \
                        : "memory");                                    \
        } while (0)
 
-extern unsigned long pgkern_mask;
+#define __native_flush_tlb_single(addr)                                \
+       __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
 
 # define __flush_tlb_all()                                             \
        do {                                                            \
@@ -47,9 +57,6 @@ extern unsigned long pgkern_mask;
 
 #define cpu_has_invlpg (boot_cpu_data.x86 > 3)
 
-#define __flush_tlb_single(addr) \
-       __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
-
 #ifdef CONFIG_X86_INVLPG
 # define __flush_tlb_one(addr) __flush_tlb_single(addr)
 #else
@@ -131,7 +138,7 @@ struct tlb_state
        int state;
        char __cacheline_padding[L1_CACHE_BYTES-8];
 };
-extern struct tlb_state cpu_tlbstate[NR_CPUS];
+DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
 
 
 #endif
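
Note: the patch above splits the raw x86 operations out into __native_flush_tlb(), __native_flush_tlb_global() and __native_flush_tlb_single(), so that CONFIG_PARAVIRT can route __flush_tlb() and friends through asm/paravirt.h instead of issuing the privileged instructions directly. The sketch below restates the same three native primitives as plain C functions with GCC inline asm, purely for illustration: the demo_* names are hypothetical, the constant is re-derived here for self-containment, and the code assumes it runs at ring 0 on 32-bit x86 (CR3/CR4 writes and INVLPG fault in user mode), so this is not the kernel's verbatim implementation.

/*
 * Illustrative sketch of the native TLB-flush primitives from the
 * patch above, written as standalone functions rather than macros.
 * Assumes ring 0 on a 32-bit x86 CPU.
 */

#define X86_CR4_PGE 0x00000080		/* CR4 bit 7: page global enable */

/* Flush all non-global TLB entries by reloading CR3. */
static inline void demo_flush_tlb(void)
{
	unsigned int tmp;

	__asm__ __volatile__(
		"movl %%cr3, %0\n\t"
		"movl %0, %%cr3"	/* writing CR3 flushes the TLB */
		: "=r" (tmp) : : "memory");
}

/*
 * Flush everything, including global pages: save CR4, clear CR4.PGE,
 * reload CR3, then restore the original CR4.  Toggling PGE invalidates
 * the global entries that a plain CR3 reload leaves in place.
 */
static inline void demo_flush_tlb_global(void)
{
	unsigned int tmp, cr4, cr4_orig;

	__asm__ __volatile__(
		"movl %%cr4, %2\n\t"	/* save original CR4 */
		"movl %2, %1\n\t"
		"andl %3, %1\n\t"	/* clear PGE */
		"movl %1, %%cr4\n\t"
		"movl %%cr3, %0\n\t"
		"movl %0, %%cr3\n\t"	/* flush TLB */
		"movl %2, %%cr4"	/* restore PGE */
		: "=&r" (tmp), "=&r" (cr4), "=&r" (cr4_orig)
		: "i" (~X86_CR4_PGE)
		: "memory");
}

/* Flush the translation for a single page with INVLPG. */
static inline void demo_flush_tlb_single(unsigned long addr)
{
	__asm__ __volatile__("invlpg (%0)" : : "r" (addr) : "memory");
}

The last hunk's change from an NR_CPUS-sized cpu_tlbstate array to DECLARE_PER_CPU follows the same pattern as other per-CPU conversions of that era: each CPU gets its own copy managed by the per-CPU allocator instead of indexing a static array by CPU number.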