1 #ifndef _X8664_TLBFLUSH_H
2 #define _X8664_TLBFLUSH_H
5 #include <asm/processor.h>
7 static inline unsigned long get_cr3(void)
10 asm volatile("mov %%cr3,%0" : "=r" (cr3));
11 return machine_to_phys(cr3);
14 static inline void set_cr3(unsigned long cr3)
17 /* What the hell is this supposed to do: JQ */
18 asm volatile("mov %0,%%cr3" :: "r" (cr3) : "memory");
/* Full TLB flush is delegated to the hypervisor. */
#define __flush_tlb()	xen_tlb_flush()
23 static inline unsigned long get_cr4(void)
26 asm volatile("mov %%cr4,%0" : "=r" (cr4));
30 static inline void set_cr4(unsigned long cr4)
32 asm volatile("mov %0,%%cr4" :: "r" (cr4) : "memory");
/* Xen has no distinct "flush including globals"; use the full flush. */
#define __flush_tlb_all() xen_tlb_flush()

/* Single-page invalidation via hypervisor INVLPG; argument parenthesized
   to keep the cast binding correct for expression arguments. */
#define __flush_tlb_one(addr)	xen_invlpg((unsigned long)(addr))
/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 * x86-64 can only flush individual pages or full VMs. For a range flush
 * we always do the full VM. Might be worth trying if for a small
 * range a few INVLPGs in a row are a win.
 */
58 #define flush_tlb() __flush_tlb()
59 #define flush_tlb_all() __flush_tlb_all()
60 #define local_flush_tlb() __flush_tlb()
62 static inline void flush_tlb_mm(struct mm_struct *mm)
64 if (mm == current->active_mm)
68 static inline void flush_tlb_page(struct vm_area_struct *vma,
71 if (vma->vm_mm == current->active_mm)
72 __flush_tlb_one(addr);
75 static inline void flush_tlb_range(struct vm_area_struct *vma,
76 unsigned long start, unsigned long end)
78 if (vma->vm_mm == current->active_mm)
86 #define local_flush_tlb() \
89 extern void flush_tlb_all(void);
90 extern void flush_tlb_current_task(void);
91 extern void flush_tlb_mm(struct mm_struct *);
92 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
94 #define flush_tlb() flush_tlb_current_task()
96 static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
98 flush_tlb_mm(vma->vm_mm);
101 #define TLBSTATE_OK 1
102 #define TLBSTATE_LAZY 2
104 /* Roughly an IPI every 20MB with 4k pages for freeing page table
105 ranges. Cost is about 42k of memory for each CPU. */
106 #define ARCH_FREE_PTE_NR 5350
/* Kernel mappings may be global; only a full flush is guaranteed to work. */
#define flush_tlb_kernel_range(start, end) flush_tlb_all()
112 static inline void flush_tlb_pgtables(struct mm_struct *mm,
113 unsigned long start, unsigned long end)
115 /* x86_64 does not keep any page table caches in a software TLB.
116 The CPUs do in their hardware TLBs, but they are handled
117 by the normal TLB flushing algorithms. */
120 #endif /* _X8664_TLBFLUSH_H */