#ifndef _SPARC64_TLBFLUSH_H
#define _SPARC64_TLBFLUSH_H

#include <linux/config.h>
#include <asm/mmu_context.h>

/* TLB flush operations. */
extern void __flush_tlb_all(void);
extern void __flush_tlb_mm(unsigned long context, unsigned long r);
extern void __flush_tlb_range(unsigned long context, unsigned long start,
			      unsigned long r, unsigned long end,
			      unsigned long pgsz, unsigned long size);
extern void __flush_tlb_page(unsigned long context, unsigned long page, unsigned long r);

extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
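
/* A note on the arguments, inferred from the wrappers below: "context"
 * holds the hardware context bits taken from CTX_HWBITS(mm->context),
 * and "r" names the MMU context register the flush is performed
 * against (the wrappers always pass SECONDARY_CONTEXT).  For
 * __flush_tlb_range, "pgsz" appears to be the stride between flushed
 * pages and "size" the byte length of the region.  A minimal sketch of
 * what a one-page user flush expands to:
 *
 *	if (CTX_VALID(mm->context))
 *		__flush_tlb_page(CTX_HWBITS(mm->context),
 *				 addr & PAGE_MASK, SECONDARY_CONTEXT);
 */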

#ifndef CONFIG_SMP

#define flush_tlb_all()		__flush_tlb_all()
#define flush_tlb_kernel_range(start,end) \
	__flush_tlb_kernel_range(start,end)

#define flush_tlb_mm(__mm) \
do { if (CTX_VALID((__mm)->context)) \
	__flush_tlb_mm(CTX_HWBITS((__mm)->context), SECONDARY_CONTEXT); \
} while (0)
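
/* If an mm has never been assigned a valid hardware context, no TLB
 * entries can exist for it, so the CTX_VALID() tests here and in the
 * macros below presumably exist to skip the flush entirely in that
 * case.
 */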

#define flush_tlb_range(__vma, start, end) \
do { if (CTX_VALID((__vma)->vm_mm->context)) { \
	unsigned long __start = (start)&PAGE_MASK; \
	unsigned long __end = PAGE_ALIGN(end); \
	__flush_tlb_range(CTX_HWBITS((__vma)->vm_mm->context), __start, \
			  SECONDARY_CONTEXT, __end, PAGE_SIZE, \
			  (__end - __start)); \
     } \
} while (0)

#define flush_tlb_vpte_range(__mm, start, end) \
do { if (CTX_VALID((__mm)->context)) { \
	unsigned long __start = (start)&PAGE_MASK; \
	unsigned long __end = PAGE_ALIGN(end); \
	__flush_tlb_range(CTX_HWBITS((__mm)->context), __start, \
			  SECONDARY_CONTEXT, __end, PAGE_SIZE, \
			  (__end - __start)); \
     } \
} while (0)
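
/* The _vpte_ variants flush the TLB entries that map the linear virtual
 * page table itself rather than user data pages; within this file their
 * only caller is flush_tlb_pgtables() at the bottom.
 */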

#define flush_tlb_page(vma, page) \
do { struct mm_struct *__mm = (vma)->vm_mm; \
     if (CTX_VALID(__mm->context)) \
	__flush_tlb_page(CTX_HWBITS(__mm->context), (page)&PAGE_MASK, \
			 SECONDARY_CONTEXT); \
} while (0)

#define flush_tlb_vpte_page(mm, addr) \
do { struct mm_struct *__mm = (mm); \
     if (CTX_VALID(__mm->context)) \
	__flush_tlb_page(CTX_HWBITS(__mm->context), (addr)&PAGE_MASK, \
			 SECONDARY_CONTEXT); \
} while (0)

#else /* CONFIG_SMP */

extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *mm);
extern void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				unsigned long end);
extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page);
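
/* The smp_ variants are assumed to cross-call the other processors so
 * that stale translations are shot down on every CPU, not just the
 * local one; any CTX_VALID() short-circuit is presumably done inside
 * those functions rather than in the macros below.
 */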

#define flush_tlb_all()		smp_flush_tlb_all()
#define flush_tlb_mm(mm)	smp_flush_tlb_mm(mm)
#define flush_tlb_range(vma, start, end) \
	smp_flush_tlb_range((vma)->vm_mm, start, end)
#define flush_tlb_vpte_range(mm, start, end) \
	smp_flush_tlb_range(mm, start, end)
#define flush_tlb_kernel_range(start, end) \
	smp_flush_tlb_kernel_range(start, end)
#define flush_tlb_page(vma, page) \
	smp_flush_tlb_page((vma)->vm_mm, page)
#define flush_tlb_vpte_page(mm, page) \
	smp_flush_tlb_page((mm), page)

#endif /* ! CONFIG_SMP */
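
/* A worked example of the address arithmetic below, assuming sparc64's
 * 8 KB pages (PAGE_SHIFT == 13) and 8-byte PTEs: the PTE mapping user
 * address "v" lives at
 *
 *	vpte_base + ((v >> PAGE_SHIFT) << 3) == vpte_base + (v >> 10)
 *
 * which is exactly the "vpte_base + (addr >> (PAGE_SHIFT - 3))" form
 * used for both endpoints of the range handed to flush_tlb_vpte_range().
 */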

static __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
					  unsigned long end)
{
	/* Note the signed type.  */
	long s = start, e = end, vpte_base;
	/* Nobody should call us with start below VM hole and end above.
	   See if it is really true.  */
	if (s > e)
		BUG();
	/* Currently free_pgtables guarantees this.  */
	s &= PMD_MASK;
	e = (e + PMD_SIZE - 1) & PMD_MASK;
	vpte_base = (tlb_type == spitfire ?
		     VPTE_BASE_SPITFIRE :
		     VPTE_BASE_CHEETAH);
	flush_tlb_vpte_range(mm,
			     vpte_base + (s >> (PAGE_SHIFT - 3)),
			     vpte_base + (e >> (PAGE_SHIFT - 3)));
}

#endif /* _SPARC64_TLBFLUSH_H */