/*
 * include/asm-ppc/tlbflush.h
 *
 * TLB flushing for 32-bit PowerPC.  Three CPU families are covered:
 * 4xx and 8xx flush with short inline tlbie/tlbia sequences, while
 * the hash-table MMUs (6xx/7xx/7xxx) use out-of-line implementations
 * declared extern below.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__
#ifndef _PPC_TLBFLUSH_H
#define _PPC_TLBFLUSH_H

#include <linux/config.h>
#include <linux/mm.h>

/* Low-level helpers implemented out of line. */
extern void _tlbie(unsigned long address);	/* invalidate one TLB entry */
extern void _tlbia(void);			/* invalidate the entire TLB */

#if defined(CONFIG_4xx)

#ifndef CONFIG_44x
#define __tlbia()	asm volatile ("sync; tlbia; isync" : : : "memory")
#else
/* 44x cannot use the inline sequence; fall back to the helper. */
#define __tlbia		_tlbia
#endif

static inline void flush_tlb_mm(struct mm_struct *mm)
	{ __tlbia(); }
static inline void flush_tlb_page(struct vm_area_struct *vma,
				unsigned long vmaddr)
	{ _tlbie(vmaddr); }
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
	{ _tlbie(vmaddr); }
static inline void flush_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
	{ __tlbia(); }
static inline void flush_tlb_kernel_range(unsigned long start,
				unsigned long end)
	{ __tlbia(); }

#elif defined(CONFIG_8xx)
#define __tlbia()	asm volatile ("tlbia; sync" : : : "memory")

static inline void flush_tlb_mm(struct mm_struct *mm)
	{ __tlbia(); }
static inline void flush_tlb_page(struct vm_area_struct *vma,
				unsigned long vmaddr)
	{ _tlbie(vmaddr); }
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
	{ _tlbie(vmaddr); }
/*
 * NOTE(review): this variant used to take a struct mm_struct *, unlike
 * every other variant above/below and the generic flush_tlb_range()
 * interface, which pass a struct vm_area_struct *.  Changed to match;
 * the argument is unused here, so behavior is identical.
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
	{ __tlbia(); }
static inline void flush_tlb_kernel_range(unsigned long start,
				unsigned long end)
	{ __tlbia(); }

#else	/* 6xx, 7xx, 7xxx cpus */
struct mm_struct;
struct vm_area_struct;
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#endif

/*
 * This is called in munmap when we have freed up some page-table
 * pages.  We don't need to do anything here, there's nothing special
 * about our page-table pages.  -- paulus
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

#endif /* _PPC_TLBFLUSH_H */
#endif /*__KERNEL__ */