1 /* arch/sparc64/mm/tlb.c
3 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */
6 #include <linux/kernel.h>
7 #include <linux/init.h>
8 #include <linux/percpu.h>
10 #include <linux/swap.h>
12 #include <asm/pgtable.h>
13 #include <asm/pgalloc.h>
14 #include <asm/tlbflush.h>
15 #include <asm/cacheflush.h>
16 #include <asm/mmu_context.h>
19 /* Heavily inspired by the ppc64 code. */
/* Per-CPU TLB-flush batch: PTE teardown enqueues virtual addresses here
 * and they are flushed in one go by flush_tlb_pending().
 * NOTE(review): the positional initializer must match the field order of
 * struct mmu_gather (declared in a header not visible in this excerpt) --
 * confirm against <asm/tlb.h> before changing either side.
 */
21 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) =
22 { NULL, 0, 0, 0, 0, 0, { 0 }, { NULL }, };
/* Flush every TLB entry batched on this CPU's mmu_gather.
 *
 * Flushes are only issued while the mm still holds a valid hardware MMU
 * context (CTX_VALID); if the context has already been recycled, the
 * stale entries need no explicit flush.  The visible calls suggest an
 * SMP cross-call path (smp_flush_tlb_pending) and a local path
 * (__flush_tlb_pending keyed by CTX_HWBITS) -- presumably selected by a
 * CONFIG_SMP conditional that this excerpt does not show.
 *
 * NOTE(review): lines are missing from this excerpt (no braces, both
 * call argument lists are truncated); consult the complete file before
 * editing any logic here.
 */
24 void flush_tlb_pending(void)
26 struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
/* Hardware context of the mm whose addresses are batched. */
29 unsigned long context = mp->mm->context;
31 if (CTX_VALID(context)) {
33 smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
36 __flush_tlb_pending(CTX_HWBITS(context), mp->tlb_nr,
/* Queue one PTE's virtual address into this CPU's TLB-flush batch.
 *
 * @ptep: pointer to the PTE being torn down (its backing page encodes
 *        the owning mm in ->mapping and the base vaddr in ->index).
 * @orig: the previous PTE value, used to decide whether a D-cache
 *        alias flush is needed for dirty pages.
 *
 * NOTE(review): many lines are missing from this excerpt (declarations
 * of 'mm'/'page', several if-bodies, the batch-append/flush tail); the
 * comments below describe only what the visible lines establish.
 */
44 void tlb_batch_add(pte_t *ptep, pte_t orig)
46 struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
49 unsigned long vaddr, nr;
/* Recover the mm owning this PTE page: the pte page's ->mapping field
 * is (re)used to stash the mm_struct pointer. */
51 ptepage = virt_to_page(ptep);
52 mm = (struct mm_struct *) ptepage->mapping;
54 /* It is more efficient to let flush_tlb_kernel_range() handle kernel
 * mappings (excerpt: the remainder of this original comment is missing). */
/* Reconstruct the user virtual address this PTE maps: page base from
 * ->index plus the PTE's offset within its page scaled by PTRS_PER_PTE. */
60 vaddr = ptepage->index +
61 (((unsigned long)ptep & ~PAGE_MASK) * PTRS_PER_PTE);
/* Dirty pages may need a D-cache alias flush before the mapping dies. */
65 if (pte_dirty(orig)) {
66 unsigned long paddr, pfn = pte_pfn(orig);
67 struct address_space *mapping;
73 page = pfn_to_page(pfn);
74 if (PageReserved(page))
77 /* A real file page? */
78 mapping = page_mapping(page);
/* Bit 13 (8K page size) distinguishes D-cache colors on this VIPT
 * cache: if the kernel and user mappings differ in that bit they
 * alias, so flush all cached copies of the page.
 * NOTE(review): 1 << 13 presumably equals PAGE_SIZE here -- confirm. */
82 paddr = (unsigned long) page_address(page);
83 if ((paddr ^ vaddr) & (1 << 13))
84 flush_dcache_page_all(mm, page);
/* Batch already holds entries for a different mm: (missing body
 * presumably flushes the pending batch first). */
93 if (unlikely(nr != 0 && mm != mp->mm)) {
101 mp->vaddrs[nr] = vaddr;
/* Batch full -> flush now (flush call is in the missing lines). */
103 if (nr >= TLB_BATCH_NR)
/* Batch TLB flushes for the VPTE (virtual page-table) mappings that
 * shadow the user range [start, end) being torn down by free_pgtables.
 *
 * Each PMD_SIZE of user address space is backed by one page of PTEs,
 * so the user range is scaled down by (PAGE_SHIFT - 3) and offset into
 * the CPU-specific VPTE base before being queued page by page.
 *
 * NOTE(review): this excerpt is missing lines (braces, the signed
 * range-extension logic around 's'/'e', the vpte_base else-arm, and
 * loop increments); comments below cover only the visible lines.
 */
107 void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
109 struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
110 unsigned long nr = mp->tlb_nr;
/* Signed copies: the sparc64 VA hole makes these ranges signed. */
111 long s = start, e = end, vpte_base;
116 /* Nobody should call us with start below VM hole and end above.
117 * See if it is really true. (excerpt: the check itself is missing) */
122 /* Currently free_pgtables guarantees this. */
/* Round the end up to a PMD boundary: one PTE page per PMD_SIZE. */
124 e = (e + PMD_SIZE - 1) & PMD_MASK;
/* VPTE base differs between Spitfire and (presumably) Cheetah CPUs;
 * the alternative arm is in the missing lines. */
126 vpte_base = (tlb_type == spitfire ?
/* Different mm already batched: (missing body presumably flushes the
 * pending batch before switching mms). */
130 if (unlikely(nr != 0 && mm != mp->mm)) {
/* Map the user range into its VPTE alias range: >> (PAGE_SHIFT - 3)
 * because each 8-byte PTE covers one PAGE_SIZE of user space. */
138 start = vpte_base + (s >> (PAGE_SHIFT - 3));
139 end = vpte_base + (e >> (PAGE_SHIFT - 3));
/* Queue one flush per VPTE page; flush mid-loop when the batch fills
 * (the flush call and increments are in the missing lines). */
140 while (start < end) {
141 mp->vaddrs[nr] = start;
143 if (nr >= TLB_BATCH_NR) {
/* Number of usable PMD entries for the current task.
 *
 * A 32-bit (TIF_32BIT) task only addresses 4GB, so only enough PMD
 * slots to cover a 32-bit VA are reported: 2^(32 - PTE-page bits -
 * PAGE_SHIFT).  64-bit tasks get the full REAL_PTRS_PER_PMD.
 * NOTE(review): closing brace of this function is beyond this excerpt.
 */
153 unsigned long __ptrs_per_pmd(void)
155 if (test_thread_flag(TIF_32BIT))
156 return (1UL << (32 - (PAGE_SHIFT-3) - PAGE_SHIFT));
157 return REAL_PTRS_PER_PMD;