/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/hardirq.h>
#include <linux/highmem.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
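
/*
 * For reference: the batch filled in by hpte_update() below is declared
 * in include/asm-ppc64/tlbflush.h.  A rough sketch of its layout, kept
 * here only as an aid to reading this file (the header is authoritative
 * for the exact fields and for PPC64_TLB_BATCH_NR):
 *
 *	struct ppc64_tlb_batch {
 *		unsigned long		index;
 *		unsigned long		context;
 *		struct mm_struct	*mm;
 *		pte_t			pte[PPC64_TLB_BATCH_NR];
 *		unsigned long		addr[PPC64_TLB_BATCH_NR];
 *	};
 */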

/* This is declared as we are using the more or less generic
 * include/asm-ppc64/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
unsigned long pte_freelist_forced_free;
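
/*
 * pte_freelist_cur points at a per-cpu batch of page-table pages that
 * are waiting for an RCU grace period before being freed.  The batch
 * type comes from include/asm-ppc64/pgalloc.h; approximately (a
 * sketch, not the authoritative definition):
 *
 *	struct pte_freelist_batch {
 *		struct rcu_head	rcu;
 *		unsigned int	index;
 *		struct page	*pages[0];
 *	};
 */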

/*
 * Update the MMU hash table to correspond with a change to
 * a Linux PTE.  If wrprot is true, it is permissible to
 * change the existing HPTE to read-only rather than removing it
 * (if we remove it we should clear the _PAGE_HPTEFLAGS bits).
 */
void hpte_update(pte_t *ptep, unsigned long pte, int wrprot)
{
	struct page *ptepage;
	struct mm_struct *mm;
	unsigned long addr;
	int i;
	unsigned long context = 0;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	ptepage = virt_to_page(ptep);
	mm = (struct mm_struct *) ptepage->mapping;
	addr = ptepage->index +
		(((unsigned long)ptep & ~PAGE_MASK) * PTRS_PER_PTE);

	if (REGION_ID(addr) == USER_REGION_ID)
		context = mm->context.id;
	i = batch->index;

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte).  If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 */
	if (unlikely(i != 0 && context != batch->context)) {
		flush_tlb_pending();
		i = 0;
	}

	if (i == 0) {
		batch->context = context;
		batch->mm = mm;
	}
	batch->pte[i] = __pte(pte);
	batch->addr[i] = addr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		flush_tlb_pending();
}
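
/*
 * hpte_update() is reached from the Linux PTE helpers in
 * include/asm-ppc64/pgtable.h whenever a PTE with _PAGE_HASHPTE set is
 * modified.  A simplified, illustrative caller (the real pte_update()
 * uses an atomic ldarx/stdcx. sequence rather than this plain
 * read-modify-write):
 *
 *	static inline unsigned long pte_update(pte_t *p, unsigned long clr)
 *	{
 *		unsigned long old = pte_val(*p);
 *
 *		*p = __pte(old & ~clr);
 *		if (old & _PAGE_HASHPTE)
 *			hpte_update(p, old, 0);
 *		return old;
 *	}
 */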

void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	int i;
	cpumask_t tmp = cpumask_of_cpu(smp_processor_id());
	int local = 0;

	BUG_ON(in_interrupt());

	i = batch->index;
	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
		local = 1;

	if (i == 1)
		flush_hash_page(batch->context, batch->addr[0], batch->pte[0],
				local);
	else
		flush_hash_range(batch->context, i, local);
	batch->index = 0;
}
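
/*
 * The flush_tlb_pending() used in hpte_update() above is the inline
 * wrapper from include/asm-ppc64/tlbflush.h; roughly (treat the header
 * as authoritative):
 *
 *	static inline void flush_tlb_pending(void)
 *	{
 *		struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
 *
 *		if (batch->index)
 *			__flush_tlb_pending(batch);
 *		put_cpu_var(ppc64_tlb_batch);
 *	}
 */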

#ifdef CONFIG_SMP
static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}
#endif

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
void pte_free_now(struct page *ptepage)
{
	pte_freelist_forced_free++;

	smp_call_function(pte_free_smp_sync, NULL, 0, 1);

	pte_free(ptepage);
}
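
/*
 * The common (non-OOM) path is pte_free_tlb() in
 * include/asm-ppc64/pgalloc.h, which queues pages on the per-cpu
 * pte_freelist_cur batch and only falls back to pte_free_now() when it
 * cannot allocate a batch page.  In outline (a sketch of the header's
 * logic, not a verbatim copy):
 *
 *	if (*batchp == NULL) {
 *		*batchp = (void *)__get_free_page(GFP_ATOMIC);
 *		if (*batchp == NULL) {
 *			pte_free_now(ptepage);
 *			return;
 *		}
 *		(*batchp)->index = 0;
 *	}
 *	(*batchp)->pages[(*batchp)->index++] = ptepage;
 */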

static void pte_free_rcu_callback(void *arg)
{
	struct pte_freelist_batch *batch = arg;
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pte_free(batch->pages[i]);
	free_page((unsigned long)batch);
}

void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback, batch);
}

void pte_free_finish(void)
{
	/* This is safe as we are holding page_table_lock */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}
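
/*
 * pte_free_finish() is expected to run at the end of a TLB gather: the
 * tlb_flush() hook in include/asm-ppc64/tlb.h calls it with
 * page_table_lock held, so a partially filled batch is still handed to
 * RCU rather than left dangling on the per-cpu pointer.
 */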