#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
-#include <linux/preempt.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+	/* NOTE(review): this hunk's function signature is elided by the
+	 * diff -- presumably flush_tlb_pending(); confirm against the
+	 * complete file before relying on the name. */

-	preempt_disable();
-
+	/* Drain the per-cpu batch: demap the queued addresses if the
+	 * mm's context is still valid, then empty the batch either way. */
	if (mp->tlb_nr) {
-		flush_tsb_user(mp);
-
		if (CTX_VALID(mp->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
+			/* NOTE(review): remaining arguments and the !SMP
+			 * branch are elided from this hunk. */
		}
		mp->tlb_nr = 0;
	}
-
-	preempt_enable();
}
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
+	/* Queue one user translation for a later batched demap; the
+	 * executable bit of the PTE is encoded in bit 0 of the queued
+	 * address.  (Opening brace and earlier body lines are elided
+	 * from this hunk.) */
	if (pte_exec(orig))
		vaddr |= 0x1UL;

-	if (tlb_type != hypervisor &&
-	    pte_dirty(orig)) {
+	if (pte_dirty(orig)) {
+		/* Dirty mapping: the (elided) body handles the page's
+		 * D-cache state before the translation is demapped.
+		 * NOTE(review): this hunk drops the tlb_type !=
+		 * hypervisor guard, so hypervisor (sun4v) now takes
+		 * this path too -- confirm that is intended. */
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;
+		/* Drain the batch when full before queueing more.
+		 * NOTE(review): 'nr' is declared in code elided from
+		 * this hunk. */
		if (nr >= TLB_BATCH_NR)
			flush_tlb_pending();
}
+
+/* flush_tlb_pgtables() -- queue TLB demaps for the linear ("VPTE")
+ * page-table mappings that back the user range [start, end) of @mm.
+ * Addresses are accumulated into the per-cpu mmu_gather batch and
+ * drained with flush_tlb_pending() whenever the batch fills, and once
+ * more at the end if anything is still queued.
+ */
+void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+	unsigned long nr = mp->tlb_nr;
+	/* Signed on purpose: the VPTE area is centered on vpte_base,
+	 * so sign extension of the address's high bit is what places a
+	 * mapping above or below vpte_base (see the swap logic below). */
+	long s = start, e = end, vpte_base;
+
+	/* A full-mm flush will wipe the whole context anyway. */
+	if (mp->fullmm)
+		return;
+
+	/* If start is greater than end, that is a real problem. */
+	BUG_ON(start > end);
+
+	/* However, straddling the VA space hole is quite normal. */
+	s &= PMD_MASK;
+	e = (e + PMD_SIZE - 1) & PMD_MASK;
+
+	vpte_base = (tlb_type == spitfire ?
+		     VPTE_BASE_SPITFIRE :
+		     VPTE_BASE_CHEETAH);
+
+	/* The batch may only hold addresses of a single mm; drain any
+	 * leftovers queued for a different one. */
+	if (unlikely(nr != 0 && mm != mp->mm)) {
+		flush_tlb_pending();
+		nr = 0;
+	}
+
+	if (nr == 0)
+		mp->mm = mm;
+
+	/* Each PTE is 8 bytes, hence the (PAGE_SHIFT - 3) scaling from
+	 * mapped virtual address to the address of its VPTE entry. */
+	start = vpte_base + (s >> (PAGE_SHIFT - 3));
+	end = vpte_base + (e >> (PAGE_SHIFT - 3));
+
+	/* If the request straddles the VA space hole, we
+	 * need to swap start and end. The reason this
+	 * occurs is that "vpte_base" is the center of
+	 * the linear page table mapping area. Thus,
+	 * high addresses with the sign bit set map to
+	 * addresses below vpte_base and non-sign bit
+	 * addresses map to addresses above vpte_base.
+	 */
+	if (end < start) {
+		unsigned long tmp = start;
+
+		start = end;
+		end = tmp;
+	}
+
+	/* Queue one demap per page of VPTE mappings, draining the
+	 * batch whenever it fills. */
+	while (start < end) {
+		mp->vaddrs[nr] = start;
+		mp->tlb_nr = ++nr;
+		if (nr >= TLB_BATCH_NR) {
+			flush_tlb_pending();
+			nr = 0;
+		}
+		start += PAGE_SIZE;
+	}
+	if (nr)
+		flush_tlb_pending();
+}