X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fsparc64%2Fmm%2Ftlb.c;fp=arch%2Fsparc64%2Fmm%2Ftlb.c;h=8b104be4662b3089e3ba5344cb61e13d85608161;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=3f10fc921b00e1bfb6279f8ccf00986ad7619089;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c
index 3f10fc921..8b104be46 100644
--- a/arch/sparc64/mm/tlb.c
+++ b/arch/sparc64/mm/tlb.c
@@ -8,7 +8,6 @@
 #include <linux/percpu.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
-#include <linux/preempt.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -25,11 +24,7 @@ void flush_tlb_pending(void)
 {
 	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
 
-	preempt_disable();
-
 	if (mp->tlb_nr) {
-		flush_tsb_user(mp);
-
 		if (CTX_VALID(mp->mm->context)) {
 #ifdef CONFIG_SMP
 			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
@@ -41,8 +36,6 @@ void flush_tlb_pending(void)
 		}
 		mp->tlb_nr = 0;
 	}
-
-	preempt_enable();
 }
 
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
@@ -54,8 +47,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t
 	if (pte_exec(orig))
 		vaddr |= 0x1UL;
 
-	if (tlb_type != hypervisor &&
-	    pte_dirty(orig)) {
+	if (pte_dirty(orig)) {
 		unsigned long paddr, pfn = pte_pfn(orig);
 		struct address_space *mapping;
 		struct page *page;
@@ -97,3 +89,62 @@ no_cache_flush:
 	if (nr >= TLB_BATCH_NR)
 		flush_tlb_pending();
 }
+
+void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+	unsigned long nr = mp->tlb_nr;
+	long s = start, e = end, vpte_base;
+
+	if (mp->fullmm)
+		return;
+
+	/* If start is greater than end, that is a real problem. */
+	BUG_ON(start > end);
+
+	/* However, straddling the VA space hole is quite normal. */
+	s &= PMD_MASK;
+	e = (e + PMD_SIZE - 1) & PMD_MASK;
+
+	vpte_base = (tlb_type == spitfire ?
+		     VPTE_BASE_SPITFIRE :
+		     VPTE_BASE_CHEETAH);
+
+	if (unlikely(nr != 0 && mm != mp->mm)) {
+		flush_tlb_pending();
+		nr = 0;
+	}
+
+	if (nr == 0)
+		mp->mm = mm;
+
+	start = vpte_base + (s >> (PAGE_SHIFT - 3));
+	end = vpte_base + (e >> (PAGE_SHIFT - 3));
+
+	/* If the request straddles the VA space hole, we
+	 * need to swap start and end.  The reason this
+	 * occurs is that "vpte_base" is the center of
+	 * the linear page table mapping area.  Thus,
+	 * high addresses with the sign bit set map to
+	 * addresses below vpte_base and non-sign bit
+	 * addresses map to addresses above vpte_base.
+	 */
+	if (end < start) {
+		unsigned long tmp = start;
+
+		start = end;
+		end = tmp;
+	}
+
+	while (start < end) {
+		mp->vaddrs[nr] = start;
+		mp->tlb_nr = ++nr;
+		if (nr >= TLB_BATCH_NR) {
+			flush_tlb_pending();
+			nr = 0;
+		}
+		start += PAGE_SIZE;
+	}
+	if (nr)
+		flush_tlb_pending();
+}
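
A note on the pattern this file is built around: tlb_batch_add() queues virtual addresses in the per-cpu mmu_gather until TLB_BATCH_NR entries accumulate, and flush_tlb_pending() drains the queue with a single cross-call (SMP) or a local flush loop. Below is a minimal userspace model of that accumulate-then-drain shape, not kernel code; it prints addresses where the kernel would issue TLB demaps, and the TLB_BATCH_NR value and the vaddrs[]/tlb_nr names simply mirror the diff.

#include <stdio.h>

#define TLB_BATCH_NR	64	/* assumed to mirror the kernel constant */
#define PAGE_SIZE	8192UL	/* sparc64 base page size */

static unsigned long vaddrs[TLB_BATCH_NR];
static unsigned long tlb_nr;

/* Stands in for the real flush: the kernel issues one cross-call or
 * one local demap loop here; this model just prints and resets. */
static void flush_tlb_pending_model(void)
{
	unsigned long i;

	for (i = 0; i < tlb_nr; i++)
		printf("flush vaddr %#lx\n", vaddrs[i]);
	tlb_nr = 0;
}

/* Queue one address, draining automatically when the batch fills. */
static void tlb_batch_add_model(unsigned long vaddr)
{
	vaddrs[tlb_nr++] = vaddr;
	if (tlb_nr >= TLB_BATCH_NR)
		flush_tlb_pending_model();
}

int main(void)
{
	unsigned long va;

	/* Tear down a 1 MB mapping page by page: 128 pages, so the
	 * 64-entry batch drains itself twice; the trailing call picks
	 * up any partial remainder (none here). */
	for (va = 0x100000; va < 0x200000; va += PAGE_SIZE)
		tlb_batch_add_model(va);
	flush_tlb_pending_model();
	return 0;
}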
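tlb_batch_add() also relies on vaddr being page aligned: the low 13 bits of an 8 KB aligned address are zero, so bit 0 is free to record whether the old PTE was executable (the "vaddr |= 0x1UL" in the hunk above). A tiny standalone sketch of that flag-in-low-bit trick, using a made-up sample address:

#include <stdio.h>

#define PAGE_SIZE 8192UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	/* Hypothetical page-aligned user address. */
	unsigned long vaddr = 0x0000123456788000UL & PAGE_MASK;

	int exec = 1;		/* pretend the old PTE was executable */
	if (exec)
		vaddr |= 0x1UL;	/* stash the flag in a spare low bit */

	/* The flush path can recover both pieces later. */
	printf("page %#lx, was-exec %lu\n",
	       vaddr & PAGE_MASK, vaddr & 0x1UL);
	return 0;
}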
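The re-added flush_tlb_pgtables() converts a user address range into a range inside the linear ("virtual") page table: with 8-byte PTEs and 8 KB pages, a user address va maps to vpte_base + (va >> (PAGE_SHIFT - 3)), computed on signed values, so addresses above the sparc64 VA hole (sign bit set) land below vpte_base while low addresses land above it. The sketch below shows why a range whose ends sit on opposite sides of the hole comes out reversed and needs the start/end swap; the vpte_base and sample addresses are illustrative only, not the kernel's actual VPTE_BASE_* constants.

#include <stdio.h>

#define PAGE_SHIFT 13	/* 8 KB base pages, 8-byte PTEs */

int main(void)
{
	long vpte_base = (long)0xfffffffe00000000UL;	/* illustrative */
	long low  = (long)0x0000000000100000UL;	/* below the VA hole */
	long high = (long)0xfffff80000100000UL;	/* above the VA hole */

	long start = vpte_base + (low  >> (PAGE_SHIFT - 3));
	long end   = vpte_base + (high >> (PAGE_SHIFT - 3));

	/* "high" is negative as a signed long, so its PTEs live below
	 * vpte_base and the computed range comes out reversed, exactly
	 * the case the kernel's swap handles. */
	printf("start %#lx end %#lx -> %s\n",
	       (unsigned long)start, (unsigned long)end,
	       end < start ? "swap needed" : "in order");
	return 0;
}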