diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c
index 90ca99d0b..3f10fc921 100644
--- a/arch/sparc64/mm/tlb.c
+++ b/arch/sparc64/mm/tlb.c
@@ -8,6 +8,7 @@
 #include <linux/percpu.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
+#include <linux/preempt.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -18,14 +19,17 @@
 
 /* Heavily inspired by the ppc64 code.  */
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) =
-	{ NULL, 0, 0, 0, 0, 0, { 0 }, { NULL }, };
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) = { 0, };
 
 void flush_tlb_pending(void)
 {
 	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
 
+	preempt_disable();
+
 	if (mp->tlb_nr) {
+		flush_tsb_user(mp);
+
 		if (CTX_VALID(mp->mm->context)) {
 #ifdef CONFIG_SMP
 			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
@@ -37,6 +41,8 @@ void flush_tlb_pending(void)
 		}
 		mp->tlb_nr = 0;
 	}
+
+	preempt_enable();
 }
 
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
@@ -48,7 +54,8 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t
 	if (pte_exec(orig))
 		vaddr |= 0x1UL;
 
-	if (pte_dirty(orig)) {
+	if (tlb_type != hypervisor &&
+	    pte_dirty(orig)) {
 		unsigned long paddr, pfn = pte_pfn(orig);
 		struct address_space *mapping;
 		struct page *page;
@@ -72,7 +79,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t
 
 no_cache_flush:
 
-	if (mp->tlb_frozen)
+	if (mp->fullmm)
 		return;
 
 	nr = mp->tlb_nr;
@@ -90,62 +97,3 @@ no_cache_flush:
 	if (nr >= TLB_BATCH_NR)
 		flush_tlb_pending();
 }
-
-void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
-{
-	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
-	unsigned long nr = mp->tlb_nr;
-	long s = start, e = end, vpte_base;
-
-	if (mp->tlb_frozen)
-		return;
-
-	/* If start is greater than end, that is a real problem. */
-	BUG_ON(start > end);
-
-	/* However, straddling the VA space hole is quite normal. */
-	s &= PMD_MASK;
-	e = (e + PMD_SIZE - 1) & PMD_MASK;
-
-	vpte_base = (tlb_type == spitfire ?
-		     VPTE_BASE_SPITFIRE :
-		     VPTE_BASE_CHEETAH);
-
-	if (unlikely(nr != 0 && mm != mp->mm)) {
-		flush_tlb_pending();
-		nr = 0;
-	}
-
-	if (nr == 0)
-		mp->mm = mm;
-
-	start = vpte_base + (s >> (PAGE_SHIFT - 3));
-	end = vpte_base + (e >> (PAGE_SHIFT - 3));
-
-	/* If the request straddles the VA space hole, we
-	 * need to swap start and end.  The reason this
-	 * occurs is that "vpte_base" is the center of
-	 * the linear page table mapping area.  Thus,
-	 * high addresses with the sign bit set map to
-	 * addresses below vpte_base and non-sign bit
-	 * addresses map to addresses above vpte_base.
-	 */
-	if (end < start) {
-		unsigned long tmp = start;
-
-		start = end;
-		end = tmp;
-	}
-
-	while (start < end) {
-		mp->vaddrs[nr] = start;
-		mp->tlb_nr = ++nr;
-		if (nr >= TLB_BATCH_NR) {
-			flush_tlb_pending();
-			nr = 0;
-		}
-		start += PAGE_SIZE;
-	}
-	if (nr)
-		flush_tlb_pending();
-}
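
Illustrative note (not part of the patch): the code above keeps a small
per-CPU batch of virtual addresses in the mmu_gather and drains it with one
flush once TLB_BATCH_NR entries accumulate.  The patch brackets the per-CPU
access in flush_tlb_pending() with preempt_disable()/preempt_enable(), since
__get_cpu_var(mmu_gathers) is only stable while the task cannot be
rescheduled onto another CPU; it keeps the new TSB coherent via
flush_tsb_user(); and it skips the dirty-page D-cache tracking when
tlb_type == hypervisor (sun4v has no D-cache aliasing issues).  The removed
flush_tlb_pgtables() belonged to the old virtual-page-table scheme that the
TSB work replaced.  Below is a minimal, self-contained userspace C sketch of
the batch-and-drain idea only; struct tlb_batch, batch_add() and do_flush()
are invented stand-ins for illustration, and the batch size of 8 is
arbitrary, none of it the kernel's actual code.

/* Sketch of the batching pattern used by tlb_batch_add() /
 * flush_tlb_pending(): queue addresses, flush in bulk when full.
 */
#include <stdio.h>

#define TLB_BATCH_NR	8	/* arbitrary batch size for the sketch */

struct tlb_batch {
	unsigned long nr;			/* entries queued so far */
	unsigned long vaddrs[TLB_BATCH_NR];	/* pending virtual addresses */
};

static struct tlb_batch batch;	/* stands in for the per-CPU mmu_gather */

/* Stand-in for the real flush (__flush_tlb_pending() and friends);
 * here it just prints and resets the batch. */
static void do_flush(struct tlb_batch *b)
{
	unsigned long i;

	for (i = 0; i < b->nr; i++)
		printf("flush vaddr 0x%lx\n", b->vaddrs[i]);
	b->nr = 0;
}

/* Queue one address, draining the batch when it fills up; this mirrors
 * the "if (nr >= TLB_BATCH_NR) flush_tlb_pending();" step above.  The
 * kernel must also disable preemption around the per-CPU batch, which a
 * single-threaded sketch does not need. */
static void batch_add(unsigned long vaddr)
{
	batch.vaddrs[batch.nr++] = vaddr;
	if (batch.nr >= TLB_BATCH_NR)
		do_flush(&batch);
}

int main(void)
{
	unsigned long va;

	/* Ten pages: eight flush automatically, two on the final drain. */
	for (va = 0x1000; va < 0xb000; va += 0x1000)
		batch_add(va);
	do_flush(&batch);	/* drain the partial batch */
	return 0;
}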