X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-arm%2Ftlb.h;h=c9677d0be8dd4c4ba90df190c6b2f43e911bcc7e;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=ab3cad4fb53d9c664f719b19a48c58ef5e6712d2;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/include/asm-arm/tlb.h b/include/asm-arm/tlb.h
index ab3cad4fb..c9677d0be 100644
--- a/include/asm-arm/tlb.h
+++ b/include/asm-arm/tlb.h
@@ -19,7 +19,16 @@
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+
+#ifndef CONFIG_MMU
+
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#else /* !CONFIG_MMU */
+
 #include <asm/pgalloc.h>
+#include
 
 /*
  * TLB handling. This allows us to remove pages from the page
@@ -27,11 +36,7 @@
  */
 struct mmu_gather {
         struct mm_struct        *mm;
-        unsigned int            freed;
         unsigned int            fullmm;
-
-        unsigned int            flushes;
-        unsigned int            avoided_flushes;
 };
 
 DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -39,11 +44,9 @@ DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
 static inline struct mmu_gather *
 tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
-        int cpu = smp_processor_id();
-        struct mmu_gather *tlb = &per_cpu(mmu_gathers, cpu);
+        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
 
         tlb->mm = mm;
-        tlb->freed = 0;
         tlb->fullmm = full_mm_flush;
 
         return tlb;
@@ -52,43 +55,41 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-        struct mm_struct *mm = tlb->mm;
-        unsigned long freed = tlb->freed;
-        int rss = mm->rss;
-
-        if (rss < freed)
-                freed = rss;
-        mm->rss = rss - freed;
-
-        if (freed) {
-                flush_tlb_mm(mm);
-                tlb->flushes++;
-        } else {
-                tlb->avoided_flushes++;
-        }
+        if (tlb->fullmm)
+                flush_tlb_mm(tlb->mm);
 
         /* keep the page table cache within bounds */
         check_pgt_cache();
-}
 
-static inline unsigned int
-tlb_is_full_mm(struct mmu_gather *tlb)
-{
-        return tlb->fullmm;
+        put_cpu_var(mmu_gathers);
 }
 
 #define tlb_remove_tlb_entry(tlb,ptep,address)  do { } while (0)
 
-#define tlb_start_vma(tlb,vma)                                          \
-        do {                                                            \
-                if (!tlb->fullmm)                                       \
-                        flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-        } while (0)
+/*
+ * In the case of tlb vma handling, we can optimise these away in the
+ * case where we're doing a full MM flush. When we're doing a munmap,
+ * the vmas are adjusted to only cover the region to be torn down.
+ */
+static inline void
+tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+        if (!tlb->fullmm)
+                flush_cache_range(vma, vma->vm_start, vma->vm_end);
+}
 
-#define tlb_end_vma(tlb,vma)    do { } while (0)
+static inline void
+tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+        if (!tlb->fullmm)
+                flush_tlb_range(vma, vma->vm_start, vma->vm_end);
+}
 
 #define tlb_remove_page(tlb,page)       free_page_and_swap_cache(page)
 #define pte_free_tlb(tlb,ptep)          pte_free(ptep)
 #define pmd_free_tlb(tlb,pmdp)          pmd_free(pmdp)
 
+#define tlb_migrate_finish(mm)          do { } while (0)
+
+#endif /* CONFIG_MMU */
 #endif
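
For context on how the interface touched by this patch is driven (not part of the patch itself): the generic mm code allocates an mmu_gather, walks the VMA being unmapped, and then finishes the gather. Below is a minimal sketch of that call order against the 2.6.x API shown above; the helper name unmap_region_sketch and the zap/pte comments are hypothetical, and the partial-munmap case (full_mm_flush == 0) is assumed.

/*
 * Illustrative sketch only -- not part of the patch.  Shows the order in
 * which the generic mm code exercises the mmu_gather API defined above.
 * The helper name and the zap/pte handling comments are hypothetical.
 */
#include <linux/mm.h>
#include <asm/tlb.h>

static void unmap_region_sketch(struct mm_struct *mm,
                                struct vm_area_struct *vma,
                                unsigned long start, unsigned long end)
{
        /* 0 = partial unmap (munmap); 1 would mean the whole mm is going away */
        struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

        tlb_start_vma(tlb, vma);        /* flush_cache_range() unless fullmm */

        /*
         * ... here the caller zaps the PTEs in [start, end) and hands each
         * unmapped page to tlb_remove_page(tlb, page), and each freed page
         * table to pte_free_tlb(tlb, ptep) / pmd_free_tlb(tlb, pmdp) ...
         */

        tlb_end_vma(tlb, vma);          /* flush_tlb_range() unless fullmm */

        /* flush_tlb_mm() only if fullmm, then put_cpu_var(mmu_gathers) */
        tlb_finish_mmu(tlb, start, end);
}

With full_mm_flush set instead (as on process exit), tlb_start_vma() and tlb_end_vma() skip the per-VMA cache and TLB flushes and tlb_finish_mmu() performs a single flush_tlb_mm().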