X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-arm%2Ftlb.h;h=c9677d0be8dd4c4ba90df190c6b2f43e911bcc7e;hb=refs%2Fheads%2Fvserver;hp=a21e6a01e6d79b0c66eda02738947abcdc78ef4d;hpb=a8e794ca871505c8ea96cc102f4ad555c5231d7f;p=linux-2.6.git

diff --git a/include/asm-arm/tlb.h b/include/asm-arm/tlb.h
index a21e6a01e..c9677d0be 100644
--- a/include/asm-arm/tlb.h
+++ b/include/asm-arm/tlb.h
@@ -19,7 +19,16 @@
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+
+#ifndef CONFIG_MMU
+
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#else /* !CONFIG_MMU */
+
 #include <asm/pgalloc.h>
+#include
 
 /*
  * TLB handling.  This allows us to remove pages from the page
@@ -27,11 +36,7 @@
  */
 struct mmu_gather {
 	struct mm_struct	*mm;
-	unsigned int		freed;
 	unsigned int		fullmm;
-
-	unsigned int		flushes;
-	unsigned int		avoided_flushes;
 };
 
 DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -39,11 +44,9 @@ DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
 static inline struct mmu_gather *
 tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	int cpu = smp_processor_id();
-	struct mmu_gather *tlb = &per_cpu(mmu_gathers, cpu);
+	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
 
 	tlb->mm = mm;
-	tlb->freed = 0;
 	tlb->fullmm = full_mm_flush;
 
 	return tlb;
@@ -52,44 +55,41 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	struct mm_struct *mm = tlb->mm;
-	unsigned long freed = tlb->freed;
-	int rss = mm->rss;
-
-	if (rss < freed)
-		freed = rss;
-	// mm->rss = rss - freed;
-	vx_rsspages_sub(mm, freed);
-
-	if (freed) {
-		flush_tlb_mm(mm);
-		tlb->flushes++;
-	} else {
-		tlb->avoided_flushes++;
-	}
+	if (tlb->fullmm)
+		flush_tlb_mm(tlb->mm);
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
-}
 
-static inline unsigned int
-tlb_is_full_mm(struct mmu_gather *tlb)
-{
-	return tlb->fullmm;
+	put_cpu_var(mmu_gathers);
 }
 
 #define tlb_remove_tlb_entry(tlb,ptep,address)	do { } while (0)
 
-#define tlb_start_vma(tlb,vma)						\
-	do {								\
-		if (!tlb->fullmm)					\
-			flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-	} while (0)
+/*
+ * In the case of tlb vma handling, we can optimise these away in the
+ * case where we're doing a full MM flush.  When we're doing a munmap,
+ * the vmas are adjusted to only cover the region to be torn down.
+ */
+static inline void
+tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+	if (!tlb->fullmm)
+		flush_cache_range(vma, vma->vm_start, vma->vm_end);
+}
 
-#define tlb_end_vma(tlb,vma)	do { } while (0)
+static inline void
+tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+	if (!tlb->fullmm)
+		flush_tlb_range(vma, vma->vm_start, vma->vm_end);
+}
 
 #define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
 #define pte_free_tlb(tlb,ptep)		pte_free(ptep)
 #define pmd_free_tlb(tlb,pmdp)		pmd_free(pmdp)
 
+#define tlb_migrate_finish(mm)		do { } while (0)
+
+#endif /* CONFIG_MMU */
 #endif
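
The comment added above tlb_start_vma()/tlb_end_vma() explains the optimisation: when tlb->fullmm is set (a full address-space teardown), the per-VMA cache and TLB flushes are skipped and tlb_finish_mmu() issues a single flush_tlb_mm() instead, while a munmap keeps the ranged flushes because the VMAs cover only the region being torn down. The stand-alone C sketch below only illustrates that calling order and the fullmm gating; it is not kernel code, and the simplified types and the stub_flush_* routines are hypothetical stand-ins for the real mm structures and ARM flush primitives.

#include <stdio.h>

/* Simplified stand-ins for the kernel's mm types. */
struct mm_struct { int unused; };
struct vm_area_struct { unsigned long vm_start, vm_end; };

struct mmu_gather {
	struct mm_struct *mm;
	unsigned int fullmm;	/* set for exit/execve-style full teardown */
};

/* Hypothetical stand-ins for flush_cache_range()/flush_tlb_range()/flush_tlb_mm(). */
static void stub_flush_cache_range(struct vm_area_struct *vma)
{
	printf("flush caches   %#lx-%#lx\n", vma->vm_start, vma->vm_end);
}

static void stub_flush_tlb_range(struct vm_area_struct *vma)
{
	printf("flush TLB      %#lx-%#lx\n", vma->vm_start, vma->vm_end);
}

static void stub_flush_tlb_mm(struct mm_struct *mm)
{
	(void)mm;
	printf("flush whole TLB for mm\n");
}

static void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)		/* full-mm teardown: skip per-VMA cache flush */
		stub_flush_cache_range(vma);
}

static void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)		/* full-mm teardown: skip per-VMA TLB flush */
		stub_flush_tlb_range(vma);
}

static void tlb_finish_mmu(struct mmu_gather *tlb)
{
	if (tlb->fullmm)		/* one whole-mm flush replaces the ranged ones */
		stub_flush_tlb_mm(tlb->mm);
}

int main(void)
{
	struct mm_struct mm = { 0 };
	struct vm_area_struct vma = { 0x8000, 0x9000 };

	/* munmap of one VMA: fullmm == 0, so the ranged flushes run */
	struct mmu_gather tlb = { &mm, 0 };
	tlb_start_vma(&tlb, &vma);
	/* ... tlb_remove_page() would free each page in the range here ... */
	tlb_end_vma(&tlb, &vma);
	tlb_finish_mmu(&tlb);

	/* full address-space teardown: per-VMA work is skipped entirely */
	tlb.fullmm = 1;
	tlb_start_vma(&tlb, &vma);
	tlb_end_vma(&tlb, &vma);
	tlb_finish_mmu(&tlb);

	return 0;
}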