#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
+
+#ifndef CONFIG_MMU
+
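+/* Without an MMU there are no TLBs to flush; the generic mmu_gather suffices. */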
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#else /* !CONFIG_MMU */
+
#include <asm/pgalloc.h>
+#include <linux/vs_memory.h>
/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
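+/*
+ * mm is the address space being torn down; fullmm is non-zero when
+ * the whole address space is going away (exit or execve).
+ */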
struct mmu_gather {
struct mm_struct *mm;
- unsigned int freed;
unsigned int fullmm;
-
- unsigned int flushes;
- unsigned int avoided_flushes;
};
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
- int cpu = smp_processor_id();
- struct mmu_gather *tlb = &per_cpu(mmu_gathers, cpu);
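+	/*
+	 * get_cpu_var() disables preemption; the matching put_cpu_var()
+	 * in tlb_finish_mmu() re-enables it.
+	 */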
+ struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
tlb->mm = mm;
- tlb->freed = 0;
tlb->fullmm = full_mm_flush;
	return tlb;
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
- struct mm_struct *mm = tlb->mm;
- unsigned long freed = tlb->freed;
- int rss = mm->rss;
-
- if (rss < freed)
- freed = rss;
- mm->rss = rss - freed;
-
- if (freed) {
- flush_tlb_mm(mm);
- tlb->flushes++;
- } else {
- tlb->avoided_flushes++;
- }
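+	/*
+	 * Only a full-mm teardown needs a whole-MM flush here: partial
+	 * unmaps have already been flushed range-by-range in tlb_end_vma().
+	 */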
+ if (tlb->fullmm)
+ flush_tlb_mm(tlb->mm);
/* keep the page table cache within bounds */
check_pgt_cache();
-}
-static inline unsigned int
-tlb_is_full_mm(struct mmu_gather *tlb)
-{
- return tlb->fullmm;
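+	/* Pairs with the get_cpu_var() in tlb_gather_mmu() */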
+ put_cpu_var(mmu_gathers);
}
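+/*
+ * Nothing to record per PTE: the affected range is flushed wholesale
+ * in tlb_end_vma(), or by flush_tlb_mm() for a full-mm teardown.
+ */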
#define tlb_remove_tlb_entry(tlb,ptep,address) do { } while (0)
-#define tlb_start_vma(tlb,vma) \
- do { \
- if (!tlb->fullmm) \
- flush_cache_range(vma, vma->vm_start, vma->vm_end); \
- } while (0)
+/*
+ * In the case of tlb vma handling, we can optimise these away when
+ * we're doing a full MM flush.  When we're doing a munmap, the vmas
+ * are adjusted to only cover the region to be torn down.
+ */
+static inline void
+tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ if (!tlb->fullmm)
+ flush_cache_range(vma, vma->vm_start, vma->vm_end);
+}
-#define tlb_end_vma(tlb,vma) do { } while (0)
+static inline void
+tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ if (!tlb->fullmm)
+ flush_tlb_range(vma, vma->vm_start, vma->vm_end);
+}
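+/*
+ * Pages are freed immediately rather than batched; ARM does not use
+ * the generic mmu_gather page accumulation.
+ */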
#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
#define pte_free_tlb(tlb,ptep) pte_free(ptep)
#define pmd_free_tlb(tlb,pmdp) pmd_free(pmdp)
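+/* No arch-specific work is needed when a task migrates between CPUs */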
+#define tlb_migrate_finish(mm) do { } while (0)
+
+#endif /* CONFIG_MMU */
#endif