return 0;
}
+#ifdef CONFIG_DCACHE_DISABLED
+#define sh64_dcache_purge_all() do { } while (0)
+#define sh64_dcache_purge_coloured_phy_page(paddr, eaddr) do { } while (0)
+#define sh64_dcache_purge_user_range(mm, start, end) do { } while (0)
+#define sh64_dcache_purge_phy_page(paddr) do { } while (0)
+#define sh64_dcache_purge_virt_page(mm, eaddr) do { } while (0)
+#define sh64_dcache_purge_kernel_range(start, end) do { } while (0)
+#define sh64_dcache_wback_current_user_range(start, end) do { } while (0)
+#endif
+
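For reference, the stubs above use the conventional do { } while (0) form so that each one expands to exactly one statement and still requires a terminating semicolon, letting a call site parse the same way whether the stub macro or the real function is in effect. A minimal sketch of the idiom; the names below are illustrative, not from this patch:

	/* Illustrative only: the do { } while (0) stub idiom. */
	#define dcache_stub()	do { } while (0)

	static void maybe_purge(int dirty)
	{
		if (dirty)
			dcache_stub();		/* expands to one statement */
		else
			return;			/* the else still pairs with the if */
	}
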
/*##########################################################################*/
/* From here onwards, a rewrite of the implementation,
eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways;
for (eaddr=eaddr0; eaddr<eaddr1; eaddr+=cpu_data->dcache.way_ofs) {
asm __volatile__ ("alloco %0, 0" : : "r" (eaddr));
+ asm __volatile__ ("synco"); /* TAKum03020 */
}
eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways;
}
}
-static void sh64_dcache_purge_virt_page(struct mm_struct *mm, unsigned long eaddr)
+static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
+ unsigned long addr, unsigned long end)
{
- unsigned long phys;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
pte_t entry;
+ spinlock_t *ptl;
+ unsigned long paddr;
- pgd = pgd_offset(mm, eaddr);
- pmd = pmd_offset(pgd, eaddr);
+ if (!mm)
+ return; /* No way to find physical address of page */
- if (pmd_none(*pmd) || pmd_bad(*pmd))
+ pgd = pgd_offset(mm, addr);
+ if (pgd_bad(*pgd))
return;
- pte = pte_offset_kernel(pmd, eaddr);
- entry = *pte;
-
- if (pte_none(entry) || !pte_present(entry))
+ pmd = pmd_offset(pgd, addr);
+ if (pmd_none(*pmd) || pmd_bad(*pmd))
return;
- phys = pte_val(entry) & PAGE_MASK;
-
- sh64_dcache_purge_phy_page(phys);
-}
-
-static void sh64_dcache_purge_user_page(struct mm_struct *mm, unsigned long eaddr)
-{
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte;
- pte_t entry;
- unsigned long paddr;
-
- /* NOTE : all the callers of this have mm->page_table_lock held, so the
- following page table traversal is safe even on SMP/pre-emptible. */
-
- if (!mm) return; /* No way to find physical address of page */
- pgd = pgd_offset(mm, eaddr);
- if (pgd_bad(*pgd)) return;
-
- pmd = pmd_offset(pgd, eaddr);
- if (pmd_none(*pmd) || pmd_bad(*pmd)) return;
-
- pte = pte_offset_kernel(pmd, eaddr);
- entry = *pte;
- if (pte_none(entry) || !pte_present(entry)) return;
-
- paddr = pte_val(entry) & PAGE_MASK;
-
- sh64_dcache_purge_coloured_phy_page(paddr, eaddr);
-
+ pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ do {
+ entry = *pte;
+ if (pte_none(entry) || !pte_present(entry))
+ continue;
+ paddr = pte_val(entry) & PAGE_MASK;
+ sh64_dcache_purge_coloured_phy_page(paddr, addr);
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ pte_unmap_unlock(pte - 1, ptl);
}
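The new loop is the usual locked PTE walk for a range that lies within a single page-table page: pte_offset_map_lock() maps the PTE page and takes its pte lock (replacing the old assumption that callers hold mm->page_table_lock), the pointer is post-incremented once per PAGE_SIZE step, and the matching pte_unmap_unlock() is therefore handed pte - 1, the last entry actually visited. A self-contained sketch of the same pattern under those assumptions; count_present_ptes() is an illustrative helper, not part of this patch:

	#include <linux/mm.h>
	#include <asm/pgtable.h>

	/* Illustrative only: count present PTEs in [addr, end), which must not
	 * cross a pmd boundary, using the same locked-walk pattern as above. */
	static int count_present_ptes(struct mm_struct *mm, pmd_t *pmd,
				      unsigned long addr, unsigned long end)
	{
		spinlock_t *ptl;
		pte_t *pte;
		int present = 0;

		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		do {
			if (!pte_none(*pte) && pte_present(*pte))
				present++;
		} while (pte++, addr += PAGE_SIZE, addr != end);
		pte_unmap_unlock(pte - 1, ptl);	/* pte points one past the last entry */

		return present;
	}
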
/****************************************************************************/
int n_pages;
n_pages = ((end - start) >> PAGE_SHIFT);
- if (n_pages >= 64) {
+ if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
#if 1
sh64_dcache_purge_all();
#else
}
#endif
} else {
- /* 'Small' range */
- unsigned long aligned_start;
- unsigned long eaddr;
- unsigned long last_page_start;
-
- aligned_start = start & PAGE_MASK;
- /* 'end' is 1 byte beyond the end of the range */
- last_page_start = (end - 1) & PAGE_MASK;
-
- eaddr = aligned_start;
- while (eaddr <= last_page_start) {
- sh64_dcache_purge_user_page(mm, eaddr);
- eaddr += PAGE_SIZE;
- }
+ /* Small range, covered by a single page table page */
+ start &= PAGE_MASK; /* should already be so */
+ end = PAGE_ALIGN(end); /* should already be so */
+ sh64_dcache_purge_user_pages(mm, start, end);
}
return;
}
}
}
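On the widened test above: the pre-existing n_pages >= 64 term trades many per-page purges for one full purge once the range gets large, while the added (start ^ (end - 1)) & PMD_MASK term is non-zero exactly when the first and last bytes of the range sit under different pmd entries. Since sh64_dcache_purge_user_pages() maps and locks only a single PTE page, such ranges must take the purge-all path. A stand-alone sketch of the boundary check, assuming a hypothetical 2 MB pmd span rather than the real sh64 value:

	#include <stdio.h>

	#define EX_PMD_SHIFT	21UL			/* assumed 2 MB pmd span */
	#define EX_PMD_MASK	(~((1UL << EX_PMD_SHIFT) - 1))

	/* Non-zero when [start, end) is not covered by one pmd entry;
	 * 'end' is one byte beyond the range, hence 'end - 1'. */
	static int spans_multiple_pmds(unsigned long start, unsigned long end)
	{
		return ((start ^ (end - 1)) & EX_PMD_MASK) != 0;
	}

	int main(void)
	{
		printf("%d\n", spans_multiple_pmds(0x400000, 0x401000));	/* 0: one pmd */
		printf("%d\n", spans_multiple_pmds(0x5ff000, 0x601000));	/* 1: crosses */
		return 0;
	}
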
-#endif /* !CONFIG_DCACHE_DISABLED */
-
/****************************************************************************/
/* These *MUST* lie in an area of virtual address space that's otherwise unused. */
sh64_teardown_dtlb_cache_slot();
}
+#endif /* !CONFIG_DCACHE_DISABLED */
+
/****************************************************************************/
/*##########################################################################
addresses from the user address space specified by mm, after writing
back any dirty data.
- Note(1), 'end' is 1 byte beyond the end of the range to flush.
-
- Note(2), this is called with mm->page_table_lock held.*/
+ Note, 'end' is 1 byte beyond the end of the range to flush. */
sh64_dcache_purge_user_range(mm, start, end);
sh64_icache_inv_user_page_range(mm, start, end);
/****************************************************************************/
-void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr)
+void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, unsigned long pfn)
{
/* Invalidate any entries in either cache for the vma within the user
address space vma->vm_mm for the page starting at virtual address
the I-cache must be searched too in case the page in question is
both writable and being executed from (e.g. stack trampolines.)
- Note(1), this is called with mm->page_table_lock held.
+ Note, this is called with pte lock held.
*/
- sh64_dcache_purge_virt_page(vma->vm_mm, eaddr);
+ sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
if (vma->vm_flags & VM_EXEC) {
sh64_icache_inv_user_page(vma, eaddr);