/*
* We take the easy way out of this problem - we make the
* PTE uncacheable. However, we leave the write buffer on.
+ *
+ * Note that the pte lock held when calling update_mmu_cache must also
+ * guard the pte (somewhere else in the same mm) that we modify here.
+ * Therefore those configurations which might call adjust_pte (those
+ * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
*/
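The mask that adjust_pte() clears below is not shown in this excerpt. Purely as a point of reference (an assumption about the surrounding file, not part of this patch), it presumably amounts to the cacheable bit alone:

/* Sketch (assumption, not part of this patch): the mask adjust_pte() clears. */
static unsigned long shared_pte_mask = L_PTE_CACHEABLE;

Clearing only L_PTE_CACHEABLE leaves L_PTE_BUFFERABLE set in the PTE, so the mapping becomes uncacheable but remains write-buffered, which is what "we leave the write buffer on" refers to; boot-time write-buffer checking can widen the mask on CPUs whose write buffer turns out to be incoherent.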
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
/*
 * If this page isn't present, or is already setup to
 * fault (ie, is old), we can safely ignore any issues.
 */
if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
- flush_cache_page(vma, address);
+ flush_cache_page(vma, address, pte_pfn(entry));
pte_val(entry) &= ~shared_pte_mask;
set_pte(pte, entry);
flush_tlb_page(vma, address);
return 0;
}
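Only the middle of adjust_pte() appears above. As a rough sketch of the rest of the 2.6-era function (an assumption for context, not part of this diff), the pte/entry pair comes from a plain page-table walk, and the function presumably reports back, via a non-zero return, whether it had to adjust anything:

static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte, entry;
	int ret = 0;

	/* Walk down to the PTE for 'address'; bail out if nothing is mapped. */
	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	pte = pte_offset_map(pmd, address);
	entry = *pte;

	/* ... the pte_present()/shared_pte_mask fix-up shown in the hunk
	 * above goes here ... */

	pte_unmap(pte);
	return ret;
}

make_coherent() below relies on that return value: it sums the results over all other mappings of the page to decide whether the faulting mapping itself must also be made uncacheable.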
-static void __flush_dcache_page(struct page *page)
-{
- struct address_space *mapping = page_mapping(page);
- struct mm_struct *mm = current->active_mm;
- struct vm_area_struct *mpnt = NULL;
- struct prio_tree_iter iter;
- unsigned long offset;
- pgoff_t pgoff;
-
- __cpuc_flush_dcache_page(page_address(page));
-
- if (!mapping)
- return;
-
- /*
- * With a VIVT cache, we need to also write back
- * and invalidate any user data.
- */
- pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
- flush_dcache_mmap_lock(mapping);
- while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
- &iter, pgoff, pgoff)) != NULL) {
- /*
- * If this VMA is not in our MM, we can ignore it.
- */
- if (mpnt->vm_mm != mm)
- continue;
- if (!(mpnt->vm_flags & VM_MAYSHARE))
- continue;
- offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
- flush_cache_page(mpnt, mpnt->vm_start + offset);
- }
- flush_dcache_mmap_unlock(mapping);
-}
-
-void flush_dcache_page(struct page *page)
-{
- struct address_space *mapping = page_mapping(page);
-
- if (mapping && !mapping_mapped(mapping))
- set_bit(PG_dcache_dirty, &page->flags);
- else
- __flush_dcache_page(page);
-}
-EXPORT_SYMBOL(flush_dcache_page);
-
static void
-make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
+make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
- struct address_space *mapping = page_mapping(page);
struct mm_struct *mm = vma->vm_mm;
- struct vm_area_struct *mpnt = NULL;
+ struct vm_area_struct *mpnt;
struct prio_tree_iter iter;
unsigned long offset;
pgoff_t pgoff;
int aliases = 0;
- if (!mapping)
- return;
-
pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
/*
* cache coherency.
*/
flush_dcache_mmap_lock(mapping);
- while ((mpnt = vma_prio_tree_next(mpnt, &mapping->i_mmap,
- &iter, pgoff, pgoff)) != NULL) {
+ vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
/*
* If this VMA is not in our MM, we can ignore it.
* Note that we intentionally mask out the VMA
if (aliases)
adjust_pte(vma, addr);
else
- flush_cache_page(vma, addr);
+ flush_cache_page(vma, addr, pfn);
}
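The body of the vma_prio_tree_foreach() loop is largely elided above. As a sketch of the usual logic here (assumed, not part of the hunk): every other VMA in the current mm that may share this page is fixed up via adjust_pte(), and the number of aliases found drives the adjust_pte(vma, addr) versus flush_cache_page(vma, addr, pfn) choice in the tail shown above:

	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * Skip mappings that belong to another mm, and (as the
		 * comment above notes) the VMA we are currently fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
	}
	flush_dcache_mmap_unlock(mapping);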
+void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
/*
* Take care of architecture specific things when placing a new PTE into
* a page table, or changing an existing PTE. Basically, there are two
* things that we need to take care of:
*
*  1. If PG_dcache_dirty is set for the page, we need to ensure
*     that any cache entries for the kernel's virtual memory
*     range are written back to the page.
*  2. If we have multiple shared mappings of the same space in
* an object, we need to deal with the cache aliasing issues.
*
- * Note that the page_table_lock will be held.
+ * Note that the pte lock will be held.
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
+ struct address_space *mapping;
struct page *page;
if (!pfn_valid(pfn))
return;
+
page = pfn_to_page(pfn);
- if (page_mapping(page)) {
+ mapping = page_mapping(page);
+ if (mapping) {
int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
if (dirty)
- __cpuc_flush_dcache_page(page_address(page));
+ __flush_dcache_page(mapping, page);
- make_coherent(vma, addr, page, dirty);
+ if (cache_is_vivt())
+ make_coherent(mapping, vma, addr, pfn);
}
}
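The VIVT flush_dcache_page()/__flush_dcache_page() pair removed above does not disappear: update_mmu_cache() now calls an externally provided __flush_dcache_page(mapping, page), declared earlier in this patch. A rough sketch of what that counterpart presumably looks like, essentially the removed code with the mapping passed in by the caller, the open-coded loop replaced by vma_prio_tree_foreach(), and flush_cache_page() carrying a pfn (its location, e.g. arch/arm/mm/flush.c, is an assumption):

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;

	/* Flush the kernel-space alias of the page first. */
	__cpuc_flush_dcache_page(page_address(page));

	if (!mapping)
		return;

	/*
	 * With a VIVT cache, also write back and invalidate any
	 * user-space aliases of the page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset,
				 page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}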