-}
-
-/* When shared+writable mmaps of files go away, we lose all dirty
- * page state, so we have to deal with D-cache aliasing here.
- *
- * This code relies on the fact that flush_cache_range() is always
- * called for an area composed by a single VMA. It also assumes that
- * the MM's page_table_lock is held.
- */
-static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long address, unsigned long size)
-{
-	unsigned long offset;
-	pte_t *ptep;
-
-	if (pmd_none(*pmd))
-		return;
-	ptep = pte_offset_map(pmd, address);
-	offset = address & ~PMD_MASK;
-	if (offset + size > PMD_SIZE)
-		size = PMD_SIZE - offset;
-	size &= PAGE_MASK;
-	for (offset = 0; offset < size; ptep++, offset += PAGE_SIZE) {
-		pte_t pte = *ptep;
-
-		if (pte_none(pte))
-			continue;
-
-		if (pte_present(pte) && pte_dirty(pte)) {
-			struct page *page;
-			unsigned long pgaddr, uaddr;
-			unsigned long pfn = pte_pfn(pte);
-
-			if (!pfn_valid(pfn))
-				continue;
-			page = pfn_to_page(pfn);
-			if (PageReserved(page) || !page_mapping(page))
-				continue;
-			pgaddr = (unsigned long) page_address(page);
-			uaddr = address + offset;
-			if ((pgaddr ^ uaddr) & (1 << 13))
-				flush_dcache_page_all(mm, page);
-		}
-	}
-	pte_unmap(ptep - 1);
-}
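
For reference, here is a minimal user-space sketch (not part of the patch) of the
aliasing test the deleted code performs with `(pgaddr ^ uaddr) & (1 << 13)`. It
assumes the 8KB-page / 16KB virtually-indexed, direct-mapped D-cache geometry of
early UltraSPARC, where bit 13 is the lone cache-index bit above the page offset;
`DCACHE_ALIAS_BIT`, `dcache_aliases`, and the example addresses are illustrative,
not names from the kernel.

#include <stdbool.h>
#include <stdio.h>

/* Assumed geometry: with 8KB pages and a 16KB virtually-indexed,
 * direct-mapped D-cache, bit 13 is the only index bit above the
 * page offset, so it selects the cache "color" of a mapping. */
#define DCACHE_ALIAS_BIT (1UL << 13)

/* Hypothetical helper mirroring the (pgaddr ^ uaddr) & (1 << 13) test:
 * two virtual mappings of the same physical page can hold stale copies
 * in different cache lines exactly when they disagree in bit 13. */
static bool dcache_aliases(unsigned long kernel_vaddr, unsigned long user_vaddr)
{
	return ((kernel_vaddr ^ user_vaddr) & DCACHE_ALIAS_BIT) != 0;
}

int main(void)
{
	unsigned long pgaddr = 0xfffff80000004000UL; /* kernel mapping, bit 13 clear */

	/* User mapping with bit 13 clear: same color, no flush needed. */
	printf("%d\n", dcache_aliases(pgaddr, 0x0000UL)); /* prints 0 */

	/* User mapping with bit 13 set: different color, so the dirty
	 * lines must be flushed before the mapping goes away. */
	printf("%d\n", dcache_aliases(pgaddr, 0x2000UL)); /* prints 1 */

	return 0;
}

This is why the deleted loop only calls flush_dcache_page_all() for dirty,
present PTEs whose user address differs from the kernel address of the page in
that one bit: mappings of the same color already share cache lines and cannot
go stale relative to each other.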